SlowGuess committed on
Commit 61935d5 · verified · 1 Parent(s): 850c1ac

Add Batch 8adb3207-aa8f-48ca-8b73-8ed06381fcbf

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +64 -0
  2. 2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_content_list.json +0 -0
  3. 2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_model.json +0 -0
  4. 2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_origin.pdf +3 -0
  5. 2502.00xxx/2502.00688/full.md +0 -0
  6. 2502.00xxx/2502.00688/images.zip +3 -0
  7. 2502.00xxx/2502.00688/layout.json +0 -0
  8. 2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_content_list.json +0 -0
  9. 2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_model.json +0 -0
  10. 2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_origin.pdf +3 -0
  11. 2502.00xxx/2502.00691/full.md +767 -0
  12. 2502.00xxx/2502.00691/images.zip +3 -0
  13. 2502.00xxx/2502.00691/layout.json +0 -0
  14. 2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_content_list.json +0 -0
  15. 2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_model.json +0 -0
  16. 2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_origin.pdf +3 -0
  17. 2502.00xxx/2502.00698/full.md +797 -0
  18. 2502.00xxx/2502.00698/images.zip +3 -0
  19. 2502.00xxx/2502.00698/layout.json +0 -0
  20. 2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_content_list.json +0 -0
  21. 2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_model.json +0 -0
  22. 2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_origin.pdf +3 -0
  23. 2502.00xxx/2502.00722/full.md +458 -0
  24. 2502.00xxx/2502.00722/images.zip +3 -0
  25. 2502.00xxx/2502.00722/layout.json +0 -0
  26. 2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_content_list.json +0 -0
  27. 2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_model.json +0 -0
  28. 2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_origin.pdf +3 -0
  29. 2502.00xxx/2502.00782/full.md +761 -0
  30. 2502.00xxx/2502.00782/images.zip +3 -0
  31. 2502.00xxx/2502.00782/layout.json +0 -0
  32. 2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_content_list.json +0 -0
  33. 2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_model.json +0 -0
  34. 2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_origin.pdf +3 -0
  35. 2502.00xxx/2502.00792/full.md +558 -0
  36. 2502.00xxx/2502.00792/images.zip +3 -0
  37. 2502.00xxx/2502.00792/layout.json +0 -0
  38. 2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_content_list.json +0 -0
  39. 2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_model.json +0 -0
  40. 2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_origin.pdf +3 -0
  41. 2502.00xxx/2502.00806/full.md +538 -0
  42. 2502.00xxx/2502.00806/images.zip +3 -0
  43. 2502.00xxx/2502.00806/layout.json +0 -0
  44. 2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_content_list.json +0 -0
  45. 2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_model.json +0 -0
  46. 2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_origin.pdf +3 -0
  47. 2502.00xxx/2502.00816/full.md +627 -0
  48. 2502.00xxx/2502.00816/images.zip +3 -0
  49. 2502.00xxx/2502.00816/layout.json +0 -0
  50. 2502.00xxx/2502.00873/9de04ed4-cd03-45c2-bb87-7052345f2dbb_content_list.json +0 -0
.gitattributes CHANGED
@@ -4847,3 +4847,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
4847
  2502.04xxx/2502.04382/5bfca52c-83b4-40f6-9538-570998f6a3b9_origin.pdf filter=lfs diff=lfs merge=lfs -text
4848
  2502.06xxx/2502.06827/76bfe26f-494b-4b5f-9f88-e12a13b1207d_origin.pdf filter=lfs diff=lfs merge=lfs -text
4849
  2502.18xxx/2502.18474/b79399af-9f74-431f-b973-4f933394413f_origin.pdf filter=lfs diff=lfs merge=lfs -text
4850
+ 2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_origin.pdf filter=lfs diff=lfs merge=lfs -text
4851
+ 2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_origin.pdf filter=lfs diff=lfs merge=lfs -text
4852
+ 2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4853
+ 2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_origin.pdf filter=lfs diff=lfs merge=lfs -text
4854
+ 2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_origin.pdf filter=lfs diff=lfs merge=lfs -text
4855
+ 2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_origin.pdf filter=lfs diff=lfs merge=lfs -text
4856
+ 2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_origin.pdf filter=lfs diff=lfs merge=lfs -text
4857
+ 2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_origin.pdf filter=lfs diff=lfs merge=lfs -text
4858
+ 2502.00xxx/2502.00873/9de04ed4-cd03-45c2-bb87-7052345f2dbb_origin.pdf filter=lfs diff=lfs merge=lfs -text
4859
+ 2502.00xxx/2502.00883/f47ddd1c-532f-44cd-8428-51929fa04654_origin.pdf filter=lfs diff=lfs merge=lfs -text
4860
+ 2502.00xxx/2502.00935/4879401e-dc8a-432d-8610-a295bcc707cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
4861
+ 2502.00xxx/2502.00940/b141ca45-025f-4c21-96f1-8c725e4366d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
4862
+ 2502.00xxx/2502.00975/1a6504f8-9226-4de7-98e1-a23727e66347_origin.pdf filter=lfs diff=lfs merge=lfs -text
4863
+ 2502.00xxx/2502.00987/1b1cc25c-b00b-4667-8365-40e3b63e4975_origin.pdf filter=lfs diff=lfs merge=lfs -text
4864
+ 2502.00xxx/2502.00988/cd3652df-3643-4164-9654-df43919cd4dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
4865
+ 2502.01xxx/2502.01061/24776a98-524d-45ec-8800-00aee1b6483b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4866
+ 2502.01xxx/2502.01066/ac1d0823-38ba-4c37-ab80-ddce36c2269a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4867
+ 2502.01xxx/2502.01100/514b4d44-0e0e-4abd-9fde-41b3c9b890a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
4868
+ 2502.01xxx/2502.01105/dd904df6-aa36-4a15-babb-238c2f93b3f5_origin.pdf filter=lfs diff=lfs merge=lfs -text
4869
+ 2502.01xxx/2502.01108/4538d876-e673-4901-9d37-9b44e451f30d_origin.pdf filter=lfs diff=lfs merge=lfs -text
4870
+ 2502.01xxx/2502.01113/8980d2c2-42f9-4754-a374-3316f1193c3a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4871
+ 2502.01xxx/2502.01122/f41fd7a1-7a57-4ff8-b361-d9a9e7a069d7_origin.pdf filter=lfs diff=lfs merge=lfs -text
4872
+ 2502.01xxx/2502.01143/b836af51-9d17-47f7-9ca8-9d07e3b3f721_origin.pdf filter=lfs diff=lfs merge=lfs -text
4873
+ 2502.01xxx/2502.01146/7ab5e181-5ca8-49da-8713-558d3b6841c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4874
+ 2502.01xxx/2502.01157/a42c8e11-3297-4908-9525-1fce1f6cd1e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4875
+ 2502.01xxx/2502.01236/1a57d48d-1737-4a0f-a7b6-321668487c40_origin.pdf filter=lfs diff=lfs merge=lfs -text
4876
+ 2502.01xxx/2502.01376/f3e7c0f6-39fe-404c-a0ea-3e1a5c94a587_origin.pdf filter=lfs diff=lfs merge=lfs -text
4877
+ 2502.01xxx/2502.01384/39899acb-3862-47b5-a251-bb1936a34881_origin.pdf filter=lfs diff=lfs merge=lfs -text
4878
+ 2502.01xxx/2502.01390/b2c23008-07df-484b-8c67-4265eacb0522_origin.pdf filter=lfs diff=lfs merge=lfs -text
4879
+ 2502.01xxx/2502.01456/3b933121-4fd3-4c22-bcf5-0a9c2a330c87_origin.pdf filter=lfs diff=lfs merge=lfs -text
4880
+ 2502.01xxx/2502.01465/ff3b64f6-4312-4cb5-9d24-f364437e2f6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4881
+ 2502.01xxx/2502.01467/77c94cc6-dfcf-4058-b674-e39b20e72206_origin.pdf filter=lfs diff=lfs merge=lfs -text
4882
+ 2502.01xxx/2502.01477/099ccc67-3a0a-4af2-ba93-7f341af3648a_origin.pdf filter=lfs diff=lfs merge=lfs -text
4883
+ 2502.01xxx/2502.01506/97b40db7-50ef-48a1-9aa1-afc3dfa451e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
4884
+ 2502.01xxx/2502.01534/cc66a3bc-950e-42b2-938b-374188cb603b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4885
+ 2502.01xxx/2502.01536/ab5545bf-81f3-4754-b9d5-16e57e4127a9_origin.pdf filter=lfs diff=lfs merge=lfs -text
4886
+ 2502.01xxx/2502.01537/51b378a7-608c-4a29-9338-366a5dd0e8d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
4887
+ 2502.01xxx/2502.01549/5c475e5a-33fd-4378-8bf2-4b904380af10_origin.pdf filter=lfs diff=lfs merge=lfs -text
4888
+ 2502.01xxx/2502.01563/a8dffe38-b2fe-4a27-9583-2a893d23c2af_origin.pdf filter=lfs diff=lfs merge=lfs -text
4889
+ 2502.01xxx/2502.01572/1172e53b-9a39-412a-a604-68f0151032ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
4890
+ 2502.01xxx/2502.01590/ffdf2e05-15d2-4ba1-a246-c73fa4b717f2_origin.pdf filter=lfs diff=lfs merge=lfs -text
4891
+ 2502.01xxx/2502.01600/f131fa82-fb64-4b6d-a4e6-2ff5a99a6ba8_origin.pdf filter=lfs diff=lfs merge=lfs -text
4892
+ 2502.01xxx/2502.01612/98724fb0-4f8b-4a0b-9fd8-04a854ff1f69_origin.pdf filter=lfs diff=lfs merge=lfs -text
4893
+ 2502.01xxx/2502.01615/5debef53-230b-4c8d-994a-8e7fce503531_origin.pdf filter=lfs diff=lfs merge=lfs -text
4894
+ 2502.01xxx/2502.01633/9ff1e9aa-1cc6-484e-a8a9-68ca58ef2aa0_origin.pdf filter=lfs diff=lfs merge=lfs -text
4895
+ 2502.01xxx/2502.01635/764615a8-0851-43f2-bd2a-f3bce0821798_origin.pdf filter=lfs diff=lfs merge=lfs -text
4896
+ 2502.01xxx/2502.01715/492e070a-e68a-438c-8007-99b5ab07cc8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
4897
+ 2502.01xxx/2502.01718/a5962eb5-86c3-460e-bc58-4beb75f64fae_origin.pdf filter=lfs diff=lfs merge=lfs -text
4898
+ 2502.01xxx/2502.01720/87cde87b-c80d-4cc0-9ed1-b522f6b1c0cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
4899
+ 2502.01xxx/2502.01776/9e11c7f9-8427-40f1-8ff5-1beb751bc670_origin.pdf filter=lfs diff=lfs merge=lfs -text
4900
+ 2502.01xxx/2502.01819/e8ff73f0-61df-427c-80c4-d155a8274126_origin.pdf filter=lfs diff=lfs merge=lfs -text
4901
+ 2502.01xxx/2502.01822/452bbc02-9b62-4edb-bbc2-cd21aa28bf95_origin.pdf filter=lfs diff=lfs merge=lfs -text
4902
+ 2502.01xxx/2502.01828/ceb5b4cc-80a3-4d78-9812-177dcc005845_origin.pdf filter=lfs diff=lfs merge=lfs -text
4903
+ 2502.01xxx/2502.01839/580c5639-2874-4180-aff2-5fd208f846e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
4904
+ 2502.01xxx/2502.01941/55976fb5-97e6-4de8-9ed1-5b2b00cb6e15_origin.pdf filter=lfs diff=lfs merge=lfs -text
4905
+ 2502.01xxx/2502.01946/affd2bd2-8368-4141-a18d-f211a5d01b01_origin.pdf filter=lfs diff=lfs merge=lfs -text
4906
+ 2502.01xxx/2502.01951/9520b3ae-0ea5-4e06-bce0-7b0af8f355cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
4907
+ 2502.01xxx/2502.01966/fc1334e4-0dc0-4469-9746-298928ce0a23_origin.pdf filter=lfs diff=lfs merge=lfs -text
4908
+ 2502.01xxx/2502.01968/b360501d-ac56-454d-bfe0-7df8f7bc98ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
4909
+ 2502.01xxx/2502.01976/2bb67427-b85d-499f-a53a-9ffde570bf19_origin.pdf filter=lfs diff=lfs merge=lfs -text
4910
+ 2502.01xxx/2502.01993/bcba2f1d-dc1b-4233-88f3-87c62f3b3208_origin.pdf filter=lfs diff=lfs merge=lfs -text
4911
+ 2502.05xxx/2502.05209/2cd0a660-de8f-4438-b040-e8ca4903fc93_origin.pdf filter=lfs diff=lfs merge=lfs -text
4912
+ 2502.06xxx/2502.06805/2dc54b0c-7511-4b0e-a0cd-a1b47264dd71_origin.pdf filter=lfs diff=lfs merge=lfs -text
4913
+ 2502.06xxx/2502.06807/84660250-1c16-48b2-80f4-c46a423de42e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00688/a2b1fbdd-f674-4bb6-bacf-af46f1703ae8_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ccd36822543f06cd767b02c684d43a67bf18ef0abafc9521ac91984b29c62524
3
+ size 3383627
2502.00xxx/2502.00688/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00688/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a860e1f4bf4e2e5a8faa4ea3ea384f04bb23533e334c78e132e9ea75d246c21
3
+ size 2548806
2502.00xxx/2502.00688/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00691/76c0165d-8b1e-4abd-a4f8-844530a88428_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1afef7e80eeea1b4612545b6bcb76defcfbb6aaf8410d9cbb8082f1fc45ceddb
3
+ size 1222228
2502.00xxx/2502.00691/full.md ADDED
@@ -0,0 +1,767 @@
1
+ # To Code or not to Code? Adaptive Tool Integration for Math Language Models via Expectation-Maximization
2
+
3
+ Haozhe Wang<sup>1</sup>, Long Li<sup>2</sup>, Chao Qu<sup>2</sup>, Fengming Zhu<sup>1</sup>, Weidi Xu<sup>2</sup>, Wei Chu<sup>2</sup>, Fangzhen Lin<sup>1†</sup>
4
+
5
+ Hong Kong University of Science and Technology<sup>1</sup>, INFLY Tech<sup>2</sup>
6
+
7
+ # Abstract
8
+
9
+ Recent advances in mathematical problem-solving with language models (LMs) integrate chain-of-thought (CoT) reasoning and code execution to harness their complementary strengths. However, existing hybrid frameworks exhibit a critical limitation: they depend on externally dictated instructions or rigid code-integration templates, lacking metacognitive awareness – the capacity to dynamically evaluate intrinsic capabilities and autonomously determine when and how to integrate tools. This rigidity motivates our study of autonomous code integration, enabling models to adapt tool-usage strategies as their reasoning abilities evolve during training.
10
+
11
+ While reinforcement learning (RL) shows promise for boosting LLM reasoning at scale (e.g., DeepSeek-R1), we demonstrate its inefficiency in learning autonomous code integration due to inadequate exploration of the vast combinatorial space of CoT-code interleaving patterns. To address this challenge, we propose a novel Expectation-Maximization (EM) framework that synergizes structured exploration (E-step) with off-policy RL optimization (M-step), creating a self-reinforcing cycle between metacognitive tool-use decisions and evolving capabilities. Experiments reveal our method achieves superior results through improved exploration. Notably, our 7B model improves over $11\%$ on MATH500 and $9.4\%$ on AIME without o1-like CoT. Code, models and data are released on https://github.com/HaozheH3/AutoCode.
12
+
13
+ # 1 Introduction
14
+
15
+ Large Language Models (LLMs) have demonstrated remarkable performance across various domains (Kaddour et al., 2023; Achiam et al., 2023; Dubey et al., 2024; Team et al., 2023; Yang et al., 2024a). Yet, solving complex mathematical problems still remains challenging, as the task requires
16
+
17
+ ![](images/977074f5021eac52856127391e8c9aa399cf10a4f6e68df1e3faa3140441023d.jpg)
18
+ Figure 1: Analysis of existing tool-integrated math LLMs. While tool integration can in principle bring complementary benefits to CoT reasoning, existing models show critical rigidity when incorporating code, lacking the metacognitive capacity to earn the synergistic benefits from code integration.
19
+
20
+ hybrid skills in abstract reasoning, symbolic manipulation, and precise numerical computation (Gao et al., 2023; Yue et al., 2023; Gou et al., 2023; Li, 2024). Current approaches adopt two complementary paradigms: (1) chain-of-thought (CoT) reasoning, which decomposes problems into intermediate reasoning steps (Wei et al., 2022; Yu et al., 2023), and (2) external tool integration, where models generate code snippets to offload computations to interpreters or symbolic solvers (Toshniwal et al., 2024; Yue et al., 2023). While CoT reasoning excels at semantic parsing and stepwise logic, its reliance on token-level autoregressive generation often propagates numerical errors. Conversely, tool-based approaches ensure computational precision but suffer from a semantic-to-symbolic translation gap, where even minor syntactic errors or contextual misinterpretations disrupt execution (Li, 2024).
21
+
22
+ Recent hybrid frameworks like Mammoth (Yue et al., 2023), Deepseek-Math (Gou et al., 2023; Shao et al., 2024), and Qwen-2.5-Math (Yang et al., 2024b) attempt to combine these paradigms through interleaved CoT-code reasoning. However, as our empirical analysis reveals (Fig. 1), current methods exhibit a critical rigidity: they
23
+
24
+ either default to CoT reasoning unless explicitly prompted for code generation or adhere to static templates for tool invocation. We trace this limitation to prevailing supervised fine-tuning (SFT) paradigms that condition models to (1) passively follow user instructions (e.g., "Let's write a Python program" (Yue et al., 2023)), (2) replicate fixed code-integration patterns from curated datasets (Yang et al., 2024b), or (3) imitate teacher-forced tool-use trajectories (Gou et al., 2023; Shao et al., 2024). Consequently, LLMs lack metacognitive awareness – the capacity to dynamically evaluate their intrinsic capabilities against problem contexts and autonomously determine when and how to integrate tools. This deficiency motivates our central research question:
25
+
26
+ How can mathematical LLMs learn autonomous code integration (AutoCode) that optimally complements their inherent reasoning capabilities?
27
+
28
+ Reinforcement learning (RL) offers a promising pathway by optimizing policies through self-generated trajectories, as evidenced by recent successes like DeepSeek R1 (Guo et al., 2025). However, we empirically observe that standard RL methods are inefficient in learning autonomous code integration (AutoCode) strategies (see Sec. 4.2). This stems from RL's tendency to exploit local policy neighborhoods, thereby insufficiently exploring the vast combinatorial space of potential CoT-code interleaving patterns. Such myopic exploration constrains the discovery of high-reward reasoning paths that judiciously blend both modalities, particularly as the model's reasoning capabilities evolve during training.
29
+
30
+ To address this challenge, we propose a novel Expectation-Maximization (EM) framework that synergizes guided exploration with policy optimization. Our key innovation lies in formulating code-integration decisions as latent variables within an EM paradigm, creating a self-reinforcing cycle: the E-step identifies high-potential code-integration decisions through guided exploration, while the M-step optimizes policy parameters for joint metacognitive tool-usage and reasoning.
31
+
32
+ This dual mechanism enables models to adapt tool-use strategies as their capabilities evolve during training. Practically, we achieve efficiency through two design choices: (1) an offline data curation step (E-step) that prioritizes high-return code invocation decisions through guided exploration,
33
+
34
+ ![](images/0b247470450d247ddf3e0736b3bec1eab23bae0623906e4de62323a5d98170ea.jpg)
35
+ Figure 2: Example of Autonomous Code Integration. We aim to enable LLMs to determine tool-usage strategies based on their own capability boundaries. In the example, the model writes code to solve a problem that demands special tricks, strategically bypassing its inherent limitations.
36
+
37
+ and (2) an off-policy RL optimization step (M-step) that jointly improves tool-usage and reasoning. This approach offers enhanced control and efficiency compared to standard RL, which is particularly beneficial for resource-constrained companies or researchers.
38
+
39
+ Extensive experiments demonstrate that our method (a) preserves higher training efficiency while achieving better performance, and (b) learns intelligent code integration strategies that achieve higher accuracy than either CoT or code prompted in isolation. Notably, our models show consistent improvements across different benchmarks, raising MATH500 from $60.4\%$ to $71.4\%$.
40
+
41
+ Our contribution is summarized as follows: (1) We diagnose a critical gap in mathematical LLMs – the inability to autonomously integrate tools based on metacognitive awareness – and demonstrate standard RL's inefficiency in addressing it. (2) We propose a novel EM-based framework that jointly adapts tool-usage strategies alongside evolving reasoning abilities, with a simple yet efficient implementation. (3) We demonstrate superior results in both training efficiency and accuracy on challenging benchmarks.
42
+
43
+ # 2 Background
44
+
45
+ Problem Statement. Modern tool-augmented language models address mathematical problems $x_{q} \in \mathcal{X}_{Q}$ by generating step-by-step solutions that
46
+
47
+ interleave natural language reasoning with executable Python code (Fig. 2). Formally, given a problem $x_{q}$ , a model $\mathcal{M}_{\theta}$ iteratively constructs a solution $y_{a} = \{y_{1},\dots ,y_{T}\}$ by sampling components $y_{t}\sim p(y_{t}|y_{< t},x_{q})$ , where $y_{< t}$ encompasses both prior reasoning steps, code snippets and execution results $\mathbf{e}_t$ from a Python interpreter. The process terminates upon generating an end token, and the solution is evaluated via a binary reward $r(y_{a},x_{q}) = \mathbb{I}(y_{a}\equiv y^{*})$ indicating equivalence to the ground truth $y^{*}$ . The learning objective is formulated as:
48
+
49
+ $$
50
+ \max _ {\theta} \mathbb {E} _ {x _ {q} \sim \mathcal {X} _ {Q}} \left[ r (y _ {a}, x _ {q}) \right]
51
+ $$
52
+
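To make this setup concrete, the following is a minimal sketch of one interleaved CoT/code rollout scored by the terminal binary reward. The helper callables (`generate`, `run_python`, `extract_answer`) are illustrative placeholders for a decoding loop, an interpreter sandbox, and an answer parser; they are not components released with the paper.

```python
from typing import Callable, Tuple

def rollout(
    generate: Callable[[str], Tuple[str, bool]],  # placeholder: returns (next segment, wants_code)
    run_python: Callable[[str], str],             # placeholder: executes a code snippet, returns its output
    extract_answer: Callable[[str], str],         # placeholder: parses the final answer from y_a
    x_q: str,
    y_star: str,
    max_turns: int = 8,
) -> Tuple[str, float]:
    """One interleaved CoT/code solution, scored by the terminal binary reward
    r(y_a, x_q) = 1 if the final answer is equivalent to y*, else 0."""
    y_a = ""  # the growing solution y_1, ..., y_T
    for _ in range(max_turns):
        segment, wants_code = generate(x_q + y_a)  # sample y_t ~ p(y_t | y_<t, x_q)
        y_a += segment
        if not wants_code:                         # end token reached: solution is complete
            break
        result = run_python(segment)               # offload computation to the interpreter
        y_a += f"\nExecution result: {result}\n"   # feed the execution result e_t back as context
    return y_a, float(extract_answer(y_a) == y_star)
```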
53
+ Challenge and Motivation. Developing autonomous code integration (AutoCode) strategies poses unique challenges, as optimal tool-usage behaviors must dynamically adapt to a model's intrinsic capabilities and problem-solving contexts. While traditional supervised fine-tuning (SFT) relies on imitation learning from expert demonstrations, this paradigm fundamentally limits the emergence of self-directed tool-usage strategies. Unfortunately, current math LLMs predominantly employ SFT to orchestrate tool integration (Yue et al., 2023; Gou et al., 2023; Shao et al., 2024; Li, 2024); their rigid adherence to predefined reasoning templates therefore struggles with the dynamic interplay between a model's evolving problem-solving competencies and the adaptive tool-usage strategies required for diverse mathematical contexts.
54
+
55
+ Reinforcement learning (RL) offers a promising alternative by enabling trial-and-error discovery of autonomous behaviors. Recent work like DeepSeek-R1 (Guo et al., 2025) demonstrates RL's potential to enhance reasoning without expert demonstrations. However, we observe that standard RL methods (e.g., PPO (Schulman et al., 2017)) suffer from a critical inefficiency (see Sec. 4.2): Their tendency to exploit local policy neighborhoods leads to insufficient exploration of the vast combinatorial space of code-integrated reasoning paths, especially when only given a terminal reward in mathematical problem-solving.
56
+
57
+ To bridge this gap, we draw inspiration from human metacognition – the iterative process where learners refine tool-use strategies through deliberate exploration, outcome analysis, and belief updates. A novice might initially attempt manual root-finding via algebraic methods, observe computational bottlenecks or inaccuracies, and therefore turn to a calculator. Through
58
+
59
+ systematic reflection on these experiences, they internalize the contextual efficacy of external tools, gradually forming stable heuristics that balance reasoning with judicious tool invocation.
60
+
61
+ To this end, our focus diverges from standard agentic tool-use frameworks (Yuan et al., 2025), which merely prioritize successful tool execution. Instead, we aim to instill human-like metacognition in LLMs, enabling them to (1) determine tool-usage based on their own capability boundaries (see the analysis in Sec. 4.2), and (2) dynamically adapt tool-usage strategies as their reasoning abilities evolve (via our EM framework).
62
+
63
+ # 3 Methodology
64
+
65
+ Inspired by human metacognitive processes, we introduce an Expectation-Maximization (EM) framework that trains LLMs for autonomous code integration (AutoCode) through alternations (Fig. 3):
66
+
67
+ 1. Guided Exploration (E-step): Identifies high-potential code-integrated solutions by systematically probing the model's inherent capabilities.
68
+ 2. Self-Refinement (M-step): Optimizes the model's tool-usage strategy and chain-of-thought reasoning using curated trajectories from the E-step.
69
+
70
+ # 3.1 The EM Framework for AutoCode
71
+
72
+ A central challenge in AutoCode lies in the code triggering decisions, represented by the binary decision $c \in \{0,1\}$ . While supervised fine-tuning (SFT) suffers from missing ground truth for these decisions, standard reinforcement learning (RL) struggles with the combinatorial explosion of code-integrated reasoning paths. Our innovation bridges these approaches through systematic exploration of both code-enabled ( $c = 1$ ) and non-code ( $c = 0$ ) solution paths, constructing reference decisions for policy optimization.
73
+
74
+ We formalize this idea within a maximum likelihood estimation (MLE) framework. Let $P(r = 1|x_{q};\theta)$ denote the probability of generating a correct response to query $x_{q}$ under model $\mathcal{M}_{\theta}$. Our objective becomes:
75
+
76
+ $$
77
+ \mathcal {J} _ {\mathrm {M L E}} (\theta) \doteq \log P (r = 1 | x _ {q}; \theta) \tag {1}
78
+ $$
79
+
80
+ This likelihood depends on two latent factors: (1) the code triggering decision $\pi_{\theta}(c|x_q)$ and (2) the solution generation process $\pi_{\theta}(y_a|x_q,c)$. Here, for notational clarity, we consider the code-triggering decision at a solution's beginning ($c$ following $x_q$
81
+
82
+ ![](images/e1b2dfe49f091994bac47113afdbe1a0fd73d94583a764518e6059c79a2d7880.jpg)
83
+ Figure 3: Method Overview. (Left) shows an overview for the EM framework, which alternates between finding a reference strategy for guided exploration (E-step) and off-policy RL (M-step). (Right) shows the data curation for guided exploration. We generate $K$ rollouts, estimate values of code-triggering decisions and subsample the initial data with sampling weights per Eq. 5.
84
+
85
+ ![](images/97e254c531ac0a247602e7b93f77b30f5a0106dde7a38a88bca371c3ecf91acf.jpg)
86
+
87
+ immediately). We show generalization to mid-reasoning code integration in Sec. 3.2.
88
+
89
+ The EM framework provides a principled way to optimize this MLE objective in the presence of latent variables (Bishop and Nasrabadi, 2006). We derive the evidence lower bound (ELBO): $\mathcal{J}_{\mathrm{ELBO}}(s,\theta)\doteq$
90
+
91
+ $$
92
+ \mathbb {E} _ {s (c | x _ {q})} \left[ \log \frac {\pi_ {\theta} (c | x _ {q}) \cdot P (r = 1 | c , x _ {q} ; \theta)}{s (c | x _ {q})} \right] \tag {2}
93
+ $$
94
+
95
+ where $s(c|x_q)$ serves as a surrogate distribution approximating the optimal code-triggering strategy. It can also be viewed as the reference decision for code integration.
96
+
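For completeness, the bound in Eq. 2 follows from a standard application of Jensen's inequality to the marginalization over $c$ (the full derivation is deferred to Appendix A.1):

$$
\log P(r = 1 \mid x_q; \theta) = \log \sum_{c} s(c \mid x_q)\, \frac{\pi_{\theta}(c \mid x_q)\, P(r = 1 \mid c, x_q; \theta)}{s(c \mid x_q)} \;\geq\; \mathbb{E}_{s(c \mid x_q)}\!\left[ \log \frac{\pi_{\theta}(c \mid x_q)\, P(r = 1 \mid c, x_q; \theta)}{s(c \mid x_q)} \right] = \mathcal{J}_{\mathrm{ELBO}}(s, \theta),
$$

with equality exactly when $s(c \mid x_q)$ matches the posterior $P(c \mid r = 1, x_q; \theta)$, which is what the E-step below approximates.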
97
+ E-step: Guided Exploration computes the reference strategy $s(c|x_q)$ by maximizing the ELBO, which is equivalent to minimizing the KL divergence: $\max_s\mathcal{J}_{\mathrm{ELBO}}(s,\theta) =$
98
+
99
+ $$
100
+ - \mathrm {D} _ {\mathrm {K L}} (s (c | x _ {q}) \| P (r = 1, c | x _ {q}; \theta)) \tag {3}
101
+ $$
102
+
103
+ The reference strategy $s(c|x_q)$ thus approximates the posterior distribution over code-triggering decisions $c$ that maximize correctness, i.e., $P(r = 1, c|x_q; \theta)$ . Intuitively, it guides exploration by prioritizing decisions with high potential: if decision $c$ is more likely to lead to correct solutions, the reference strategy assigns higher probability mass to it, providing guidance for the subsequent RL procedure.
104
+
105
+ M-step: Self-Refinement updates the model parameters $\theta$ through a composite objective:
108
+
109
+ $$
110
+ \begin{array}{l} \max_{\theta}\mathcal{J}_{\mathrm{ELBO}}(s,\theta) = \mathbb{E}_{\substack{c\sim s(c|x_{q})\\ y_{a}\sim \pi_{\theta}(y_{a}|x_{q},c)}}\Bigl[r(x_{q},y_{a})\Bigr] \\ - \mathcal {C E} \left(s \left(c \mid x _ {q}\right) \| \pi_ {\theta} \left(c \mid x _ {q}\right)\right) \tag {4} \\ \end{array}
111
+ $$
112
+
113
+ The first term implements reward-maximizing policy gradient updates for solution generation, while the second aligns native code triggering with the reference strategy through cross-entropy minimization (see Fig. 3 for an illustration of the optimization). This dual optimization jointly enhances both tool-usage policies and reasoning capabilities.
114
+
115
+ # 3.2 Practical Implementation
116
+
117
+ In the above EM framework, we alternate between finding a reference strategy $s$ for code-triggering decisions in the E-step, and performing reinforcement learning under the guidance of $s$ in the M-step. We implement this framework through an iterative process of offline data curation and off-policy RL.
+
+ Offline Data Curation. We implement the E-step through Monte Carlo rollouts and subsampling. For each problem $x_{q}$, we estimate the reference strategy as an energy distribution:
118
+
119
+ $$
120
+ s ^ {*} (c \mid x _ {q}) = \frac {\exp (\alpha \cdot \pi_ {\theta} (c \mid x _ {q}) Q (x _ {q} , c ; \theta))}{Z (x _ {q})}. \tag {5}
121
+ $$
122
+
123
+ where $Q(x_{q},c;\theta)$ estimates the expected value through $K$ rollouts per decision, $\pi_{\theta}(c|x_{q})$ represents the model's current prior, and $Z(x_{q})$ is the partition function that ensures normalization. Intuitively, the strategy assigns higher probability mass to the decision $c$ with higher expected value $Q(x_{q},c;\theta)$ while balancing it against the model's intrinsic preference $\pi_{\theta}(c|x_q)$.
126
+
127
+ Our curation pipeline proceeds through:
128
+
129
+ - Generate $K$ rollouts for $c = 0$ (pure reasoning) and $c = 1$ (code integration), creating candidate dataset $\mathcal{D}$ .
130
+ - Compute $Q(x_{q},c)$ as the expected success rate across rollouts for each pair $(x_{q},c)$ .
131
+ - Subsample $\mathcal{D}_{\mathrm{train}}$ from $\mathcal{D}$ using importance weights according to Eq. 5.
132
+
133
+ To explicitly probe code-integrated solutions, we employ prefix-guided generation - e.g., prepending prompts like "Let's first analyze the problem, then consider if python code could help" - to bias generations toward freeform code-reasoning patterns.
134
+
135
+ This pipeline enables guided exploration by focusing on high-potential code-integrated trajectories identified by the reference strategy, contrasting with standard RL's reliance on local policy neighborhoods. As demonstrated in Sec. 4.2, this strategic data curation significantly improves training efficiency by shaping the exploration space.
136
+
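A minimal sketch of this curation loop is shown below, assuming access to a rollout routine like the one sketched in Sec. 2 and to the model's prior $\pi_{\theta}(c \mid x_q)$. The function names, the NumPy-based subsampling, and the per-decision bookkeeping are our illustrative assumptions, not the paper's released implementation.

```python
import numpy as np

def curate_estep(problems, rollout_fn, prior_fn, K=8, alpha=1.0, seed=0):
    """E-step data curation (Eq. 5): estimate Q(x_q, c) by Monte Carlo,
    form the energy-based reference strategy s*(c|x_q), and subsample
    the candidate trajectories with those weights."""
    rng = np.random.default_rng(seed)
    d_train = []
    for x_q, y_star in problems:
        candidates, q = {0: [], 1: []}, np.zeros(2)
        for c in (0, 1):  # c=0: vanilla CoT generation, c=1: prefix-guided code integration
            for _ in range(K):
                y_a, r = rollout_fn(x_q, y_star, use_code_prefix=bool(c))
                candidates[c].append((y_a, r))
            q[c] = np.mean([r for _, r in candidates[c]])      # expected success rate Q(x_q, c)
        prior = np.array([prior_fn(x_q, c) for c in (0, 1)])   # pi_theta(c | x_q)
        energy = np.exp(alpha * prior * q)
        s_star = energy / energy.sum()                         # Eq. 5; the sum plays the role of Z(x_q)
        for c in (0, 1):
            for y_a, r in candidates[c]:
                if rng.random() < s_star[c]:                   # keep trajectories with probability s*(c | x_q)
                    d_train.append((x_q, c, y_a, r))
    return d_train
```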
137
+ Off-Policy RL. To mitigate distributional shifts caused by mismatches between offline data and the policy, we optimize a clipped off-policy RL objective. The refined M-step (Eq. 4) becomes:
138
+
139
+ $$
140
+ \begin{array}{l} \underset {(x _ {q}, y _ {a})} {\mathbb {E}} \left[ \operatorname {c l i p} \left(\frac {\pi_ {\theta} (y _ {a} | x _ {q})}{\pi_ {\text {r e f}} (y _ {a} | x _ {q})}, 1 - \epsilon , 1 + \epsilon\right) \cdot A \right] \\ - \mathbb {E} _ {\left(x _ {q}, c\right)} \left[ \log \pi_ {\theta} \left(c \mid x _ {q}\right) \right] \tag {6} \\ \end{array}
141
+ $$
142
+
143
+ where $(x_{q},c,y_{a})$ is sampled from the dataset $\mathcal{D}_{\mathrm{train}}$ . The importance weight $\frac{\pi_{\theta}(y_a|x_q)}{\pi_{\mathrm{ref}}(y_a|x_q)}$ accounts for off-policy correction with PPO-like clipping. The advantage function $A(x_{q},y_{a})$ is computed via query-wise reward normalization (Schulman et al., 2017).
144
+
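The sketch below spells out one reasonable reading of Eq. 6 in PyTorch: a clipped importance-weighted policy term with query-wise advantage normalization, plus a term that minimizes the cross-entropy to the curated code-triggering decisions (i.e., maximizes $\log \pi_{\theta}(c \mid x_q)$ on them, following the intent of Eq. 4). Tensor shapes and the normalization constant are our assumptions.

```python
import torch

def mstep_loss(logp_theta, logp_ref, rewards, query_ids, logp_c, eps=0.2):
    """Off-policy M-step loss for a batch of curated trajectories (cf. Eq. 6).

    logp_theta : log pi_theta(y_a | x_q) under the current policy, shape (B,)
    logp_ref   : log pi_ref(y_a | x_q) under the data-generating policy, shape (B,)
    rewards    : terminal binary rewards r(x_q, y_a), shape (B,)
    query_ids  : id of the query each trajectory belongs to, shape (B,)
    logp_c     : log pi_theta(c | x_q) for the curated decision c, shape (B,)
    """
    # Query-wise reward normalization -> advantages A(x_q, y_a).
    adv = torch.zeros_like(rewards)
    for q in query_ids.unique():
        m = query_ids == q
        adv[m] = (rewards[m] - rewards[m].mean()) / (rewards[m].std(unbiased=False) + 1e-6)

    # Clipped importance weights for off-policy correction (PPO-style clipping).
    ratio = torch.exp(logp_theta - logp_ref)
    policy_term = (torch.clamp(ratio, 1.0 - eps, 1.0 + eps) * adv).mean()

    # Cross-entropy to the reference code-triggering decisions from the E-step.
    trigger_term = logp_c.mean()

    return -(policy_term + trigger_term)  # negate: optimizers minimize
```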
145
+ Generalizing to Mid-Reasoning Code Integration. Our method extends to mid-reasoning code integration by initiating Monte Carlo rollouts from partial solutions $(x_{q},y_{< t})$ . Notably, we observe emergence of mid-reasoning code triggers after initial warm-up with prefix-probed solutions. Thus, our implementation requires only two initial probing strategies: explicit prefix prompting for code integration and vanilla generation for pure reasoning, which jointly seed diverse mid-reasoning code usage in later iterations.
146
+
147
+ # 4 Experiments
148
+
149
+ Our experiments investigate three key research questions:
150
+
151
+ Q1: Method Effectiveness. How does our approach enhance performance across both in-domain and out-of-domain mathematical benchmarks compared to existing math LLMs?
152
+ Q2: Baseline Comparisons. How does our method compare to standard RL and SFT baselines in terms of training efficiency and exploration patterns?
153
+ Q3: AutoCode Analysis. What strategies does the model learn for code integration, and how do these strategies contribute to performance gains?
154
+
155
+ Datasets and Benchmarks. Our method only requires a query set for training. We collect publicly available queries from MATH (Hendrycks et al., 2021) and Numina (Li et al., 2024), and sample $7K$ queries based on difficulty. We upload the collected data to an anonymous repository. For evaluation, we employ: GSM8k (Cobbe et al., 2021), MATH500 (Hendrycks et al., 2021), GaokaoMath2023 (Liao et al., 2024), OlympiadBench (He et al., 2024), the American Invitational Mathematics Examination (AIME24), and the American Mathematics Competitions (AMC23). This benchmark suite spans elementary to Olympiad-level mathematics. We adopt Pass@1 accuracy (Chen et al., 2021; Guo et al., 2025) as our primary metric, using evaluation scripts from DeepseekMath (Shao et al., 2024) and Qwen2Math (Yang et al., 2024a). For competition-level benchmarks (AIME/AMC), we use 64 samples with temperature 0.6 following Deepseek R1 protocols.
156
+
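Read literally, this protocol estimates Pass@1 as the mean per-sample correctness, averaged over problems; a small helper under that assumption (the grading callable is not specified by the evaluation scripts cited above):

```python
def pass_at_1(samples_per_problem, is_correct):
    """samples_per_problem: {problem: [sampled solutions]}; is_correct(problem, solution) -> bool.
    With 64 samples per problem (AIME/AMC here), Pass@1 is the average
    fraction of correct samples per problem."""
    per_problem = [
        sum(is_correct(p, s) for s in samples) / len(samples)
        for p, samples in samples_per_problem.items()
    ]
    return sum(per_problem) / len(per_problem)
```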
157
+ Baselines and Implementation. We compare against three model categories:
158
+
159
+ - Proprietary models: o1 (Jaech et al., 2024), GPT-4 (Achiam et al., 2023) and Claude (Anthropic, 2024)
160
+ - Recent math-specialized LMs: NuminaMath (Li et al., 2024), Mathstral (AI, 2024), Mammoth (Yue et al., 2023), ToRA (Gou et al., 2023), DartMath (Tong et al., 2024). We do not compare with models that rely on test-time scaling, such as MCTS or long CoT.
161
+ - Foundation models enhanced with our method: Qwen2Math (Yang et al., 2024a), DeepseekMath (Shao et al., 2024) and Qwen-2.5 (Yang et al., 2024b).
162
+
163
+ Our implementation uses $K = 8$ rollouts per query (temperature=1.0, top-p=0.9). Training completes in about 10 hours on $8 \times$ A100 (80GB) GPUs
164
+
165
+ Table 1: Public SFT Data Used in this Work. We collect a public query set for AutoCode training. After deduplication, the total number of queries used is 119K. If the base model is not trained to write code for math problems, we use the SFT annotations associated with the above queries.
166
+
167
+ <table><tr><td>Dataset</td><td>Size</td><td>Unique Queries</td><td>CoT Solutions</td><td>Code Solutions</td></tr><tr><td>Openmath (Toshniwal et al., 2024)</td><td>129917</td><td>70002</td><td>25116</td><td>104801</td></tr><tr><td>Math-Instruct (Yue et al., 2023)</td><td>237781</td><td>219607</td><td>188644</td><td>49137</td></tr><tr><td>Metamath (Yue et al., 2023)</td><td>285000</td><td>161337</td><td>285000</td><td>0</td></tr><tr><td>MMOS (Chen et al., 2024)</td><td>134610</td><td>69007</td><td>0</td><td>134610</td></tr></table>
168
+
169
+ across three epochs of 7K queries. We list the collected public SFT data in Tab. 1.
170
+
171
+ # 4.1 Main Results
172
+
173
+ Table 2 reports our main results. Notably, we observe a performance gain of at least $11\%$ on the MATH500 benchmark, and up to a $9.4\%$ absolute improvement on the highly challenging AIME benchmark. Across in-domain benchmarks, our method yields an average improvement of $8.9\%$, and for out-of-domain benchmarks, we achieve a substantial average gain of $6.98\%$. These results validate the effectiveness of our approach across model families and problem difficulty levels.
174
+
175
+ # 4.2 Ablation Study
176
+
177
+ We conduct three primary analyses: (a) comparison with standard RL and SFT baselines to validate our method's effectiveness in facilitating exploration, (b) visualization of exploration patterns to reveal limitations in the standard RL paradigm, and (c) behavioral analysis of code integration strategies. These analyses collectively demonstrate our method's benefits in facilitating guided exploration and explain how it improves performance.
178
+
179
+ Training Efficiency. We evaluated the learning dynamics of our approach in direct comparison to three established training paradigms:
180
+
181
+ - Base+RL: On-policy Reinforcement Learning (RL) initialized from a base model without Supervised Fine-Tuning (SFT). This follows the methodology of DeepSeek R1, designed to isolate and assess the pure effects of RL training.
182
+ - SFT: Supervised Fine-Tuning, the prevailing training paradigm widely adopted in current tool-integrated math Language Models (LMs).
183
+ - $SFT + RL$ : Standard RL applied after SFT, serving as a conventional baseline for evaluating our EM-based RL method.
184
+
185
+ From Figure 4, we make the following key observations:
186
+
187
+ - While Reinforcement Learning directly from the base model $(Base + RL)$ exhibits consistent performance improvement, its training efficiency is lower than that of training paradigms incorporating SFT. In addition, the model rarely explores code-integrated solutions, with the code invocation rate below $5\%$. This strongly suggests that learning tool-usage behavior from scratch via reinforcement learning is inherently inefficient.
188
+
189
+ - SFT effectively provides a strong initialization point, but SFT alone exhibits limited asymptotic performance. This suggests that SFT lacks the capacity to adapt and optimize beyond the scope of the expert demonstrations, thereby limiting further improvement.
190
+
191
+ - Standard RL applied after SFT shows initial further improvement but subsequently plateaus, even after an extended training stage. This suggests the exploration-exploitation dilemma when applying RL for LLM post-training: standard RL with vanilla rollout exploration tends to exploit local optima and insufficiently explores the combinatorial code-integrated trajectories.
192
+
193
+ To further substantiate the exploration limitations inherent in the conventional $SFT + RL$ paradigm, we present a visualization of the exploration patterns. We partitioned the model-generated responses during self-exploration into three distinct training phases and analyzed the statistical distribution of code invocation rates across queries as the model's policy evolved throughout training. As depicted in Figure 5, the distribution of code invocation progressively concentrates towards the extremes - either minimal or maximal code use - indicating the model's growing tendency to exploit its local policy neighborhood. This exploitation manifests as a focus on refining established code-triggering decisions, rather than engaging in broader exploration of alternative approaches.
194
+
195
+ These empirical observations lend strong support to our assertion that standard RL methods are
196
+
197
+ Table 2: Effectiveness of AutoCode4Math. The column "Code?" indicates whether code integration is involved, with $\star$ representing autonomous determination of code integration by the model. The improvement over code-driven inference is highlighted in the colored row, denoted as $\Delta$ .
198
+
199
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Code?</td><td colspan="2">In-domain</td><td colspan="4">Out-of-domain</td></tr><tr><td>GSM8K</td><td>MATH500</td><td>GaoKao</td><td>Olympiad</td><td>AIME24</td><td>AMC23</td></tr><tr><td colspan="8">Proprietary Model</td></tr><tr><td>OpenAI-o1-preview (Jaech et al., 2024)</td><td>X</td><td></td><td>85.5</td><td>62.1</td><td>52.1</td><td>44.6</td><td>81.8</td></tr><tr><td>GPT-4o (Achiam et al., 2023)</td><td>X</td><td>92.9</td><td>76.4</td><td>67.5</td><td>43.3</td><td>9.3</td><td>45.8</td></tr><tr><td>Claude-3.5-Sonnet-1022 (Anthropic, 2024)</td><td>X</td><td>95</td><td>78.3</td><td></td><td></td><td>16.0</td><td></td></tr><tr><td colspan="8">Open-Source Models</td></tr><tr><td>Mammoth-70B (Yue et al., 2023)</td><td>✓</td><td>76.9</td><td>41.8</td><td>25.2</td><td></td><td></td><td></td></tr><tr><td>ToRA-70B (Gou et al., 2023)</td><td>✓</td><td>84.3</td><td>49.7</td><td>31.7</td><td></td><td></td><td></td></tr><tr><td>NuminaMath-72B (Li et al., 2024)</td><td>✓</td><td>91.4</td><td>59.2</td><td>49.4</td><td>36.7</td><td>6.5</td><td>40.6</td></tr><tr><td>Mathstral-7B (AI, 2024)</td><td>X</td><td>84.9</td><td>56.6</td><td>46</td><td>21.5</td><td></td><td></td></tr><tr><td>Mammoth-Mistral-7B (Yue et al., 2023)</td><td>✓</td><td>74.22</td><td>37.8</td><td>22.08</td><td>9.63</td><td>6.67</td><td>20.0</td></tr><tr><td>NuminaMath-7B-CoT (Li et al., 2024)</td><td>X</td><td>81.27</td><td>53.0</td><td>48.83</td><td>22.22</td><td>3.33</td><td>25.0</td></tr><tr><td>Dart-Math-DeepSeek-7B (Tong et al., 2024)</td><td>X</td><td>87.64</td><td>50.0</td><td>45.45</td><td>18.52</td><td>3.33</td><td>35.0</td></tr><tr><td>Dart-Math-Llama3-8B (Tong et al., 2024)</td><td>X</td><td>82.71</td><td>45.0</td><td>34.80</td><td>23</td><td>0.0</td><td>17.5</td></tr><tr><td colspan="8">AutoCode Training</td></tr><tr><td>Qwen2Math-Base-7B (Yang et al., 2024a)</td><td>X</td><td>80.74</td><td>48.80</td><td>43.37</td><td>21.62</td><td>6.5</td><td>19.8</td></tr><tr><td>AutoCode4Math-Qwen2</td><td>★</td><td>88.1</td><td>61.86</td><td>50.13</td><td>26.37</td><td>13.2</td><td>30.0</td></tr><tr><td>Δ</td><td></td><td>7.36↑</td><td>13.06↑</td><td>6.76↑</td><td>4.75↑</td><td>6.7↑</td><td>10.2↑</td></tr><tr><td>DeepseekMath-Instruct-7B (Shao et al., 2024)</td><td>✓</td><td>84.46</td><td>51.00</td><td>44.68</td><td>20.44</td><td>1.6</td><td>17.4</td></tr><tr><td>AutoCode4Math-DeepSeek</td><td>★</td><td>89.26</td><td>63.32</td><td>50.53</td><td>26.95</td><td>9.5</td><td>28.8</td></tr><tr><td>Δ</td><td></td><td>4.8↑</td><td>12.32↑</td><td>5.85↑</td><td>6.51↑</td><td>7.9↑</td><td>11.4↑</td></tr><tr><td>Qwen-2.5-Base-7B (Yang et al., 2024b)</td><td>X</td><td>84.88</td><td>60.4</td><td>45.45</td><td>30.37</td><td>13.2</td><td>39.38</td></tr><tr><td>AutoCode4Math-Qwen2.5</td><td>★</td><td>89.12</td><td>71.4</td><td>51.69</td><td>32.6</td><td>22.6</td><td>45.18</td></tr><tr><td>Δ</td><td></td><td>4.24↑</td><td>11.0↑</td><td>6.24↑</td><td>2.23↑</td><td>9.4↑</td><td>5.8↑</td></tr></table>
200
+
201
+ susceptible to premature exploitation of the local policy space when learning AutoCode strategies. In sharp contrast, our proposed EM method facilitates a more guided exploration by sub-sampling trajectories according to the reference strategy (Sec. 3.2). This enables continued performance improvement (evidenced in Sec. 4.1) and mitigates the risk of converging to suboptimal local optima (Fig. 4).
202
+
203
+ Analysis on Code Integration Behaviors. We investigated the properties of the learned code integration strategies to gain deeper insights into the mechanisms behind our method's performance gains. Our central hypothesis posits that optimal code integration unlocks synergistic performance benefits by effectively combining the strengths of CoT and code executions. This synergy presents a "free lunch" scenario: a well-learned metacognitive tool-usage strategy can elevate overall performance, provided the model demonstrates competence in solving distinct subsets of queries using either CoT or code execution.
204
+
205
+ To empirically validate this "free lunch" principle and demonstrate the superiority of our approach in realizing it, we benchmarked our model against baselines that inherently support both code execution and Chain-of-Thought (CoT) reasoning: GPT-4, Mammoth-70B, and DeepseekMath-Instruct-7B. Our analysis evaluated the model's autonomous decision to invoke code when not explicitly instructed on which strategy to employ. We compared this "AutoCode" performance against scenarios where models were explicitly prompted to utilize either code or CoT reasoning. We also considered the theoretical "free lunch" upper bound – the accuracy achieved by combining the successful predictions from either strategy (i.e., taking the union of queries solved by CoT or code).
208
+
209
+ As visually presented in Figure 6, existing baseline models exhibit inferior performance in AutoCode mode compared to scenarios where code invocation is explicitly prompted, e.g., DeepseekMath-Instruct-7B shows a degradation of $11.54\%$ in AutoCode mode. This suggests that their AutoCode strategies are often suboptimal, performing closer to random selection between CoT and code (selection accuracy near $50\%$ ), resulting in AutoCode falling between the performance of explicitly triggered CoT and code. In contrast, our
210
+
211
+ ![](images/bcd8990eb16e21cf55cd37fafbda993cbe944087b6c3b4652a25a041fd026bed.jpg)
212
+ Figure 4: Training Efficiency and Convergence. We benchmark the learning dynamics of our approach against three training paradigms: supervised fine-tuning (SFT), RL from the base model, and SFT followed by RL. The Pass@1 accuracy is evaluated on a held-out dev set. We use Qwen-2.5-Base as the base model. SFT is conducted using collected public data (Toshniwal et al., 2024; Yue et al., 2023). The dashed lines indicate asymptotic performance.
213
+
214
+ ![](images/54835cf23939d78ecf5f7cf5924847b38188c7543e5500ce5ed56793def74b38.jpg)
215
+ Figure 5: Visualization of Exploration in the SFT+RL paradigm. We plot the distribution of code invocation rates across queries to visualize the policy's exploration of code-integrated trajectories. Without external guidance, the LLM tends to exploit its local policy neighborhood, concentrating code usage toward the extremes as training progresses.
216
+
217
+ models learn more effective code integration strategies. AutoCode4Math-Qwen2.5, for example, improves upon explicitly code-triggered performance by $7\%$ , indicating a true synergistic integration of reasoning and code execution.
218
+
219
+ To quantify the effectiveness of these learned "AutoCode" strategies, we calculated the CoT/ code selection accuracy. We used the outcome of explicit instruction (i.e., performance when explicitly prompted for CoT or code) as a proxy for the ground-truth optimal method selection. Our model achieves a selection accuracy of $89.53\%$ , showcasing the high efficacy of the learned code integration strategy.
220
+
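One way to operationalize this selection accuracy is sketched below; restricting the metric to queries where exactly one of the two explicitly prompted modes succeeds is our assumption, since the paper does not spell out the tie-breaking rule.

```python
def selection_accuracy(records):
    """records: iterable of dicts with boolean fields 'cot_correct' and 'code_correct'
    (outcomes under explicit prompting) and 'chose_code' (the autonomous decision)."""
    hits = total = 0
    for r in records:
        if r["cot_correct"] == r["code_correct"]:
            continue  # both or neither mode succeeds: no unambiguous optimal choice
        total += 1
        optimal_is_code = r["code_correct"]
        hits += int(r["chose_code"] == optimal_is_code)
    return hits / total if total else float("nan")
```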
221
+ ![](images/372c6277f951de44ac148d9f02df3586e5d27cf0b9d4d49b0666f71726633a7f.jpg)
222
+ Figure 6: Analysis of AutoCode Strategies. We compare AutoCode performance against scenarios where models are explicitly prompted to utilize code or CoT, and consider the union of solved queries as the upper bound for AutoCode performance. Existing models show worse AutoCode performance than when explicitly instructed, with their AutoCode strategies close to random (50%). Our approach consistently improves AutoCode performance, with AutoCode selection accuracy near 90%.
223
+
224
+ # 5 Related Work and Discussion
225
+
226
+ Tool-Integrated Math LLMs. Math language models adopted two major paradigms: Chain-of-Thought (CoT) reasoning and the use of external tools, such as Python programs (Yu et al., 2023; Yue et al., 2023; Toshniwal et al., 2024). Each paradigm offers unique benefits, and recent hybrid frameworks (Yue et al., 2023; Gou et al., 2023; Li, 2024; Shao et al., 2024; Yang et al., 2024b) increasingly seek to combine them for synergy. However, current models exhibit critical rigidity, motivating
227
+
228
+ our work to realize the true metacognitive capacity that enjoys synergistic benefits of CoT and code.
229
+
230
+ EM for RL. Expectation-Maximization (EM) has proven effective for maximum likelihood problems involving hidden variables, such as Expert Iteration (Anthony et al., 2017), Iterative Maximum Likelihood (Wu, 2016; Agarwal et al., 2019), MetaReinforcement Learning (Zintgraf et al., 2019; Wang et al., 2020), and Adversarial Games (Wang et al., 2023). In the context of math LLMs, the most relevant works are (Singh et al., 2023) and (Ni et al., 2022), which apply EM-style iterative self-training to math problem-solving. Unlike these approaches, we leverage the EM framework for guided exploration during reinforcement learning of language models.
231
+
232
+ # 6 Conclusion
233
+
234
+ Existing tool-integrated math language models lack the metacognitive capacity to effectively determine code integration, hindering their ability to fully realize the synergistic benefits of tool integration and CoT. To address this critical gap, we propose a novel EM-based framework that combines guided exploration with policy optimization. Our experiments demonstrate the limitations of standard SFT and RL in efficiently exploring the combinatorial space of code-integrated trajectories and highlight the superior training efficiency and performance of our approach.
235
+
236
+ # 7 Limitations
237
+
238
+ The scope of our work is primarily focused on mathematical problem-solving. While we observe promising results on challenging benchmarks like MATH500, the generalizability of our approach to other domains requiring the metacognitive capacity of tool integration and CoT, such as scientific reasoning or code generation for general-purpose tasks, remains to be explored. Future work should investigate the effectiveness of our framework across a wider range of tasks and domains.
239
+
240
+ # References
241
+
242
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
243
+ Rishabh Agarwal, Chen Liang, Dale Schuurmans, and Mohammad Norouzi. 2019. Learning to generalize
244
+
245
+ from sparse and underspecified rewards. In International conference on machine learning, pages 130-140. PMLR.
246
+ Mistral AI. 2024. Mathstral.
247
+ Thomas Anthony, Zheng Tian, and David Barber. 2017. Thinking fast and slow with deep learning and tree search. Advances in neural information processing systems, 30.
248
+ Anthropic. 2024. The claude 3 model family: Opus, sonnet, haiku.
249
+ Christopher M Bishop and Nasser M Nasrabadi. 2006. Pattern recognition and machine learning, volume 4. Springer.
250
+ Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374.
251
+ Zui Chen, Yezeng Chen, Jiaqi Han, Zhijie Huang, Ji Qi, and Yi Zhou. 2024. An empirical study of data ability boundary in llms' math reasoning. Preprint, arXiv:2403.00799.
252
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168.
253
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
254
+ Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023. Pal: Program-aided language models. In International Conference on Machine Learning, pages 10764–10799. PMLR.
255
+ Zhibin Gou, Zhihong Shao, Yeyun Gong, Yujiu Yang, Minlie Huang, Nan Duan, Weizhu Chen, et al. 2023. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452.
256
+ Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.
257
+ Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008.
258
+
259
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874.
260
+ Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720.
261
+ Jean Kaddour, Joshua Harris, Maximilian Mozes, Herb Bradley, Roberta Raileanu, and Robert McHardy. 2023. Challenges and applications of large language models. arXiv preprint arXiv:2307.10169.
262
+ Jia Li, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Huang, Kashif Rasul, Longhui Yu, Albert Q Jiang, Ziju Shen, et al. 2024. Numinamath: The largest public dataset in ai4maths with 860k pairs of competition math problems and solutions.
263
+ Long Li. 2024. How do humans write code? large models do it the same way too. arXiv preprint arXiv:2402.15729.
264
+ Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. 2024. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190.
265
+ Ansong Ni, Jeevana Priya Inala, Chenglong Wang, Oleksandr Polozov, Christopher Meek, Dragomir Radev, and Jianfeng Gao. 2022. Learning math reasoning from self-sampled correct and partially-correct solutions. arXiv preprint arXiv:2205.14318.
266
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.
267
+ Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, YK Li, Yu Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.
268
+ Avi Singh, John D Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Peter J Liu, James Harrison, Jaehoon Lee, Kelvin Xu, Aaron Parisi, et al. 2023. Beyond human data: Scaling self-training for problem-solving with language models. arXiv preprint arXiv:2312.06585.
269
+ Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.
270
+
271
+ Yuxuan Tong, Xiwen Zhang, Rui Wang, Ruidong Wu, and Junxian He. 2024. DART-math: Difficulty-aware rejection tuning for mathematical problemsolving. In The Thirty-eighth Annual Conference on Neural Information Processing Systems.
272
+ Shubham Toshniwal, Ivan Moshkov, Sean Narethiran, Daria Gitman, Fei Jia, and Igor Gitman. 2024. Openmathinstruct-1: A 1.8 million math instruction tuning dataset. arXiv preprint arXiv:2402.10176.
273
+ Haozhe Wang, Chao Du, Panyan Fang, Li He, Liang Wang, and Bo Zheng. 2023. Adversarial constrained bidding via minimax regret optimization with causality-aware reinforcement learning. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pages 2314-2325.
274
+ Haozhe Wang, Jiale Zhou, and Xuming He. 2020. Learning context-aware task reasoning for efficient meta reinforcement learning. In Proceedings of the 19th International Conference on Autonomous Agents and MultiAgent Systems, pages 1440-1448.
275
+ Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.
276
+ Yonghui Wu. 2016. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144.
277
+ An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024a. Qwen2 technical report. arXiv preprint arXiv:2407.10671.
278
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024b. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.
279
+ Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. 2023. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284.
280
+ Siyu Yuan, Zehui Chen, Zhiheng Xi, Junjie Ye, Zhengyin Du, and Jiecao Chen. 2025. Agent-r: Training language model agents to reflect via iterative self-training. arXiv preprint arXiv:2501.11425.
281
+ Xiang Yue, Xingwei Qu, Ge Zhang, Yao Fu, Wenhao Huang, Huan Sun, Yu Su, and Wenhu Chen. 2023. Mammoth: Building math generalist models through hybrid instruction tuning. arXiv preprint arXiv:2309.05653.
282
+ Luisa Zintgraf, Kyriacos Shiarlis, Maximilian Igl, Sebastian Schulze, Yarin Gal, Katja Hofmann, and Shimon Whiteson. 2019. Varibad: A very good method
283
+
284
+ for bayes-adaptive deep rl via meta-learning. arXiv preprint arXiv:1910.08348.
285
+
286
+ # A Appendix
287
+
288
+ # A.1 Derivation of the EM
289
+
290
+ We first model RL as a maximum likelihood problem. Specifically, we consider 'maximizing expected return' as 'maximizing the likelihood of observing a correct response', this is equivalent to maximizing the following log-likelihood,
291
+
292
+ $$
+ \begin{aligned}
+ \max_{\theta} \; & \log P(r = 1 \mid x_q; \theta) \\
+ = \max_{\theta} \; & \log \sum_{c} p_{\theta}(c \mid x_q) \sum_{y_a} p_{\theta}(y_a \mid x_q, c) \cdot P(r = 1 \mid y_a, c, x_q) \;\doteq\; \mathcal{J}(\theta),
+ \end{aligned}
+ $$
299
+
300
+ where we factorize the language model as $p_{\theta}(y_a|x_q) = \sum_c p_{\theta}(c|x_q)p_{\theta}(y_a|x_q,c)$ .
301
+
302
+ Since the variable $c$ is unobservable for lack of reliable supervision, we resort to the EM framework. Specifically, we treat $c$ as a hidden variable, and introduce a proposal distribution $s(c|x_q)$ which represents the belief of $c$ given the fixed LLM $\mathcal{M}_{\theta}$ and the data evidence. We have the following derivations, $\mathcal{J}(\theta) =$
303
+
304
+ $$
+ \begin{aligned}
+ & \log \sum_{c} s(c \mid x_q) \cdot \frac{p_{\theta}(c \mid x_q)}{s(c \mid x_q)} \cdot \sum_{y_a} p_{\theta}(y_a \mid x_q, c) \cdot P(r = 1 \mid y_a, c, x_q) \\
+ & \geq \sum_{c} s(c \mid x_q) \left[ \log \frac{p_{\theta}(c \mid x_q) \sum_{y_a} p_{\theta}(y_a \mid x_q, c)\, P(r = 1 \mid y_a, c, x_q)}{s(c \mid x_q)} \right] \\
+ & = \mathbb{E}_{s(c \mid x_q)} \left[ \log \frac{p_{\theta}(c \mid x_q) \cdot P(r = 1 \mid c, x_q; \theta)}{s(c \mid x_q)} \right] \;\doteq\; \mathcal{J}_{\mathrm{ELBO}}(s, \theta),
+ \end{aligned}
+ $$
307
+
308
+ where $P(r = 1|c, x_q; \theta)$ denotes the likelihood of generating correct responses given $(x_q, c)$ following the solution-generation policy $p_{\theta}(y_a|x_q, c)$ .
309
+
310
+ In the E-step, we are essentially minimizing a KL divergence,
311
+
312
+ $$
+ \max_{s} \mathcal{J}_{\mathrm{ELBO}}(s, \theta) = \min_{s} \mathrm{D}_{\mathrm{KL}}\left( s(c \mid x_q) \,\|\, s^{*}(c \mid x_q) \right), \tag{7}
+ $$
315
+
316
+ where the minimizer is $s^{*}(c|x_{q}) \propto p_{\theta}(c|x_{q}) \cdot P(r = 1|c,x_{q};\theta)$. Intuitively, the belief over the methodology $c$ follows a posterior update: it combines the prior of the current model, $p_{\theta}(c|x_q)$, with the likelihood of the data, $P(r = 1|c,x_q;\theta)$. The optimal methodology-selection strategy therefore assigns higher probability to a method $c$ that, under the current LLM, either has a higher prior or is more likely to yield a correct response.
319
+
320
+ To compute the optimal strategy, we define $P(r = 1|c,x_q;\theta) = \frac{\exp\left(\alpha \cdot p_\theta(c|x_q)\, Q(x_q,c;\theta)\right)}{Z(x_q)}$ as an energy-based distribution, where the negative energy evaluates the expected return induced by the current solution-generation policy, $Q(x_{q},c;\theta) = \mathbb{E}_{p_{\theta}(y_{a}|x_{q},c)}[R(x_{q},y_{a})]$, and $\alpha > 0$ is a temperature controlling the sharpness of the distribution. The minimizer can then be computed by enumerating over $c$.
321
+
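+ As a minimal sketch of this E-step computation (not the exact implementation of the paper; the methodology names, prior values, and the Monte Carlo estimator `estimate_q` below are hypothetical placeholders), the minimizer can be obtained by enumerating the candidate methodologies and normalizing the resulting scores:
+
+ ```python
+ import math
+
+ def e_step_posterior(methodologies, prior, estimate_q, alpha=1.0):
+     """Compute s*(c | x_q) proportional to p_theta(c | x_q) * P(r = 1 | c, x_q; theta)
+     by enumerating the (small) set of candidate methodologies c.
+
+     methodologies: candidate methods c, e.g. ["cot", "code"]
+     prior:         dict mapping c to p_theta(c | x_q) under the current LLM
+     estimate_q:    callable returning a Monte Carlo estimate of Q(x_q, c; theta),
+                    i.e. the average reward of solutions sampled with method c
+     """
+     scores = {}
+     for c in methodologies:
+         # Energy-based likelihood term, up to the normalizer Z(x_q), which cancels below
+         likelihood = math.exp(alpha * prior[c] * estimate_q(c))
+         scores[c] = prior[c] * likelihood
+     z = sum(scores.values())
+     return {c: s / z for c, s in scores.items()}
+
+ # Toy usage with made-up numbers: the posterior shifts mass toward the method
+ # that currently yields correct answers more often.
+ print(e_step_posterior(["cot", "code"], {"cot": 0.6, "code": 0.4},
+                        lambda c: {"cot": 0.2, "code": 0.7}[c]))
+ ```
+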
322
+ In the M-step, we optimize $\max_{\theta} \mathcal{J}_{\mathrm{ELBO}}(s, \theta) =$
323
+
324
+ $$
+ \begin{aligned}
+ & \max_{\theta} \; \mathbb{E}_{s(c \mid x_q)}\left[ \log P(r = 1 \mid c, x_q; \theta) \right] - \mathrm{D}_{\mathrm{KL}}\left( s(c \mid x_q) \,\|\, p_{\theta}(c \mid x_q) \right) \\
+ & = \max_{\theta} \; \mathbb{E}_{s(c \mid x_q)}\left[ Q(x_q, c; \theta) \right] - \mathrm{D}_{\mathrm{KL}}\left( s(c \mid x_q) \,\|\, p_{\theta}(c \mid x_q) \right),
+ \end{aligned}
+ $$
327
+
328
+ which maximizes the expected return while imitating the optimal strategy.
329
+
330
+ # A.2 Distribution of Multi-Round Responses
331
+
332
+ We show the distribution of multi-round responses of the model AutoCode4Math-Deepseek in Tab. 3.
333
+
334
+ # B Examples
335
+
336
+ # B.1 Success Cases
337
+
338
+ Listing 1: Success case for multi-round.
339
+
340
+ Round 1:
341
+
342
+ Question: Henry and 3 of his friends order 7 pizzas for lunch. Each pizza is cut into 8 slices. If Henry and his friends want to share the pizzas equally, how many slices can each of them have?
343
+
344
+ Solution:
345
+
346
+ There are 7 pizzas in total, and each pizza is cut into 8 slices. So, there are $7 \times 8 = 56$ slices in total.
347
+
348
+ Henry and his 3 friends want to share the pizzas equally, so they will each have $56 / 4 = 14$ slices.
349
+
350
+ Each of them can have 14 slices.
351
+
352
+ The answer is $\boxed{14}$.
353
+
354
+ Table 3: Distribution of Multi-Round Responses.
355
+
356
+ <table><tr><td>Model</td><td>Dataset</td><td>Queries</td><td>Round1</td><td>Round2</td><td>Round3</td></tr><tr><td rowspan="2">Deepseek-math-AutoCode</td><td>MATH</td><td>5000</td><td>1252</td><td>3719</td><td>29</td></tr><tr><td>GSM8k</td><td>1319</td><td>1013</td><td>304</td><td>2</td></tr></table>
357
+
358
+ Round 2:
359
+
360
+ Question: Henry and 3 of his friends order 7 pizzas for lunch. Each pizza is cut into 8 slices. If Henry and his friends want to share the pizzas equally, how many slices can each of them have?
361
+
362
+ Solution:
363
+
364
+ ```python
+ def pizza_slices_per_person():
+     """Henry and 3 of his friends order 7 pizzas for lunch. Each pizza is cut into 8 slices. If Henry and his friends want to share the pizzas equally, how many slices can each of them have?"""
+     total_pizzas = 7
+     slices_per_pizza = 8
+     total_slices = total_pizzas * slices_per_pizza
+     people = 4  # Henry and his 3 friends
+     slices_per_person = total_slices // people  # Use floor division to get an integer result
+     return slices_per_person
+
+ slices_per_person = pizza_slices_per_person()
+ print(slices_per_person)
+ ```
+
+ ```output
+ 14
+ ```
+
+ Each of them can have $\boxed{14}$ slices.
393
+
394
+ Round 3:
+
+ Question: Peter has 4 boxes with the same number of chocolate bars in each, while Martha has 7 boxes with the same number of chocolate bars in each. If Peter and Martha have totals of 64 and 56 chocolate bars respectively, how many more chocolate bars does Peter have in each box than Martha?
399
+
400
+ Solution:
401
+
402
+ If Peter has 4 boxes and Martha has 7 boxes, then Martha has $\frac{7}{4} = 1.75$ times as many boxes as Peter.
403
+
404
+ If Martha has 56 chocolate bars and Peter has 64 chocolate bars, then Peter has $64 / 56 = 1.142857$ times as many chocolate bars as Martha.
405
+
406
+ If Martha has 1.75 times as many boxes as Peter, and Peter has 1.142857 times as many chocolate bars as Martha, then Martha has $\frac{1.75}{1.142857} = 1.5$ times as many chocolate bars in each box as Peter.
407
+
408
+ Therefore, Peter has $1.5 - 1 = 0.5$ more chocolate bars in each box than Martha.
409
+
410
+ The answer is $\boxed{0.5}$.
411
+
412
+ Sorry, the above solution may be incorrect. It's better to write a program.
413
+
414
+ ```python
+ def chocolate_bars_difference():
+     """Peter has 4 boxes with the same number of chocolate bars in each, while Martha has 7 boxes with the same number of chocolate bars in each. If Peter and Martha have totals of 64 and 56 chocolate bars respectively, how many more chocolate bars does Peter have in each box than Martha?"""
+     peter_total = 64
+     martha_total = 56
+     peter_boxes = 4
+     martha_boxes = 7
+     peter_bars_per_box = peter_total / peter_boxes
+     martha_bars_per_box = martha_total / martha_boxes
+     difference = peter_bars_per_box - martha_bars_per_box
+     return difference
+
+ difference = chocolate_bars_difference()
+ print(difference)
+ ```
+
+ ```output
+ 8.0
+ ```
+
+ Peter has $\boxed{8}$ more chocolate bars in each box than Martha.
447
+
448
+ # B.2 Failure Cases
449
+
450
+ Listing 2: Failure case for multi-round.
451
+
452
+ Round 1:
453
+
454
+ Question: The solution to the inequality
+
+ \[ \frac{x + c}{x^2 + ax + b} \le 0 \]
+
+ is $x \in (-\infty, -1) \cup [1, 2)$. Find $a + b + c$.
458
+
459
+ Solution:
460
+
461
+ Let's reason step by step.
462
+
463
+ The inequality is equivalent to
+
+ \[ (x + c)(x^2 + ax + b) \le 0. \]
+
+ Since the roots of $x^2 + ax + b$ are $-1$ and $1$, we can write
+
+ \[ x^2 + ax + b = (x + 1)(x - 1) = x^2 - 1. \]
+
+ Then
+
+ \[ (x + c)(x^2 + ax + b) = (x + c)(x^2 - 1) = (x + c)(x + 1)(x - 1). \]
+
+ The inequality is satisfied when $x \in (-\infty, -1) \cup [1, 2)$, so
+
+ \[ (x + c)(x + 1)(x - 1) \le 0 \]
+
+ when $x \in (-\infty, -1) \cup [1, 2)$. This means that one of $x + c$, $x + 1$, or $x - 1$ is nonpositive, and the other two are nonnegative.
+
+ If $x + c \le 0$, then $x \le -c$. Since $x \in [1, 2)$, we must have $-c \ge 2$, or $c \le -2$.
+
+ If $x - 1 \le 0$, then $x \le 1$. Since $x \in (-\infty, -1)$, we must have $-1 \le 1$, which is always true.
+
+ If $x + 1 \le 0$, then $x \le -1$. Since $x \in [1, 2)$, we must have $-1 \le 1$, which is always true.
+
+ Therefore, the solution is $x \in (-\infty, -1) \cup [1, 2)$. Since $x \in [1, 2)$, we must have $-c \ge 2$, or $c \le -2$.
+
+ Hence, $a + b + c = 0 - 1 + (-2) = \boxed{-3}$.
+
+ The answer is $\boxed{-3}$.
484
+
485
+ Round 2:
+
+ Question: The girls are trying to raise money for a carnival. Kim raises \$320 more than Alexandra, who raises \$430, and Maryam raises \$400 more than Sarah, who raises \$300. How much money, in dollars, did they all raise in total?
490
+
491
+ Solution:
492
+
493
+ ```python
+ def total_money_raised():
+     """The girls are trying to raise money for a carnival. Kim raises $320 more than Alexandra, who raises $430, and Maryam raises $400 more than Sarah, who raises $300. How much money, in dollars, did they all raise in total?"""
+     alexandra_money = 430
+     kim_money = alexandra_money + 320
+     sarah_money = 300
+     maryam_money = sarah_money + 400
+     total_money = alexandra_money + kim_money + sarah_money + maryam_money
+     return total_money
+
+ total_money = total_money_raised()
+ print(total_money)
+ ```
+
+ ```output
+ 2180
+ ```
+
+ The girls raised a total of $\boxed{2180}$ dollars.
522
+
523
+ Round 3:
+
+ Question: The digits from 1 to 6 are arranged to form a six-digit multiple of 5. What is the probability that the number is greater than 500,000? Express your answer as a common fraction.
530
+
531
+ Solution:
532
+
533
+ Let's reason step by step.
534
+
535
+ To form a six-digit multiple of 5, the last digit must be either 0 or 5.
+
+ Since the number must be greater than 500,000, the last digit must be 5.
+
+ The remaining five digits can be arranged in $5!$ ways.
+
+ Therefore, the probability is $\frac{1}{2} \times \frac{1}{2} \times \frac{1}{2} \times \dots$
+
+ The answer is $\boxed{\frac{1}{6}}$.
+
+ Based on the above reasoning, we can verify by writing a program.
+
+ ```python
+ from sympy import Rational
+
+ def probability_greater_than_five_million():
+     """The digits from 1 to 6 are arranged to form a six-digit multiple of 5. What is the probability that the number is greater than 500,000? Express your answer as a common fraction."""
+     # Total number of arrangements
+     total_arrangements = 6*5*4*3*2*1
+     # Number of arrangements that start with 5 or 6
+     valid_arrangements = 2*5*4*3*2*1
+     # Number of arrangements that start with 1
+     invalid_arrangements_1 = 1*5*4*3*2*1
+     # Number of arrangements that start with 2
+     invalid_arrangements_2 = 1*4*3*2*1
+     # Number of arrangements that start with 3
+     invalid_arrangements_3 = 1*3*2*1
+     # Total number of invalid arrangements
+     invalid_arrangements = invalid_arrangements_1 + invalid_arrangements_2 + invalid_arrangements_3
+     # Calculate the probability
+     probability = Rational(valid_arrangements, total_arrangements)
+     return probability
+
+ result = probability_greater_than_five_million()
+ print(result)
+ ```
+
+ ```output
+ 1/3
+ ```
+
+ The probability that the number is greater than 500,000 is $\boxed{\frac{1}{3}}$.
645
+
646
+ # B.3 AutoCode 4shots
647
+
648
+ Listing 3: AutoCode 4-shot example.
649
+
650
+ User: Simplify $\sqrt{25000}$.
+
+ Assistant:
+
+ Let's write a program.
+
+ ```python
+ from sympy import sqrt
+
+ def simplify_sqrt():
+     """Simplify $\sqrt{25000}$."""
+     simplified_sqrt = sqrt(25000)
+     return simplified_sqrt
+
+ result = simplify_sqrt()
+ print(result)
+ ```
676
+
677
+ User: The function $f(x)$ satisfies \[ f(x + f(x)) = 4f(x) \] for all $x$, and $f(1) = 4$. Find $f(21)$.
+
+ Assistant:
+
+ Let's reason step by step.
+
+ We can start by plugging in $x = 1$ into the given equation. This gives us
+
+ \[ f(1 + f(1)) = 4f(1). \]
+
+ Since we know that $f(1) = 4$, we can substitute this into the equation to get
+
+ \[ f(1 + 4) = 4 \cdot 4. \]
+
+ Simplifying, we have
+
+ \[ f(5) = 16. \]
+
+ Next, we can plug in $x = 5$ into the given equation. This gives us
+
+ \[ f(5 + f(5)) = 4f(5). \]
+
+ Since we know that $f(5) = 16$, we can substitute this into the equation to get
+
+ \[ f(5 + 16) = 4 \cdot 16. \]
+
+ Simplifying, we have
+
+ \[ f(21) = 64. \]
+
+ Therefore, the value of $f(21)$ is $\boxed{64}$.
+
+ The answer is $\boxed{64}$.
716
+
717
+ User: Find the matrix that corresponds to reflecting over the $x$-axis.
+
+ Assistant:
+
+ Let's reason step by step.
+
+ The reflection of a point $(x, y)$ over the $x$-axis is $(x, -y)$.
+
+ Thus, the matrix that corresponds to reflecting over the $x$-axis is $\boxed{\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}}$.
+
+ The answer is $\boxed{\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}}$.
728
+
729
+ User: If $a$ is three times $b$, $b$ is twice $c$, and $c$ is four times $d$, what is the value of $\frac{a \cdot c}{b \cdot d}$?
+
+ Assistant:
+
+ Let's write a program.
+
+ ```python
+ from sympy import symbols, simplify
+
+ def ratio_of_products():
+     a, b, c, d = symbols('a b c d')
+     # Given relations
+     a = 3 * b
+     b = 2 * c
+     c = 4 * d
+     # Substitute the relations into the expression
+     expr = a * c / (b * d)
+     expr = expr.subs({a: 3*b, b: 2*c, c: 4*d})
+     # Simplify the expression
+     simplified_expr = simplify(expr)
+     return simplified_expr
+
+ result = ratio_of_products()
+ print(result)
+ ```
2502.00xxx/2502.00691/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08c52102432cf1440a09686a63a319cb67fb788c37685a2580a0f047f02eb3e2
3
+ size 573729
2502.00xxx/2502.00691/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00698/45c647af-889e-410e-b6ce-46c9f0c0f8b1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7b2f74a1b1b4eaeea64037a02e92bafd2da1a306df9c497fd4e2cbadd88762e
3
+ size 2172412
2502.00xxx/2502.00698/full.md ADDED
@@ -0,0 +1,797 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MM-IQ: Benchmarking Human-Like Abstraction and Reasoning in Multimodal Models
2
+
3
+ Huanqia Cai* Yijun Yang Winston Hu
4
+
5
+ Tencent Hunyuan Team
6
+
7
+ ![](images/6ac5d5b34f496b948bc48b304e96348a99fde82f12c6792cf3e6201f6cbdab1a.jpg)
8
+
9
10
+
11
+ ![](images/f2a09fb0348d524745009ca501b9c7182087cd39c69f769fd5160a30b311d034.jpg)
12
+
13
+ ![](images/3be8a52de841c401df1046259f0913541e37b73121c4ddff8d2760febcbf2241.jpg)
14
+ Figure 1: Left: Accuracy of large multimodal models vs. humans across eight reasoning paradigms of MM-IQ. Right: Visual examples of MM-IQ's reasoning paradigms (Detailed information can be found in Section 3.2).
15
+
16
+ ![](images/24b57012cf6894b85071b04d9aa77ab410a4e41752a767572498499517386a09.jpg)
17
+
18
+ ![](images/00a6757c58d541580603e94bbfe12e24ddad80e49dcaac3d92aa3e2aaa74656a.jpg)
19
+
20
+ ![](images/2c41997938f00a6d314eb95680b01c8289a70ae9d3916c12db9d394d3fe919d2.jpg)
21
+
22
+ ![](images/9f29d72712402192b6efb58affd8cfbad0b33a95931dd9b2b5ae4bbb0788f55e.jpg)
23
+
24
+ # Abstract
25
+
26
+ IQ testing has served as a foundational methodology for evaluating human cognitive capabilities, deliberately decoupling assessment from linguistic background, language proficiency, or domain-specific knowledge to isolate core competencies in abstraction and reasoning. Yet, artificial intelligence research currently lacks systematic benchmarks to quantify these critical cognitive capabilities in multimodal systems. To address this crucial gap, we propose MM-IQ, a comprehensive evaluation framework, which comprises a large-scale training set with 4,776 visual reasoning problems and 2,710 meticulously curated test items spanning 8 distinct reasoning paradigms. Through systematic evaluation of existing open-source and proprietary multimodal models, our benchmark reveals striking limitations: even state-of-the-art architectures achieve only marginally superior performance to random chance (33.17% vs. 25% baseline accuracy). This substantial performance chasm highlights the inadequacy of current multimodal models in approximating fundamental human reasoning capacities, underscoring the need for paradigm-shifting advancements to bridge this cognitive divide. Moreover, inspired by the recent surge of large reasoning models, we also release a multimodal reasoning model as the baseline that is trained via reinforcement learning with verifiable reward functions, reaching competitive performance to the state-of-the-art with a notably smaller model size.
27
+
28
+ $\oplus$ Homepage: acechq.github.io/MMIQ-benchmark/
29
+
30
+ # 1 Introduction
31
+
32
+ The rapid advancement of large multimodal models (LMMs) has intensified debates about their capacity for human-like abstraction and reasoning. While existing benchmarks evaluate specialized capabilities such as OCR, object localization, and medical image analysis [16, 34, 15], these task-specific metrics fail to quantify the critical cognitive dimensions in multimodal systems. This limitation mirrors a long-standing challenge in human cognitive assessment: early methods conflated domain knowledge with innate reasoning ability until IQ testing emerged to isolate core cognitive competencies through language- and knowledge-agnostic evaluations [25]. Inspired by this paradigm, we argue that multimodal intelligence evaluation should also similarly decouple linguistic proficiency and task-specific knowledge from the measurement of abstract reasoning capacities.
33
+
34
+ Abstract Visual Reasoning (AVR) offers a plausible solution to the above challenge. As shown in Figure 2, AVR problems usually contain visual puzzles with simple 2D/3D shapes. Solving these problems requires identifying and understanding the underlying abstract rules and generalizing them to novel configurations. Although there exists a wide range of AVR benchmarks, e.g., RAVEN [35], Bongard-LOGO [21], and SVRT [7], most of them have limited input modalities, reasoning paradigms, and restricted problem configurations, which can lead to biased evaluation results [30].
35
+
36
+ To this end, we propose MM-IQ, a comprehensive AVR benchmark comprising 2,710 meticulously curated test items spanning 8 distinct reasoning paradigms. Like human IQ tests, MM-IQ fully eliminates domain-specific and linguistic biases while systematically diversifying problem configurations to prevent pattern memorization, presenting striking challenges for LMMs: even state-of-the-art models achieve only $33.17\%$ accuracy, marginally exceeding random chance $(25\%)$ but far below human-level performance $(51.27\%)$ . This substantial performance chasm highlights the inadequacy of current LMMs in approximating fundamental human reasoning capacities, underscoring the need for paradigm-shifting advancements to bridge this cognitive divide. By applying IQ-testing principles to multimodal models, MM-IQ fills a critical gap in existing multimodal benchmarks, e.g., MMBench [15] and MMMU [34] that focus on broad task coverage rather than core reasoning abilities. Our results demonstrate that current architectures lack the intrinsic abstraction abilities necessary for human-like intelligence, shedding light on potential directions toward developing systems capable of genuine cognitive adaptation.
37
+
38
+ To facilitate further research and support the community in building and refining models with stronger abstract reasoning abilities, we purposely release a high-quality training set consisting of 4,776 question-answer pairs. Our preliminary experiments show that leveraging reinforcement learning (RL) on this training set can lead to notable performance improvement, suggesting that accurate, high-quality data and appropriately chosen training algorithms may help close the reasoning gap between current LMMs and humans.
39
+
40
+ # 2 Related Work
41
+
42
+ Following [19, 12, 18], all existing AVR benchmarks, including our MM-IQ, can be cataloged along three dimensions: input shape, problem configuration, and reasoning paradigm, as shown in Table 1. Input shape refers to the input forms of the objects in the given image, which contributes to evaluating models' cognition abilities of different shapes. Diverse problem configurations assess models' abstract reasoning capabilities across multi-dimensional aspects, including pattern recognition (Raven's Progressive Matrices [23]), analogical transfer ability (Visual Analogy [9]), discrimination ability (Odd-one-out [20]), extrapolation and generalization ability (Visual Extrapolation [32]), and numerical reasoning ability (Arithmetic Reasoning [37]), etc. MM-IQ's inclusion of diverse problem configurations ensures a thorough evaluation of multimodal models' abstract reasoning capabilities across various AVR problems. Reasoning paradigm is a more fine-grained category that evaluates LMMs' abstract reasoning capabilities, like logical deduction, temporal and spatial cognition, geometric, etc. It includes various reasoning paradigms such as temporal movement, spatial relationships, logical operations, and both 2D and 3D geometry, which are based on the internal forms, relationships, and numbers of objects in the given image. Existing benchmarks have only three paradigms on average except for MARVEL, which has five ones, but its quantity is relatively small. Although RAVEN [35], G-set [20], VAP [9], and DOPT [32] have more than 1,000 instances, all of their data are generated by computer programs, which lack diversity and complexity [6]. MM-IQ test set
43
+
44
+ <table><tr><td colspan="2"></td><td>RAVEN*</td><td>G-set*</td><td>VAP*</td><td>SVRT</td><td>DOPT*</td><td>ARC</td><td>MNS</td><td>IQTest</td><td>MARVEL</td><td>MM-IQ</td></tr><tr><td rowspan="3">Input Shape</td><td>Geometric</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Abstract</td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Concrete Object</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td></tr><tr><td rowspan="6">Problem Configuration</td><td>Raven&#x27;s Progressive Matrices [23]</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Visual Analogy [9]</td><td></td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td></tr><tr><td>Odd-one-out [20]</td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td></td><td>✓</td></tr><tr><td>Visual Extrapolation [32]</td><td></td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Arithmetic Reasoning [37]</td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td><td></td><td>✓</td></tr><tr><td>Visual Grouping</td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td>✓</td></tr><tr><td rowspan="8">Reasoning Paradigm</td><td>Temporal Movement</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td></tr><tr><td>Spatial Relationship</td><td></td><td></td><td></td><td>✓</td><td></td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td></tr><tr><td>2D-Geometry</td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>3D-Geometry</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Logical Operation</td><td>✓</td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td></tr><tr><td>Concrete Object</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td></tr><tr><td>Visual Instruction</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td></tr><tr><td>mathematics</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td colspan="2">Dataset Size</td><td>14,000</td><td>1,500</td><td>100,000</td><td>23</td><td>95,200</td><td>600</td><td>-</td><td>228</td><td>770</td><td>2,710 + 4,776</td></tr></table>
45
+
46
+ Table 1: Comparison between our MM-IQ and related benchmarks: RAVEN* [35], G-set* [20], VAP* [9], SVRT [7], DOPT* [32], ARC [6], MNS [37], IQTest [17], MARVEL [12]. * denotes that the dataset is automatically produced through procedural content generation.
47
+
48
+ comprises a total of 2,710 meticulously selected problems, $3\mathrm{x}$ larger than MARVEL, and covers a diverse spectrum of 8 fine-grained reasoning paradigms.
49
+
50
+ Training Reasoning Models with RL. RL has recently emerged as a pivotal approach for incentivizing the reasoning capabilities of both large language models (LLMs) and large multimodal models (LMMs). Several representative studies have demonstrated that RL-based training strategies can significantly improve models' performance on complex reasoning tasks. For instance, DeepSeek-R1 [8] combines Group Relative Policy Optimization (GRPO) [24] and a verifiable, rule-based reward method to achieve substantial improvements in math and coding benchmarks. Kimi-1.5 [28] also observes a similar phenomenon that pure RL can train LLMs to solve complex reasoning tasks via long CoT context characterized by emergent reflection, backtracking search, and self-verification. In the multimodal domain, models such as Vision-R1 [10] and R1-V [5] have applied R1-style RL to specific downstream tasks, including geometric reasoning and object counting, exhibiting promising progress in reproducing RL's success for LMMs. These advancements underscore the growing importance of RL techniques in pushing the limit of abstract reasoning for unimodal and multimodal AI systems and highlight the necessity of comprehensive benchmarks like MM-IQ to evaluate and foster such progress.
51
+
52
+ # 3 Construction of MM-IQ
53
+
54
+ Three features distinguish MM-IQ from other existing benchmarks for LMMs: (1) MM-IQ adopts data from professional and authoritative examinations and performs rigorous quality control, which ensures its correctness and validity; (2) MM-IQ is a comprehensive AVR benchmark for evaluating the intelligence of LMMs, comprising a total of 2,710 problems and covering a diverse spectrum of 8 fine-grained reasoning paradigms; (3) MM-IQ also provides a high-quality training set containing 4,776 carefully selected problem-answer pairs to accelerate the future research toward an excellent multimodal reasoner like humans.
55
+
56
+ # 3.1 Data Collection
57
+
58
+ The collection of MM-IQ involves three stages. Initially, we examined existing AVR datasets [35, 20, 6, 21] and discovered that most of them are generated by hand-coded procedures. Although programmatic synthesis can produce substantial amounts of data, it often lacks the necessary diversity. Hence, we chose to collect AVR problems from existing resources. Following [14, 12, 36], we collected problems from publicly available questions of the National Civil Servants Examination of China. These problems are specifically designed to evaluate civil servant candidates' critical thinking and problem-solving skills, and they meet our criteria for both quantity and diversity. The collected data underwent a rigorous filtering process conducted by human annotators to eliminate any low-quality entries. The filtering principle is that the problems can be solved only by extracting and utilizing high-level abstract reasoning information based on visual inputs.
59
+
60
+ To create a systematic and comprehensive benchmark, we proceeded to categorize the data into different reasoning paradigms and further augmented underrepresented paradigms. Based on the
61
+
62
+ descriptions of collected problems, we classified them into the corresponding reasoning paradigms. Additionally, we identified the common attributes of each paradigm's problems, such as attributes and entity types, and supplemented those with fewer instances to ensure that each fine-grained attribute or entity type had sufficient problems.
63
+
64
+ The final stage involved a more thorough cleaning of the collected data through deduplication and extraction of the final answers. We performed deduplication in two ways. First, we employed the MD5 hashing algorithm to find identical images and removed them if their input text was also identical. Second, we used the problems' accompanying information to flag similar items as suspected duplicates, which were then reviewed by human annotators against the input image and its accompanying information to identify and eliminate duplicates.
65
+
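+ A minimal sketch of this exact-duplicate pass follows; the directory layout and file extension are assumptions made purely for illustration:
+
+ ```python
+ import hashlib
+ from collections import defaultdict
+ from pathlib import Path
+
+ def group_identical_images(image_dir):
+     """Group image files by MD5 digest; any group with more than one file is an exact duplicate."""
+     groups = defaultdict(list)
+     for path in sorted(Path(image_dir).glob("*.png")):
+         groups[hashlib.md5(path.read_bytes()).hexdigest()].append(path)
+     return {digest: paths for digest, paths in groups.items() if len(paths) > 1}
+
+ # Duplicates found this way are removed only when their question text also matches.
+ print(group_identical_images("collected_problems/images"))
+ ```
+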
66
+ Additionally, the final answers were extracted by human annotators to facilitate efficient evaluation later. To further support the development of the open-source community, we also translated all questions and answers from Chinese to English using GPT-4, resulting in a bilingual version of the dataset. All translations were verified by humans to ensure their correctness. Specifically, the data distribution of the reasoning paradigms is shown in Figure 4, where concrete object and visual instruction each account for less than $2\%$ since they are rare in the existing data.
67
+
68
+ Based on the above data collection process, we constructed a comprehensive and well-curated evaluation set, which contains a total of 2,710 samples covering eight reasoning paradigms. Following a similar processing pipeline, we further constructed a training set. To mitigate the risk of test set leakage, we utilized the image encoder of CLIP-ViT-B32 [22] to extract image features for the training set and removed any images whose cosine similarity with those in the test set exceeded $97.5\%$ . As a result, the training set comprises 4,776 samples.
69
+
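+ The test-set leakage filter can be sketched as follows; this is a minimal illustration using the public openai/clip-vit-base-patch32 checkpoint from Hugging Face, and the batching, image loading, and file lists are assumptions rather than the pipeline actually used:
+
+ ```python
+ import torch
+ from PIL import Image
+ from transformers import CLIPModel, CLIPProcessor
+
+ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+ @torch.no_grad()
+ def embed(paths):
+     """Return L2-normalized CLIP image features for a list of image paths."""
+     images = [Image.open(p).convert("RGB") for p in paths]
+     inputs = processor(images=images, return_tensors="pt")
+     feats = model.get_image_features(**inputs)
+     return feats / feats.norm(dim=-1, keepdim=True)
+
+ def filter_leakage(train_paths, test_paths, threshold=0.975):
+     """Drop training images whose cosine similarity to any test image exceeds the threshold."""
+     train_feats, test_feats = embed(train_paths), embed(test_paths)
+     sims = train_feats @ test_feats.T  # pairwise cosine similarities
+     keep = sims.max(dim=1).values <= threshold
+     return [p for p, k in zip(train_paths, keep.tolist()) if k]
+ ```
+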
70
+ # 3.2 Reasoning Paradigms of MM-IQ
71
+
72
+ For simplicity and consistency, we follow MARVEL [12], a dataset evaluating LMMs' AVR ability but 3x smaller than ours, and extend its taxonomy to 8 categories, including logical operation, mathematics, 2D-geometry, 3D-geometry, visual instruction, temporal movement, spatial relationship, and concrete object. Notably, we merge mathematical and quantity categories from MARVEL's taxonomy into mathematics to align more closely with our taxonomy.
73
+
74
+ Logical Operation refers to the application of logical operators, such as AND (conjunction), OR (disjunction), XOR (exclusive disjunction), etc. This reasoning process involves observing and summarizing the abstract logical operations represented in the given graphics to derive general logical rules, which can then be applied to identify the required graphics. An example of reasoning involving the AND operation is shown in Figure 2.
75
+
76
+ 2D-Geometry encompasses two distinct categories. The first category involves understanding the attribute patterns of the provided 2D geometric graphics, such as symmetry, straightness, openness, and closure, and making analogies or extrapolations based on these attributes. The second category focuses on graphic splicing, which entails identifying a complete pattern that can be formed by assembling existing 2D geometric fragments. Together, these two types assess the capability of LMMs to perceive geometric shapes from both local and global perspectives. A visualized example of 2D-geometry reasoning concerning the symmetry property is shown in Figure 9 (see Appendix A.4 for details).
77
+
78
+ 3D-Geometry can be categorized into three categories. The first category assesses the capability of LMMs to perceive 3D geometry comprehensively by observing a polyhedron and identifying the required view from a specific direction. The second category is analogous to 2D graphic splicing, but it involves basic fragments and target objects that are three-dimensional in nature. The third category evaluates LMMs' comprehension of the interior structure of a 3D solid shape with the goal of identifying a cross-sectional view of the solid. An example of 3D-geometry reasoning for the specific directional view is shown in Figure 14 (see Appendix A.4 for details).
79
+
80
+ Visual Instruction employs visual cues such as points, lines, and arrows to highlight key areas necessary for solving visual puzzles. Unlike other reasoning paradigms, this approach allows test-takers to concentrate solely on these visual indicators rather than requiring a comprehensive observation of the entire panel. A visualized example of visual instruction reasoning with arrows is shown in Figure 10 (see Appendix A.4 for details).
81
+
82
+ Temporal Movement focuses on changes in position or movement, including translation, rotation, and flipping. This paradigm encompasses several problem configurations discussed in Section 2, including Raven's Progressive Matrices, Visual Analogy, and Visual Extrapolation. A visualized example of temporal movement reasoning involving rotation is shown in Figure 15 (see Appendix A.4 for details).
83
+
84
+ Spatial Relationship examines the static relative positional relationships among objects. This paradigm also encompasses various problem configurations, including Raven's Progressive Matrices, Visual Analogy, Visual Extrapolation and Visual Grouping. An example of spatial relationship reasoning is shown in Figure 11 (see Appendix A.4 for details).
85
+
86
+ Concrete Object involves real-world objects, such as vases, leaves, or animals, and requires LMMs to categorize these objects based on their characteristics, which may require external knowledge to solve. A visualized example of concrete object reasoning is shown in Figure 13 (see Appendix A.4 for details).
87
+
88
+ Mathematics evaluates LMMs' ability to reason about quantity, numbers, and arithmetic operations through visual inputs. This paradigm contains two types of tasks. The first type involves perceiving basic graphical elements, such as points, angles, lines, and planes, and applying arithmetic operations to these elements. The second type involves identifying an arithmetic expression that is satisfied by the numbers in the given images and determining the missing number based on the four fundamental operators: addition, subtraction, multiplication, and division. Examples of the two types of mathematics reasoning are shown in Figure 16 and Figure 12. The intersection point is the basic element used in Figure 16 (see Appendix A.4 for details).
89
+
90
+ ![](images/7598c3b7d62df79e88c7e78acf9e9fc4c1806160fc25ed41ffe3417b183b804b.jpg)
91
+ Prompt: Choose the most appropriate option from the given four choices to fill in the question mark, so that it presents a certain regularity:
92
+
93
+ ![](images/59c9285659fa373a64605bcef73d640f9c473a431240ba0981c8935ef5b5efb2.jpg)
94
+ A B C D
95
+ Figure 2: A visualized example of logical operation paradigm.
96
+
97
+ # LMM's Response:
98
+
99
+ The first two panels in each row have similar parts to the third panel.
100
+
101
+ In the first and second rows, the third panel is obtained by removing the differences and finding the similarities between the first two panels; the same rule is applied to the third row, and the small circle is obtained by removing the differences and finding the similarities between the first two panels. Only option C meets the requirements. Therefore, option C is selected.
102
+
103
+ Ground Truth: C
104
+
105
+ # 4 Experiments
106
+
107
+ # 4.1 Experimental Setup
108
+
109
+ RL-trained Baseline To set a baseline on the MM-IQ's training set, we choose the Qwen2.5-VL-7B-Instruct [3] as the base model and utilize the GRPO algorithm that is implemented by the EasyR1 framework [38]. The entire training process is conducted on a cluster of 32 H20 GPUs. We set the global batch size to 128 and the rollout batch size to 512 to ensure stable gradient estimation across 4 distributed nodes. The sampling temperature is set to 1.0, and the number of responses to each question is 14 in order to balance exploration and exploitation during training. The learning rate is fixed at 1.0e-6 by default. The maximum response length during training is constrained to 4,096. The model is trained for a total of 25 episodes. The reward function used in the training process is a weighted sum of two components: format (with a weight of 0.1) and accuracy (with a weight of 0.9). For both format and accuracy, a correct response (via regular expression checking) is assigned a value of 1, while an incorrect response is assigned a value of 0. Additionally, a KL penalty with a coefficient of 1.0e-2 is applied to regularize the training process. Other unspecified parameters are set to the default values provided by the EasyR1 framework.
110
+
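+ For concreteness, a simplified stand-in for such a rule-based reward is sketched below; the exact answer format and regular expressions used in the EasyR1 setup are not specified here, so the \boxed{A-D} convention is an assumption:
+
+ ```python
+ import re
+
+ FORMAT_WEIGHT, ACCURACY_WEIGHT = 0.1, 0.9
+
+ def reward(response: str, ground_truth: str) -> float:
+     """Weighted sum of a format check and an answer-accuracy check, each valued in {0, 1}."""
+     # Format: the response must contain a final answer wrapped as \boxed{...} (assumed convention)
+     match = re.search(r"\\boxed\{([A-D])\}", response)
+     format_score = 1.0 if match is not None else 0.0
+     # Accuracy: the extracted option letter must equal the ground-truth option
+     accuracy_score = 1.0 if (match and match.group(1) == ground_truth) else 0.0
+     return FORMAT_WEIGHT * format_score + ACCURACY_WEIGHT * accuracy_score
+
+ print(reward(r"The pattern alternates, so the answer is \boxed{C}", "C"))  # 1.0
+ ```
+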
111
+ Table 2: LMMs and Human Performance on MM-IQ (%). Abbreviations adopted: LO for Logical Operation; 2D-G for 2D-Geometry; 3D-G for 3D-Geometry; VI for Visual Instruction; TM for Temporal Movement; SR for Spatial Relationship; CO for Concrete Object. Qwen2.5-VL-7B-Instruct (RL) denotes our RL-trained baseline.
112
+
113
+ <table><tr><td>Model</td><td>Mean</td><td>LO</td><td>Math</td><td>2D-G</td><td>3D-G</td><td>VI</td><td>TM</td><td>SR</td><td>CO</td></tr><tr><td colspan="10">Open-Source LMMs</td></tr><tr><td>LLaVA-1.6-7B [13]</td><td>19.45</td><td>24.22</td><td>20.34</td><td>17.92</td><td>15.83</td><td>20.00</td><td>18.23</td><td>17.82</td><td>18.42</td></tr><tr><td>Deepseek-vl-7b-chat [4]</td><td>22.17</td><td>19.53</td><td>20.30</td><td>22.25</td><td>27.39</td><td>35.56</td><td>23.72</td><td>24.75</td><td>15.79</td></tr><tr><td>Qwen2-VL-72B-Instruct [31]</td><td>26.38</td><td>24.74</td><td>24.40</td><td>28.60</td><td>27.39</td><td>24.44</td><td>26.93</td><td>32.67</td><td>23.68</td></tr><tr><td>QVQ-72B-Preview [29]</td><td>26.94</td><td>28.91</td><td>25.59</td><td>29.23</td><td>26.38</td><td>26.67</td><td>25.43</td><td>22.77</td><td>34.21</td></tr><tr><td>Qwen2.5-VL-7B-Instruct [3]</td><td>25.90</td><td>25.95</td><td>24.46</td><td>23.56</td><td>29.14</td><td>25.53</td><td>27.47</td><td>27.96</td><td>26.31</td></tr><tr><td>Qwen2.5-VL-7B-Instruct (RL)</td><td>30.77</td><td>30.28</td><td>29.48</td><td>32.05</td><td>30.15</td><td>27.66</td><td>32.05</td><td>34.74</td><td>39.47</td></tr><tr><td colspan="10">Proprietary LMMs</td></tr><tr><td>GPT-4o [1]</td><td>26.87</td><td>25.52</td><td>25.70</td><td>28.32</td><td>27.64</td><td>26.67</td><td>25.69</td><td>27.72</td><td>50.00</td></tr><tr><td>Gemini-1.5-Pro-002 [27]</td><td>26.86</td><td>19.53</td><td>27.43</td><td>28.03</td><td>25.88</td><td>24.44</td><td>31.17</td><td>25.74</td><td>39.47</td></tr><tr><td>Claude-3.5-Sonnet [2]</td><td>27.49</td><td>23.41</td><td>29.48</td><td>26.60</td><td>24.37</td><td>35.56</td><td>25.69</td><td>27.72</td><td>42.11</td></tr><tr><td>Gemini-2.5-Pro [27]</td><td>31.23</td><td>33.33</td><td>32.15</td><td>30.96</td><td>28.79</td><td>36.17</td><td>27.23</td><td>34.74</td><td>42.10</td></tr><tr><td>Claude-3-7-Sonnet-Thinking [2]</td><td>31.55</td><td>32.57</td><td>30.88</td><td>33.69</td><td>29.62</td><td>31.91</td><td>28.43</td><td>35.59</td><td>57.89</td></tr><tr><td>OpenAI-o3 [11]</td><td>33.17</td><td>35.11</td><td>35.04</td><td>30.96</td><td>29.39</td><td>36.17</td><td>31.56</td><td>30.50</td><td>50.00</td></tr><tr><td>Human Performance</td><td>51.27</td><td>61.36</td><td>45.03</td><td>60.11</td><td>47.48</td><td>46.67</td><td>55.61</td><td>36.63</td><td>65.79</td></tr></table>
114
+
115
+ Evaluation We evaluated open-source and closed-source LMMs on the MM-IQ test set with zero-shot prompting and employed the same question prompt for all models. The few-shot prompting results will be included in the future version of MM-IQ since how to design appropriate multimodal prompts is still an open problem [33, 26]. For open-source LMMs, we selected widely used and state-of-the-art models, including QVQ-72B-Preview [29], Qwen2-VL-72B-Instruct [31], Deepseek-VL-7B-Chat [4], and LLaVA-1.6-7B [13]. For closed-source LMMs, we adopted o3[11], Claude-3-7-Sonnet-Thinking[2], Gemini-2.5-Pro[27], GPT-4o-2024-08-06 [1], Gemini-1.5-Pro002 [27], and Claude-3.5-Sonnet-2024-06-20 [2]. For a fair comparison, we employed the same settings and default hyperparameters for all LMMs (please refer to Table 4 in Appendix A.1 for more details). Each model generates a single response to each problem in the dataset. Notably, o3, Claude3-7-Sonnet-Thinking, Gemini-2.5-pro, and QVQ-72B-Preview are specifically designed for long CoT reasoning. The evaluation process of LMMs consists of three steps: (1) response generation, (2) answer extraction, and (3) accuracy calculation. We extract the final answer using regular expression (regex) matching. For example, the final answer will be extracted from the response "The correct answer is A." as "A". If there is no valid answer in the model's response, it will be considered incorrect.
116
+
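+ A sketch of the regex-based answer extraction is given below; the patterns are illustrative, and only the "The correct answer is A." example is taken from the text:
+
+ ```python
+ import re
+
+ def extract_answer(response):
+     """Extract the final option letter (A-D) from a model response, or None if absent."""
+     patterns = [
+         r"correct answer is\s*\(?([A-D])\)?",   # e.g. "The correct answer is A."
+         r"answer:\s*\(?([A-D])\)?",             # e.g. "Answer: (B)"  (assumed variant)
+         r"\\boxed\{([A-D])\}",                  # e.g. "\boxed{C}"    (assumed variant)
+     ]
+     for pattern in patterns:
+         match = re.search(pattern, response, flags=re.IGNORECASE)
+         if match:
+             return match.group(1).upper()
+     return None  # no valid answer -> the response is counted as incorrect
+
+ print(extract_answer("After comparing the panels, the correct answer is A."))  # "A"
+ ```
+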
117
+ # 4.2 Performance
118
+
119
+ Overall Performance: Table 2 presents the performance of human participants and a diverse set of LMMs on the MM-IQ benchmark. Across all evaluated models, there remains a substantial gap between LMMs and human-level performance, with humans achieving a mean accuracy of $51.27\%$, while the best-performing LMM, o3, attains $33.17\%$. This performance gap is consistent across most paradigms, particularly those requiring logical reasoning, 2D/3D geometry, and concrete object recognition, where human accuracy is markedly higher.
120
+
121
+ Focusing on open-source LMMs, we observe that models employing long chain-of-thought (CoT) strategies, such as QVQ-72B-Preview, achieve the highest mean accuracy $(26.94\%)$ among their peers, with notable improvements in challenging categories like logical operation and mathematics. However, it is important to note that open-source models with short CoT, such as Qwen2-VL-72B-Instruct $(26.38\%)$ and Qwen2.5-VL-7B-Instruct $(25.90\%)$ , deliver performance that is comparable to, and in some cases even surpasses, several proprietary models that also use short CoT (e.g., GPT-4o at $26.87\%$ , Gemini-1.5-Pro-002 at $26.86\%$ ). This suggests that, in the absence of long CoT reasoning, the performance gap between open-source and proprietary LMMs is relatively small.
122
+
123
+ For all proprietary LMMs such as Gemini and o3, we observed a consistent phenomenon that models with long CoT outputs, such as Gemini-2.5-Pro (31.23%), Claude-3-7-Sonnet-Thinking (31.55%), and o3 (33.17%), outperform their short counterparts. The performance gains are particularly evident in paradigms that require advanced reasoning capabilities, such as logical operations and mathematics.
124
+
125
+ For instance, OpenAI-o3 achieves the highest accuracy in these paradigms. These results imply the effectiveness of long CoT in enhancing the abstraction and reasoning capabilities of LMMs, and indicate that more advances in this direction may help reduce the distance to human-level performance.
126
+
127
+ Performance of RL-trained Baseline: Figure 3 depicts the accuracy curve on the MM-IQ test set of our RL-trained baseline with respect to training steps. Notably, our baseline model demonstrates a significant improvement over its original version and other open-source LMMs, achieving performance comparable to Gemini-2.5-Pro, as shown in Table 2. The improvement is consistent across all reasoning paradigms, with the RL-trained baseline achieving the highest scores among open-source models in logical operation (30.28%), math (29.48%), 2D-geometry (32.05%), 3D-geometry (30.15%), visual instruction (27.66%), temporal movement (32.05%), spatial relationship (34.74%), and concrete object reasoning (39.47%). These results suggest that RL can effectively incentivize the reasoning capabilities of LMMs, even for relatively small models.
128
+
129
+ ![](images/11c70966f43b41543d729b4c9b8789dc00b70e39e1917672967c08ca9a53847b.jpg)
130
+ Figure 3: The test accuracy of Qwen2.5-VL-7B-Instruct on MM-IQ during RL training.
131
+
132
+ # 5 Empirical Analysis
133
+
134
+ # 5.1 What Improvements does Reinforcement Learning Bring?
135
+
136
+ The performance analysis of the RL-trained baseline reveals notable improvements compared to its original version, prompting us to investigate which capabilities have been enhanced by RL. Specifically, the paradigms that require more complex reasoning gain higher improvements, such as 2D-Geometry (from $23.56\%$ to $32.05\%$ ) and Spatial Relationship (from $27.96\%$ to $34.74\%$ ).
137
+
138
+ To understand the nature of these improvements, we examined specific instances where the RL-trained model outperformed its non-RL counterpart. One such example is illustrated in Figure 21 in Appendix A.6. This case demonstrates that the RL-trained model is better able to capture abstract rules within images and finer-grained relationships between objects, rather than merely focusing on superficial shape or positional information. This enhanced ability to abstract and reason allows the model to perform better overall, despite some remaining incorrect image descriptions and understandings. Across a wide range of such cases, we consistently observe that this enhanced pattern abstraction and reasoning capability is a key factor contributing to the overall performance gains, particularly in tasks that demand complex reasoning.
139
+
140
+ Additionally, the model has developed some reflective capabilities, although it may not always lead to correct analyses, as illustrated in Figure 22 in Appendix A.6. This suggests that while the current model size may constrain its capabilities, the emergence of long CoT reasoning is a promising direction for future work. Therefore, we plan to train larger models using RL with MM-IQ training set to explore the frontier of reasoning in LMMs.
141
+
142
+ # 5.2 Long CoT vs. Short CoT
143
+
144
+ As shown in Table 2, models using long CoT reasoning consistently outperform those with short CoT reasoning, especially for proprietary models, which raises a question: in which aspects do long CoT models demonstrate enhanced capabilities compared to their short CoT counterparts? To systematically investigate this improvement, we compare Gemini-1.5-Pro (short
145
+
146
+ Table 3: Comparison of Reasoning Chain Length (Average Token Count)
147
+
148
+ <table><tr><td>Model</td><td>Avg. Token Length</td></tr><tr><td>Gemini-1.5-Pro (Short CoT)</td><td>98</td></tr><tr><td>Gemini-2.5-Pro (Long CoT)</td><td>2130</td></tr></table>
149
+
150
+ CoT) and Gemini-2.5-Pro (long CoT) on the MM-IQ benchmark. Table 3 shows that the average token length of long CoT responses is over 20 times that of short CoT. Through a systematic analysis of a large number of model-generated examples, we observe that long CoT responses are not simply longer in length, but exhibit a fundamentally different reasoning process.
151
+
152
+ Specifically, long CoT models often adopt a combination of multiple thinking templates, including hypothesis generation, paradigm analysis, application, verification, etc. During this process, the model first proposes potential paradigms based on the input, then applies these hypotheses to the problem at hand, and finally verifies whether the resulting answer is consistent with all observed evidence. If inconsistencies are detected, the model revisits its previous steps, refines its hypotheses, and repeats the process until a satisfactory solution is found or the context length reaches a predefined threshold.
153
+
154
+ An example of this looped reasoning process is illustrated in Figure 19 (Appendix A.5), where the model not only identifies and follows human-like reasoning paradigms, but also actively checks the validity of the generated answer and makes necessary correction. In contrast, short CoT models typically provide a naive, single-pass response, lacking such a loop of self-correction and verification.
155
+
156
+ # 5.3 Failure Analysis of Non-Reasoning LMMs on MM-IQ
157
+
158
+ ![](images/1592291597819a44eec54998040548612fd69f9a816326d7332a9dc181662458.jpg)
159
+ Figure 4: Left: Distribution over different error types across three representative LMMs. Right: Quantitative distribution of reasoning paradigms in MM-IQ's test set.
160
+
161
+ ![](images/960702ee533e0b6d1050263c60e3b72693da222c79f2207d6bdea859c97fc655.jpg)
162
+
163
+ Table 2 demonstrates that the highest accuracy of non-reasoning LMMs (Claude-3.5-Sonnet: $27.49\%$ ) is almost equivalent to randomly guessing a correct answer among four options, which motivates us to ask: Does the top-performing non-reasoning LMM, e.g., Claude, actually possess the reasoning abilities required by AVR tasks? To investigate this, we selected three representative models: Claude-3.5-Sonnet, Qwen2-VL-72B-Instruct, and LLaVA-1.6-7B, and examined their incorrect responses through human-in-the-loop evaluation. Specifically, we sampled a total of 90 predictions from each model for analysis. These 90 questions comprised 10 instances from each of the other seven reasoning paradigms, plus 20 from the Mathematical paradigm, given its larger proportion in the MM-IQ dataset $(35.1\%)$ .
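For concreteness, the stratified sample described above (10 wrong predictions per paradigm, 20 for the Mathematical paradigm, 90 in total) could be drawn as in the sketch below; the record field name `paradigm` is an assumption for illustration, not the dataset's actual schema.

```python
import random

# The eight reasoning paradigms of MM-IQ, as discussed in the paper.
PARADIGMS = [
    "Mathematical", "Logical Operation", "2D-Geometry", "3D-Geometry",
    "Visual Instruction", "Spatial Relationship", "Concrete Object", "Temporal Movement",
]

def stratified_sample(wrong_predictions: list[dict], seed: int = 0) -> list[dict]:
    """Draw 10 wrong predictions per paradigm (20 for Mathematical): 7 * 10 + 20 = 90."""
    rng = random.Random(seed)
    sample = []
    for paradigm in PARADIGMS:
        pool = [p for p in wrong_predictions if p["paradigm"] == paradigm]
        k = 20 if paradigm == "Mathematical" else 10
        sample.extend(rng.sample(pool, k))
    return sample
```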
164
+
165
+ Response Style and Structure. We first examined the average length and style of the generated responses. Compared to LLaVA-1.6-7B and Qwen2-VL-72B-Instruct, the best-performing LMM, Claude-3.5-Sonnet, produces longer and more structured answers. Moreover, Claude-3.5-Sonnet's responses begin with a detailed description of the given image and a strategic problem-solving plan, followed by a discussion of each option to determine the correct answer. A visual example of Claude-3.5-Sonnet's
166
+
167
+ response is illustrated in Figure 17 in Appendix A.5. In contrast, LLaVA-1.6-7B and Qwen2-VL-72B-Instruct fail to generate such responses. These observations suggest that structured reasoning may enhance performance on MM-IQ.
168
+
169
+ Error Typology. We examine each wrong response and categorize it into one of three types: incorrect paradigm reasoning, incorrect final answers, and incorrect visual understanding; examples can be found in Figure 6, Figure 7, and Figure 8 in Appendix A.3. As shown in Figure 4, incorrect paradigm reasoning forms a major part of failures (32.3% on average). In these responses, we observe that LMMs often solve problems by using wrong rules or focusing on more superficial changes. Such wrong rules include, for example, assuming that the objects in the image become progressively more compact and complex. A corresponding visualized example is provided in Figure 6, where the red text marks the incorrect reasoning caused by applying infeasible rules.
170
+
171
+ We also observe that Qwen2-VL-72B-Instruct and LLaVA-1.6-7B frequently make errors by directly providing the final answer without any intermediate thinking. To further investigate whether the absence of an explicit thinking process is a critical factor, we re-evaluate the models when they are asked to output only the final answer. For top-performing models, such as Qwen2-VL-72B-Instruct, only generating the final answer results in a performance drop of $4.7\%$ (from $26.9\%$ to $22.5\%$ ) on average. Conversely, for LLaVA-1.6-7B, it leads to an improvement of $2.8\%$ (from $19.4\%$ to $22.2\%$ ), implying that larger and stronger LMMs benefit more from generating explicit reasoning chains.
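A rough sketch of this answer-only re-evaluation is shown below; `generate` stands for an arbitrary model-inference callable, and the instruction wording is illustrative rather than the exact prompt used in the paper.

```python
from typing import Callable

def answer_only_accuracy(
    generate: Callable[[str], str],   # wraps one model call: prompt -> raw response text
    questions: list[str],
    ground_truths: list[str],         # option letters "A"-"D"
) -> float:
    """Re-score a model when it is asked to output only the final option letter."""
    suffix = "\nAnswer with only the option letter (A, B, C, or D). Do not explain."
    correct = 0
    for question, gt in zip(questions, ground_truths):
        reply = generate(question + suffix).strip().upper()
        predicted = reply[:1]         # keep only the leading letter
        correct += int(predicted == gt)
    return correct / len(questions)
```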
172
+
173
+ A deeper analysis of visual understanding errors reveals that all three models struggle with paradigms involving complex visual content, such as logical operations, temporal movement, and spatial relationships (see Figure 5 in Appendix A.2). Moreover, there is an inverse correlation between a model's overall performance and the proportion of its visual understanding errors. For instance, Claude-3.5-Sonnet performs poorly on the temporal movement and spatial relationship paradigms, and also shows a higher proportion of visual understanding errors on both. This underscores the necessity of enhancing the models' perceptual capacity to accurately interpret complex visual paradigms, thereby improving LMMs' reasoning capabilities. Due to limited space, further discussion is provided in Appendix A.2.
174
+
175
+ In summary, our failure analysis of LMMs on the MM-IQ dataset highlights several critical points for further research and improvement in multimodal abstract reasoning: 1) Structured Response Generation: Models like Claude-3.5-Sonnet, which produce longer and more structured responses, tend to perform better, suggesting that enhancing the ability to generate structured and detailed reasoning chains can improve accuracy. 2) Abstract Pattern Recognition: A significant portion of errors stems from reasoning that relies on overly simple rules; improving models' ability to identify and apply high-level abstract paradigms is essential. 3) Explanatory vs. Concise Answers: Detailed explanations can improve performance in stronger models but may not benefit weaker ones, highlighting the nuanced role of explanatory reasoning in model accuracy. 4) Visual Understanding: All models exhibit poor performance on complex visual paradigms, such as logical operations and spatial relationships, indicating a need for enhanced perceptual capabilities to accurately interpret intricate visual details. Addressing these challenges is crucial for advancing the reasoning capabilities of LMMs.
176
+
177
+ # 6 Conclusion
178
+
179
+ We propose MM-IQ, a comprehensive benchmark for evaluating the abstract visual reasoning of LMMs, which comprises a large-scale training set with 4,776 visual reasoning problems and 2,710 meticulously curated test items across 8 distinct reasoning paradigms, enabling a rigorous assessment of LMMs' abstraction and reasoning capabilities. Experimental results reveal striking limitations in current state-of-the-art LMMs, with the leading models achieving accuracy only slightly above random guessing, far behind human performance. We conduct a thorough failure analysis that identifies several key points for improvement, including structured reasoning, abstract pattern recognition, visual understanding, and inference-time scaling. MM-IQ is expected to complement existing multimodal benchmarks and provide a valuable resource for steering progress in multimodal research and promoting the advancement of AGI.
180
+
181
+ Limitations and broader impacts. Due to limited resources, we could not train larger models with RL on MM-IQ as stronger baselines, and we plan to extend this in future work. Regarding the broader impacts of MM-IQ, it is fundamental research on AI and LMMs, and we anticipate no adverse societal consequences beyond those generally associated with other widely used datasets and benchmarks.
182
+
183
+ # References
184
+
185
+ [1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
186
+ [2] Anthropic. The Claude 3 Model Family: Opus, Sonnet, Haiku. https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf, 2024. Accessed: 2025-01-11.
187
+ [3] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2.5-VL technical report. arXiv preprint arXiv:2502.13923, 2025.
188
+ [4] Xiao Bi, Deli Chen, Guanting Chen, Shanhuang Chen, Damai Dai, Chengqi Deng, Honghui Ding, Kai Dong, Qiushi Du, Zhe Fu, et al. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024.
189
+ [5] Hardy Chen, Haoqin Tu, Fali Wang, Hui Liu, Xianfeng Tang, Xinya Du, Yuyin Zhou, and Cihang Xie. Sft or r1? an early investigation into training r1-like reasoning large vision-language models. arXiv preprint arXiv:2504.11468, 2025.
190
+ [6] François Chollet. On the measure of intelligence. arXiv preprint arXiv:1911.01547, 2019.
191
+ [7] François Fleuret, Ting Li, Charles Dubout, Emma K Wampler, Steven Yantis, and Donald Geman. Comparing machines and humans on a visual categorization test. Proceedings of the National Academy of Sciences, 108(43):17621-17625, 2011.
192
+ [8] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
193
+ [9] Felix Hill, Adam Santoro, David GT Barrett, Ari S Morcos, and Timothy Lillicrap. Learning to make analogies by contrasting abstract relational structure. arXiv preprint arXiv:1902.00120, 2019.
194
+ [10] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025.
195
+ [11] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.
196
+ [12] Yifan Jiang, Jiarui Zhang, Kexuan Sun, Zhivar Sourati, Kian Ahrabian, Kaixin Ma, Filip Ilievski, and Jay Pujara. Marvel: Multidimensional abstraction and reasoning through visual evaluation and learning. arXiv preprint arXiv:2404.13591, 2024.
197
+ [13] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. LLaVA-NeXT: Improved reasoning, OCR, and world knowledge, January 2024. URL https://llava-vl.github.io/blog/2024-01-30-llava-next/.
198
+ [14] Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning. arXiv preprint arXiv:2007.08124, 2020.
199
+ [15] Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In European conference on computer vision, pages 216-233. Springer, 2025.
200
+ [16] Yuliang Liu, Zhang Li, Biao Yang, Chunyuan Li, Xucheng Yin, Cheng-lin Liu, Lianwen Jin, and Xiang Bai. On the hidden mystery of OCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023.
201
+
202
+ [17] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023.
203
+ [18] Mikołaj Małkiński and Jacek Mańdziuk. Deep learning methods for abstract visual reasoning: A survey on Raven's progressive matrices. arXiv preprint arXiv:2201.12382, 2022.
204
+ [19] Mikołaj Małkiński and Jacek Mańdziuk. A review of emerging research directions in abstract visual reasoning. Information Fusion, 91:713-736, 2023.
205
+ [20] Jacek Mańdziuk and Adam Zychowski. Deepiq: A human-inspired ai system for solving iq test problems. In 2019 International Joint Conference on Neural Networks (IJCNN), pages 1-8. IEEE, 2019.
206
+ [21] Weili Nie, Zhiding Yu, Lei Mao, Ankit B Patel, Yuke Zhu, and Anima Anandkumar. Bongard-logo: A new benchmark for human-level concept learning and reasoning. Advances in Neural Information Processing Systems, 33:16468-16480, 2020.
207
+ [22] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.
208
+ [23] Jean Raven. Raven progressive matrices. In Handbook of nonverbal assessment, pages 223-237. Springer, 2003.
209
+ [24] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.
210
+ [25] RE Snow. The topography of ability and learning correlations. In Advances in the psychology of human intelligence. Erlbaum, 1984.
211
+ [26] Yan Tai, Weichen Fan, Zhao Zhang, and Ziwei Liu. Link-context learning for multimodal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 27176-27185, 2024.
212
+ [27] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.
213
+ [28] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.
214
+ [29] Qwen Team. Qvq: To see the world with wisdom, December 2024. URL https://qwenlm.github.io/blog/qvq-72b-preview/.
215
+ [30] Han LJ Van der Maas, Lukas Snoek, and Claire E Stevenson. How much intelligence is there in artificial intelligence? a 2020 update. Intelligence, 87:101548, 2021.
216
+ [31] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.
217
+ [32] Taylor Webb, Zachary Dulberg, Steven Frankland, Alexander Petrov, Randall O'Reilly, and Jonathan Cohen. Learning representations that support extrapolation. In International conference on machine learning, pages 10136-10146. PMLR, 2020.
218
+ [33] Shukang Yin, Chaoyou Fu, Sirui Zhao, Ke Li, Xing Sun, Tong Xu, and Enhong Chen. A survey on multimodal large language models. arXiv preprint arXiv:2306.13549, 2023.
219
+
220
+ [34] Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9556-9567, 2024.
221
+ [35] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. Raven: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5317-5327, 2019.
222
+ [36] Ge Zhang, Xinrun Du, Bei Chen, Yiming Liang, Tongxu Luo, Tianyu Zheng, Kang Zhu, Yuyang Cheng, Chunpu Xu, Shuyue Guo, et al. Cmmmu: A chinese massive multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2401.11944, 2024.
223
+ [37] Wenhe Zhang, Chi Zhang, Yixin Zhu, and Song-Chun Zhu. Machine number sense: A dataset of visual arithmetic problems for abstract and relational reasoning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 1332-1340, 2020.
224
+ [38] Yaowei Zheng, Junting Lu, Shenzhi Wang, Zhangchi Feng, Dongdong Kuang, and Yuwen Xiong. Easyr1: An efficient, scalable, multi-modality rl training framework. https://github.com/hiyouga/EasyR1, 2025.
225
+
226
+ # A Appendix
227
+
228
+ # A.1 Experimental Setup
229
+
230
+ Table 4: Generating parameters for various LMMs.
231
+
232
+ <table><tr><td>Model</td><td>Generation Setup</td></tr><tr><td>Claude-3.5-Sonnet-2024-06-20</td><td>temperature = 1.0, output_token_limit = 8,192, top_p = 1.0</td></tr><tr><td>GPT-4o-2024-08-06</td><td>temperature = 1.0, output_token_limit = 16,384, top_p = 1.0</td></tr><tr><td>Gemini-1.5-Pro-002</td><td>temperature = 1.0, output_token_limit = 8,192</td></tr><tr><td>DeepSeek-vl-7b-chat</td><td>temperature = 1.0, output_token_limit = 2,048, do_sample = False, top_p = 1.0</td></tr><tr><td>LLaVA-1.6-7B</td><td>temperature = 0, output_token_limit = 2,048</td></tr><tr><td>Qwen2-VL-72B-Instruct</td><td>temperature = 1.0, output_token_limit = 8,192, top_p = 0.001, top_k = 1, do_sample = True, repetition_penalty = 1.05</td></tr><tr><td>QVQ-72B-Preview</td><td>temperature = 0.01, output_token_limit = 8,192, top_p = 0.001, top_k = 1, do_sample = True, repetition_penalty = 1.0</td></tr><tr><td>Claude-3-7-Sonnet-Thinking-20250219</td><td>temperature = 1.0, output_token_limit = 20,000, top_p = 1.0</td></tr><tr><td>Gemini-2.5-pro-preview-03-25</td><td>temperature = 1.0, output_token_limit = 20,000, top_p = 0.95, top_k = 64</td></tr><tr><td>o3-2025-03-01-preview</td><td>temperature = 1.0, output_token_limit = 20,000, top_p = 1.0</td></tr></table>
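For open-weight models, the rows of Table 4 map directly onto standard sampling arguments. Below is a hedged example for the Qwen2-VL-72B-Instruct row, expressed as Hugging Face Transformers `generate` keyword arguments; the exact serving stack used for the benchmark is not specified here, so treat this only as an illustration of the settings.

```python
# Sampling configuration mirroring the Qwen2-VL-72B-Instruct row of Table 4,
# written as Hugging Face Transformers `generate` keyword arguments (illustrative).
qwen2_vl_generation_kwargs = dict(
    max_new_tokens=8192,      # output_token_limit = 8,192
    temperature=1.0,
    top_p=0.001,
    top_k=1,
    do_sample=True,
    repetition_penalty=1.05,
)

# Usage (assuming `model` and `inputs` were prepared elsewhere):
# outputs = model.generate(**inputs, **qwen2_vl_generation_kwargs)
```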
233
+
234
+ # A.2 Analysis of Incorrect Visual Understanding
235
+
236
+ ![](images/277ebe0135988ee1ae278ed264b6fb377929798cc947e460cb55fcf9d242581f.jpg)
237
+ Figure 5: Proportions of incorrect visual understanding across eight reasoning paradigms.
238
+
239
+ A more granular analysis of visual understanding errors across different reasoning paradigms (see Figure 5) reveals several key patterns. For tasks involving logical operations, temporal movement, and spatial relationships, all three models exhibit a high proportion of visual understanding errors (up to $40\%$ for Claude-3.5-Sonnet in these categories). This suggests that, for these complex paradigms, the primary bottleneck lies in the models' ability to accurately perceive and interpret visual information, rather than in subsequent reasoning steps.
240
+
241
+ In contrast, for mathematical and visual instruction paradigms, the proportion of visual understanding errors is much lower (typically below $20\%$ ), indicating that errors in these categories are more
242
+
243
+ likely due to reasoning or calculation failures rather than perceptual limitations. For concrete object recognition, all models show relatively low visual understanding error rates, reflecting their strong performance in basic object identification.
244
+
245
+ Comparing the three models, Claude-3.5-Sonnet tends to have a higher proportion of visual understanding errors in the most challenging paradigms (such as logical operation, temporal movement, and spatial relationship), while Qwen2-VL-72B-Instruct and LLaVA-1.6-7B show a more balanced distribution of error types across paradigms. This highlights that, although some models may achieve higher overall accuracy, their performance in complex visual reasoning tasks is still constrained by perceptual limitations.
246
+
247
+ In summary, the analysis of error distributions and types reveals that visual understanding remains a major challenge for LMMs, especially in tasks involving abstract, dynamic, or relational visual reasoning. Addressing these weaknesses is crucial for advancing the overall reasoning capabilities of multimodal models.
248
+
249
+ # A.3 Examples of Three Error Typologies.
250
+
251
+ This section presents visualized examples of the three error typologies: incorrect paradigm reasoning, incorrect final answers, and incorrect visual understanding, which can be found in Figure 6, Figure 7, and Figure 8.
252
+
253
+ Prompt: Which of the following options conforms to the given pattern of changes in the figures?
254
+
255
+ ![](images/e109975d50fcce260855d13741d6773d70d9268464f0fc6c3439b44b219965f0.jpg)
256
+
257
+ ![](images/916fbdcdbc60a47509e73604a9e76444450d265bd61f2a0d34d37b0796eb197c.jpg)
258
+
259
+ ![](images/269e0a04142d885e4f61efdcce73d56bdb388ebbbf966cfd984a9a390d914a9a.jpg)
260
+
261
+ # LMM's Response:
262
+
263
+ To solve this problem, we need to analyze the pattern in the top row and then find which option in the bottom row follows that pattern.
264
+
265
+ In the top row, we see: 1. A circle inside a triangle inside a square 2. The same shapes, but the triangle is cut off on one side 3. The same shapes, but now the circle is cut off 4. A new pattern of intersecting squares 5. The same intersecting squares, but with an additional square added
266
+
267
+ The pattern seems to be adding complexity or elements as we move from left to right.
268
+
269
+ Now, looking at the options A, B, C, and D:
270
+
271
+ A: Shows two intersecting squares
272
+ B: Shows three intersecting squares
273
+ C: Shows four intersecting squares
274
+ D: Shows five intersecting squares
275
+
276
+ Following the pattern of increasing complexity and number of elements, the correct answer should be D. It shows the most complex arrangement with five intersecting squares, which follows the progression we see in the top row.
277
+
278
+ Therefore, the answer that best conforms to the given pattern is option D.
279
+
280
+ Final Answer: D $\times$
281
+
282
+ Ground Truth: C
283
+
284
+ Figure 6: An example of incorrect paradigm reasoning. The red parts mark the incorrect reasoning caused by recognizing overly simple rules. The correct rule is that the intersections in the third panel are the sum of the intersections in the first two panels of the same box.
285
+
286
+ Prompt: Choose the only option from the four figures to fill in the question mark, so that it presents a certain regularity.
287
+
288
+ ![](images/3b4e094865464c77d563bd1a1928216457e4514fa825360daae7a976ebff0c2c.jpg)
289
+ Figure 7: An example of incorrect final answers.
290
+
291
+ LMM's Response:
292
+
293
+ The only option that presents a certain regularity is B.
294
+
295
+ Final Answer: B $\times$ Ground Truth: A
296
+
297
+ Prompt: Choose the most appropriate one from the given four options to present a certain regularity:
298
+
299
+ ![](images/e7cf30956ff0519ad7312e26495a4eb779deafc55054e6ab4132933a52828478.jpg)
300
+ Figure 8: An example of incorrect visual understanding. The red parts are the incorrect visual understanding.
301
+
302
+ ![](images/2996d159da9118d72445b759ef90b4a9559ad464b81529fe19b1a1a08526812d.jpg)
303
+
304
+ LMM's Response:
305
+
306
+ To solve this problem, we need to analyze the pattern in the given sequence of shapes and then determine which of the options A, B, C, or D best continues that pattern.
307
+
308
+ Let's examine the sequence:
309
+
310
+ 1. Star (5-pointed)
311
+ 2. Shape with two curved lines
312
+ 3. Shape with two straight lines forming an acute angle
313
+ 4. Shape with one curved line (quarter circle)
314
+ 5. Shape with one straight line
315
+
316
+ The pattern seems to be a progression from more complex shapes to simpler ones, reducing the number of lines or points: - From 5 points (star) to 2 lines, then to 1 line
317
+
318
+ - From curved lines to straight lines Following this pattern, the next shape should be even simpler, possibly just a single straight line or a single point.
319
+
320
+ Looking at the options:
321
+
322
+ A: Curved line
323
+ B: Straight line
324
+ C: Two intersecting straight lines
325
+ D: Single point
326
+
327
+ Option B (straight line) or D (single point) would be the most logical continuations of the pattern. However, since we've already seen a straight line in the sequence, the single point (Option D) represents a further simplification and reduction, making it the most appropriate choice to continue the observed pattern.
328
+
329
+ Therefore, the best answer is D.
330
+
331
+ Final Answer: D $\times$
332
+
333
+ Ground Truth: B
334
+
335
+ # A.4 Examples of Reasoning Paradigms of MM-IQ
336
+
337
+ This section presents visualized examples of various reasoning paradigms—including visual instruction, spatial relationship, mathematics, concrete object, 2D-geometry, 3D-geometry reasoning, and temporal movement. Each example (see Figures 10, 11, 12, 13, 9, 14, and 15) illustrates the corresponding paradigm to facilitate better understanding of the MM-IQ benchmark.
338
+
339
+ Prompt: The option that best fits the given pattern of figures is ( ).
340
+
341
+ ![](images/c5f6083482d747d79029ec03a73d88f8074e5b2126df212925db22dae8f4a93e.jpg)
342
+ Figure 9: A visualized example of 2D-geometry paradigm.
343
+
344
+ ![](images/1f167576be64ec44e9776da7457a404c479546079ffcea4b83a1f2a7418b5513.jpg)
345
+
346
+ # LMM's Response:
347
+
348
+ To determine the option that best fits the given pattern of figures, let's analyze the symmetry of each figure in the sequence:
349
+
350
+ The first panel is a cylinder, which has rotational symmetry around its vertical axis. The second panel is a circle with two vertical lines inside, which has vertical symmetry. The third panel is a rectangle inside a square, which have both vertical and horizontal symmetry. The fourth panel is a circle with two small circles inside, which has rotational symmetry around its center. Now, let's consider the options:
351
+
352
+ A. A trapezoid with an X inside, which has vertical symmetry.
353
+ B. A recycling symbol, which has no symmetry.
354
+ C. A cube, which has multiple planes of symmetry.
355
+ D. A spiral, which has no symmetry.
356
+
357
+ Based on the symmetry analysis, the option that best fits the pattern of figures is A, as it has rotational symmetry similar to the other figures in the sequence.
358
+
359
+ # Ground Truth: A
360
+
361
+ # A.5 Examples of Top-performing Models
362
+
363
+ This section showcases three examples from top-performing models, each illustrating a distinct CoT reasoning style. Figure 17 provides a representative short CoT response from Claude-3.5-Sonnet. In contrast, Figure 18 and Figure 20 present the comparison between Gemini-1.5-Pro (short CoT) and Gemini-2.5-Pro (long CoT).
364
+
365
+ # A.6 Case Study of RL-trained Baseline
366
+
367
+ In this section, we provide two representative cases for the RL-trained baseline. Figure 21 illustrates the difference in model responses before and after reinforcement learning. Figure 22 showcases how the model performs when equipped with a reflection mechanism.
368
+
369
+ Prompt: Choose the most appropriate option from the given four options to present a certain regularity:
370
+
371
+ ![](images/a1faa0391dbfeab465fde49d8adfea1b532c0c4a5b21b8ca5413dab535c56e7d.jpg)
372
+ Figure 10: A visualized example of visual instruction paradigm.
373
+
374
+ ![](images/8e44a0853ef280304e11dc406833eee0c60e4bf4c95981ebbfe4e9c4b2373f95.jpg)
375
+ Figure 11: A visualized example of spatial relationship paradigm.
376
+
377
+ # LMM's Response:
378
+
379
+ By looking at the images, we notice that each picture has arrows, so we focus on their direction. When we examine the figure in the question from left to right, we see that all the arrows point to acute angles. Among the four options, only option B meets the requirement that the arrows point to acute angles.
380
+
381
+ Therefore, option B is the correct choice.
382
+
383
+ # Ground Truth: B
384
+
385
+ Prompt: Choose the most appropriate option from the given four options to present a certain regularity:
386
+
387
+ ![](images/b853e8bafd99fc3b56221291ef4759a146edbbe133e50e046722a3d74c99734c.jpg)
388
+
389
+ # LMM's Response:
390
+
391
+ As can be seen from the figure, the number of elements is the same, but the styles are different, mainly indicating that the positions and methods of placing multiple elements vary. We can identify the pattern in the three figures from the left box and then apply this pattern to the right box. All the figures in the square box on the left are composed of solid circles, triangles, and quadrilaterals. The positions of the solid circles are upper inner, lower, and lower inner, respectively; the positions of the triangles are upper outer, upper inner, and upper, respectively; and the positions of the quadrilaterals are lower, upper outer, and lower outer, respectively. By applying this pattern to the square box on the right, the figure at the question mark should be a solid trapezoid in the lower inner position, a rhombus in the upper position, and a circle in the lower outer position, which corresponds to option D. Therefore, the correct option is option D.
392
+
393
+ # Ground Truth: D
394
+
395
+ Prompt: Choose the most appropriate option from the given choices:
396
+
397
+ A:4;B:5;C:6;D:7.
398
+
399
+ ![](images/53abd9fbdcc863fa4c415bb9bce26e23ca883d3ad817ac24335275285d43ddfc.jpg)
400
+ Figure 12: A visualized example of mathematics paradigm.
401
+
402
+ ![](images/cb2436df632627fb142c8f753bef593dbe74aa7adedc58cc83f6ec19f05f81e2.jpg)
403
+
404
+ ![](images/42a70f512cee374e4dae8b6edccb584af99fa45cb33ea365cdbea9c7efb2d5bd.jpg)
405
+
406
+ ![](images/6e2e17624130bb1d9d620c1dbb0af33ce8c1cf0d323204e4fefc704457f5f1c7.jpg)
407
+
408
+ # LMM's Response:
409
+
410
+ By observing the numbers in the figure, we notice the following patterns: $6 - 4 = 8 - 6$ , $5 - 7 = 1 - 3$ , and $3 - 8 = -3 - 2$ . The pattern is that the number in the bottom right corner minus the number in the top left corner equals the number in the bottom left corner minus the number in the top right corner.
411
+
412
+ Using this pattern, we can set up the equation: $? - 9 = 6 - 8$ . Solving for $?$ , we find that $? = 7$ .
413
+
414
+ Therefore, we choose option D.
415
+
416
+ # Ground Truth: D
417
+
418
+ Prompt: Choose the most appropriate option from the given four choices to fill in the question mark, so that it presents a certain regularity:
419
+
420
+ ![](images/5895f3e16493570e8138c92abee2841333e043c6b2aeb241a241adc5447d7fce.jpg)
421
+ Figure 13: A visualized example of concrete object paradigm.
422
+
423
+ ![](images/a5b98f690b91a495e45599858883499ab89501b74428a11a7dbd8efbf5ea9354.jpg)
424
+
425
+ # LMM's Response:
426
+
427
+ To solve this puzzle, we need to identify the pattern or regularity in the given grid.
428
+
429
+ Looking at the grid:
430
+
431
+ 1. The first row contains a hat, roller skates, and gloves, which are all wearable items.
432
+ 2. The second row contains a radio, a gramophone, and a television, which are all related to audio-visual media.
433
+ 3. The third row contains tools (screwdrivers), a stool, and the missing item.
434
+
435
+ The pattern seems to be categorization based on function or use. The third row appears to be related to household or utility items.
436
+
437
+ Given the options: - A: Fire extinguisher - B: Pills - C: Soccer ball - D: Padlock
438
+
439
+ The most appropriate choice to complete the pattern of household or utility items is the padlock (D).
440
+
441
+ # Ground Truth: D
442
+
443
+ Prompt: The one that matches the top view is:
444
+
445
+ ![](images/8f3139567b1ea35a548d76dcb679fc904d2f08dd060d81c86e651d5ccb844f80.jpg)
446
+
447
+ ![](images/153da9ea5dd20a960963a136880f352706705332dd8f7bc668ee12195f516e75.jpg)
448
+ Figure 14: A visualized example of 3D-geometry reasoning.
449
+
450
+ ![](images/f1664ba21b6fd89f42312e04116b3663286b7badd29228d4d5bb82b3cce03074.jpg)
451
+
452
+ ![](images/0d12a023c1bb866d10cb4b41c7bc06e8b39e9d86b9f68fcac27e131dabb17e3f.jpg)
453
+
454
+ ![](images/e31baa224f103e29271360b8ac6b2b919aa740ad8c1d09ed53ac17f163e1d55a.jpg)
455
+
456
+ # LMM's Response:
457
+
458
+ The top view is the view obtained by orthographic projection from the top of the object, also called the top view. Analysis options:
459
+
460
+ A: There is a hollow cylinder in the three-dimensional figure that is not reflected, so it is excluded;
461
+ B: The hollow cylinder in the three-dimensional figure is only partially reflected, which is incomplete, so it is excluded;
462
+ C: There is a hollow cylinder in the middle of the bottom of the three-dimensional figure. The visible lines are represented by solid lines, and the invisible but real lines are represented by dotted lines. This is correct;
463
+ D: When observing the top row of the three-dimensional figure from top to bottom, there should be only 3 solid line surfaces, so it is excluded.
464
+
465
+ Therefore, the final choice is option C.
466
+
467
+ # Ground Truth: C
468
+
469
+ Prompt: Choose the most appropriate option from the given four choices to fill in the question mark, so that it presents a certain regularity:
470
+
471
+ ![](images/a7b66b857e6e85ab98e7b18043d75b43cce88f01385e52adc6bd9de21b9f38f3.jpg)
472
+
473
+ ![](images/7992cfafca20e389fd3a4651f74d905e3217c36efd7b76374c010da4251a10b1.jpg)
474
+ Figure 15: A visualized example of temporal movement paradigm.
475
+
476
+ # LMM's Response:
477
+
478
+ By observing the images, we can see that in the first row, image 1 is rotated 90 degrees clockwise to produce image 2, and then image 2 is flipped to the right to create image 3. The second row follows the same pattern.
479
+
480
+ Applying this same logic to the third row, we flip image 2 to the right, which allows us to obtain option C.
481
+
482
+ Therefore, the final choice is option C.
483
+
484
+ # Ground Truth: C
485
+
486
+ Prompt: Choose the most appropriate option from the given four choices to fill in the question mark, so that it presents a certain regularity:
487
+
488
+ ![](images/c7396d19b2ee57c17614df71d6dd47aad302ba7e9a223db07854e31de9c4d209.jpg)
489
+
490
+ ![](images/ac3b9856250a02e9c70c397b16e7369c1ac1b482403d756e571b892808b0dd95.jpg)
491
+ Figure 16: A visualized example of mathematics paradigm.
492
+
493
+ # LMM's Response:
494
+
495
+ Based on the pattern observed, each figure in the sequence has 7 intersections. After analyzing the options, only option A meets this requirement.
496
+
497
+ Therefore, the correct answer is option A.
498
+
499
+ # Ground Truth: A
500
+
501
+ Prompt: The option that best fits the given pattern of figures is ( ).
502
+
503
+ ![](images/77b8d51c43652d97f71d9153a5547780501db5289fc1e4b94bb9b9b93033d9b6.jpg)
504
+ Figure 17: An example of Claude-3.5-Sonnet's prediction.
505
+
506
+ ![](images/a1ed34e6c49cb3815ac63789f42d1da7db1739675fd98b7be767767c9714df17.jpg)
507
+
508
+ # Response:
509
+
510
+ To solve this problem, we need to analyze the pattern in the given sequence of figures and then determine which of the options (A, B, C, or D) best continues that pattern. Looking at the sequence:
511
+
512
+ 1. We start with a simple line connecting two points.
513
+ 2. Then we have a triangle with 3 points.
514
+ 3. Next is a shape with 4 points, forming a square with crossed lines inside.
515
+ 4. The fourth shape has 5 points, forming an irregular pentagon.
516
+
517
+ The pattern seems to be increasing the number of points by one each time, while also increasing the complexity of the connections between these points.
518
+
519
+ Following this pattern, the next shape should have 6 points.
520
+
521
+ Looking at the options:
522
+
523
+ A. A star shape with 5 points - doesn't fit the pattern as it doesn't increase to 6 points.
524
+ B. A house-like shape with 6 points - fits the number of points, but the connections are simpler than the previous shapes.
525
+ C. A hexagon with interconnected points - this fits both the number of points (6) and the complexity of connections.
526
+ D. An irregular shape with 6 points - fits the number of points, but the connections don't follow the increasing complexity.
527
+
528
+ Therefore, the option that best fits the given pattern is C. It has 6 points, continuing the increasing point count, and its interconnected structure follows the pattern of increasing complexity in how the points are connected.
529
+
530
+ Final Answer: C
531
+
532
+ Prompt: Choose the most appropriate one from the given four options to present a certain regularity:
533
+
534
+ ![](images/054372b56366a50cb6f3c8ab904a12a31007ed14230bffccb9831985940e8317.jpg)
535
+ Figure 18: An example of short CoT vs. long CoT
536
+
537
+ Gemini-1.5-Pro's Response (Short CoT):
538
+
539
+ Here's how to solve this matrix reasoning puzzle:
540
+
541
+ 1. **Connections Change:** Notice how the lines connecting the circles change with each step. They seem to rotate or shift.
542
+ 2. **Color Change:** The black and white circles also change positions. There's a sort of flipping or swapping happening.
543
+ 3. **Apply the Pattern:** Look at the last complete example in the top row. If you follow the observed changes in connections and circle colors, option C best fits the next step in the sequence.
544
+
545
+ So the answer is C. $\times$
546
+
547
+ # Gemini-2.5-Pro's Response (Final Output of Long CoT):
548
+
549
+ Let's analyze the pattern in the given figures. Each figure contains a 2x2 grid of circles, some black and some white. There are also lines connecting pairs of circles. First, let's observe the color pattern of the circles in all given figures. In all figures (including the options): - Top row: Left circle is black, Right circle is white. - Bottom row: Left circle is black, Right circle is white. This color pattern is consistent across all figures. Now, let's examine the connections between the circles in each figure. In the first row: - Figure 1: Horizontal connections. The top-left circle is connected to the top-right, and the bottom-left circle is connected to the bottom-right. - Figure 2: Diagonal connections. The top-left circle is connected to the bottom-right, and the top-right circle is connected to the bottom-left. - Figure 3: Vertical connections. The top-left circle is connected to the bottom-left, and the top-right circle is connected to the bottom-right.
550
+
551
+ In the second row: - Figure 4: Vertical connections (same as Figure 3). - Figure 5: Diagonal connections (same as Figure 2). - Figure 6 (Question Mark): We need to find the pattern for this figure.
552
+
553
+ Looking at the sequence of connections, we can observe a cycle: Horizontal -> Diagonal -> Vertical.
554
+
555
+ In the first row, the sequence is Horizontal, Diagonal, Vertical. In the second row, the sequence starts with Vertical, Diagonal. Following the cycle, the next connection type should be Horizontal.
556
+
557
+ Now let's examine the options A, B, C, and D to see which one has horizontal connections. - Option A: Diagonal connection. - Option B: Vertical connection. - Option C: Diagonal connection. - Option D: Horizontal connection.
558
+
559
+ Option D is the only option with horizontal connections, which matches the expected pattern. Also, the color pattern in option D is correct (Top row: Black-White, Bottom row: Black-White).
560
+
561
+ Therefore, Option D is the figure that conforms to the given pattern of changes.
562
+
563
+ Final Answer: The final answer is $\boxed{D}$ .
564
+
565
+ # Ground Truth: D
566
+
567
+ The user is asking to find the next figure in a sequence, based on the pattern observed in the given figures.
568
+
569
+ Let's analyze the given figures.
570
+
571
+ Each figure has 4 circles, arranged in a $2 \times 2$ grid. Some circles are black and some are white.
572
+
573
+ There are also lines connecting the circles.
574
+
575
+ Let's analyze the first row of figures:
576
+
577
+ Figure 1:
578
+
579
+ Top row: Black-White
580
+
581
+ Bottom row: Black-White
582
+
583
+ Horizontal connections between adjacent circles in each row.
584
+
585
+ Figure 2:
586
+
587
+ Top row: Black-White
588
+
589
+ Bottom row: Black-White
590
+
591
+ Diagonal connections between circles. Top-left to bottom-right, and top-right to bottom-left.
592
+
593
+ Figure 3:
594
+
595
+ Top row: Black-White
596
+
597
+ Bottom row: Black-White
598
+
599
+ Vertical connections between circles. Top-left to bottom-left, and top-right to bottom-right.
600
+
601
+ Let's analyze the second row of figures (used to find the answer):
602
+
603
+ Figure 4:
604
+
605
+ Top row: Black-White
606
+
607
+ Bottom row: Black-White
608
+
609
+ Vertical connections between circles. Top-left to bottom-left, and top-right to bottom-right.
610
+
611
+ Figure 5:
612
+
613
+ Top row: Black-White
614
+
615
+ Bottom row: Black-White
616
+
617
+ Diagonal connections between circles. Top-left to bottom-right, and top-right to bottom-left.
618
+
619
+ Figure 6 (Question Mark): We need to find this figure.
620
+
621
+ The pattern in the first row seems to be:
622
+
623
+ Horizontal -> Diagonal -> Vertical
624
+
625
+ The pattern in the second row starts with:
626
+
627
+ Vertical -> Diagonal -> ?
628
+
629
+ If we assume the pattern is repeating Horizontal -> Diagonal -> Vertical, then the next one after Diagonal should be Horizontal.
630
+
631
+ Let's check the options and see if any of them have horizontal connections.
632
+
633
+ Option A: Diagonal connection, not horizontal.
634
+
635
+ Option B: Vertical connection, not horizontal.
636
+
637
+ Option C: Diagonal connection, not horizontal.
638
+
639
+ Option D: Horizontal connection.
640
+
641
+ Option D has horizontal connections, and the circle colors are also Black-White in each row.
642
+
643
+ Let's re-examine the pattern.
644
+
645
+ It is possible the pattern is just cycling through connection types: Horizontal -> Diagonal -> Vertical -> Horizontal -> Diagonal -> Vertical ...
646
+
647
+ First row is Horizontal, Diagonal, Vertical. Second row is Vertical, Diagonal, then should be Horizontal.
648
+
649
+ Option D matches the expected pattern of Horizontal connections and the colors are also consistent with the first figures (Top: Black-White, Bottom: Black-White).
650
+
651
+ The colors in all the figures are consistent:
652
+
653
+ Top Row: Black-White
654
+
655
+ Bottom Row: Black-White
656
+
657
+ The connections are changing in a cycle: Horizontal -> Diagonal -> Vertical.
658
+
659
+ First row: 1. Horizontal 2. Diagonal 3. Vertical
660
+
661
+ Second row: 1. Vertical (Figure 4) 2. Diagonal (Figure 5) 3. Next must be Horizontal (Figure 6 - Question Mark)
662
+
663
+ Option D is the only option with Horizontal connections and the correct color pattern.
664
+
665
+ Figure 19: An example of the thinking process of long CoT.
666
+
667
+ Prompt: Choose the most appropriate one from the given four options to present a certain regularity:
668
+
669
+ ![](images/83ac16b161a1eb1bd3a7c2ceae2cfbdc3ba84e56840611e2f48b6c4d09e48bbf.jpg)
670
+
671
+ Gemini-1.5-Pro's Response (Short CoT):
672
+
673
+ This puzzle operates on two separate rules simultaneously:
674
+
675
+ 1. **Circle Color:** Each row and column contains one pair of white circles, one pair of black circles, and one mixed pair (one white, one black).
676
+ 2. **Base Lines:** Each row and column contains one single line base, one double line base, and one triple line base.
677
+
678
+ The missing square needs a mixed pair of circles and a triple line base to satisfy both rules within its row and column.
679
+
680
+ This corresponds to answer **C**. $\times$
681
+
682
+ # Gemini-2.5-Pro's Response (Final Output of Long CoT):
683
+
684
+ **Pattern Analysis:** Let's analyze the pattern in rows and columns for two features: the base lines and the filled circles.
685
+
686
+ **1. Base Lines:** - **Rows:** - Row 1: Single line, Double line, Triple line - Row 2: Double line, Triple line, Single line - Row 3: Triple line, Single line, ? The number of lines in the base seems to follow a cyclic sequence of 1, 2, 3, shifting to the right in each row. Thus, the missing base should have **double lines**:
687
+
688
+ - **Columns:** - Column 1: Single line, Double line, Triple line - Column 2: Double line, Triple line, Single line - Column 3: Triple line, Single line, ? Similarly, the columns also follow the same cyclic sequence of 1, 2, 3, shifting downwards. Thus, the missing base should have **double lines**.
689
+
690
+ **2. Filled Circles:** Let's analyze the positions of the filled circles (Top or Bottom or Both or None) in each cell.
691
+
692
+ - **Rows:** - Row 1: (None filled), (Both filled), (Bottom filled) - Row 2: (Top filled), (None filled), (Both filled) - Row 3: (Both filled), (Bottom filled), (?) The position of filled circles seems to be shifting in the sequence: None -> Top -> Bottom -> Both, moving to the right in each row. Following this, after 'Bottom filled', the next in the sequence should be 'Top filled'. So, the missing image should have **Top circle filled**.
693
+
694
+ - **Columns:** - Column 1: (None filled), (Top filled), (Both filled) - Column 2: (Both filled), (None filled), (Bottom filled) - Column 3: (Bottom filled), (Both filled), (?) Similarly, the columns also follow the same shifting sequence: None -> Top -> Bottom -> Both, moving downwards in each column. Following this, after 'Both filled', the next in the sequence should be 'Top filled'. So, the missing image should have **Top circle filled**.
695
+
696
+ **Conclusion:** Based on both row-wise and column-wise pattern analysis, the missing image should have: - **Double base lines** - **Top circle filled and Bottom circle unfilled**
697
+
698
+ Looking at the options, option **B** matches these criteria.
699
+
700
+ **Option Analysis:** - **A**: Single base line, both circles unfilled. Incorrect base lines and filled circles. - **B**: Double base line, top circle filled, bottom circle unfilled. **Correct match.**: - **C**: Single base line, both circles filled. Incorrect base lines and filled circles. - **D**: Double base line, both circles filled. Incorrect filled circles.
701
+
702
+ Final Answer: **B**
703
+
704
+ Prompt: Divide the following six figures into two categories, so that each category of figures has its own characteristics or patterns. The correct classification is:
705
+
706
+ A: ①④⑤, ②③⑥ B: ①②⑥, ③④⑤ C: ①⑤⑥, ②③④ D: ①②③, ④⑤⑥
707
+
708
+ ![](images/64158b0b1971e5e01e1e3328aa49d27f43c83f32ca115cf8112c1d2a3534204c.jpg)
709
+
710
+ ![](images/acd1656c6c4d1b7e4f909385c85df3a63ff52431e0312e1473d64692a290aaab.jpg)
711
+
712
+ ![](images/338875358fe88347167374f835482ac01b96098a3679f2734416278cc5bc206a.jpg)
713
+
714
+ ![](images/ee93dd8314c541c47993764ffe39f24c6d0aba3e3d60a6caa8d4a803b742fd49.jpg)
715
+
716
+ ![](images/fa34db29a34f27b81821894c9c2e64afa39aed6aeb2a6d0d989059aed858fc62.jpg)
717
+
718
+ ![](images/dd509db40ce736c39050eb2dd03b9df2098306ba32acea6063fb0ae9a901b253.jpg)
719
+
720
+ # Qwen2.5-VL-7B-Instruct's Response:
721
+
722
+ To classify the figures into two categories, we need to identify a common characteristic or pattern that distinguishes one group from the other. Let's analyze each figure:
723
+
724
+ 1. Figure ①: This figure consists of a series of shapes that are all pointing upwards.
725
+ 2. Figure ②: This figure is a circle divided into four equal parts, each containing a triangle.
726
+ 3. Figure ③: This figure represents a cloud with raindrops falling from it.
727
+ 4. Figure ④: This figure is a hexagon with a shape inside it that resembles a "T".
728
+ 5. Figure ⑤: This figure is a circle divided into eight equal parts, each containing a cross.
729
+ 6. Figure ⑥: This figure is a rectangle with two arrows pointing to the right. Now, let's categorize them:
730
+ - Category 1: Figures with shapes pointing in a specific direction.
731
+ - Figure ①: Shapes pointing upwards.
732
+ - Figure ④: The "T" shape inside the hexagon points upwards.
733
+ - Figure ⑤: The cross in each part of the circle points upwards.
734
+ - Category 2: Figures with no specific direction or pattern of pointing.
735
+ - Figure ②: The triangles are not pointing in any specific direction.
736
+ - Figure ③: The raindrops are falling downwards, not pointing in a specific direction.
737
+ - Figure ⑥: The arrows are pointing to the right, not in a specific direction.
738
+
739
+ Based on this analysis, the correct classification is: $\boxed{A}\times$
740
+
741
+ # Qwen2.5-VL-7B-Instruct (RL)'s Response:
742
+
743
+ <think> To classify the figures, we need to identify a common characteristic or pattern that distinguishes one group from the other. Let's analyze each figure:
744
+
745
+ 1. Figure ①: It has a symmetrical pattern with a central axis.
746
+
747
+ 2. Figure ②: It is a circle divided into four equal parts, forming a symmetrical pattern.
748
+
749
+ 3. Figure ③: It is a cloud with rain, which is a natural scene.
750
+
751
+ 4. Figure ④: It is a stop sign, which is a traffic sign.
752
+
753
+ 5. Figure ⑤: It is a clock face, which is a time-telling device.
754
+
755
+ 6. Figure ⑥: It is a rectangular box with two arrows pointing in opposite directions. Now, let's group them:
756
+
757
+ - Group 1: Figures that are symmetrical or have a clear pattern. - (1) (symmetrical pattern with a central axis) - (2) (circle divided into four equal parts) - (3) (rectangular box with arrows, which can be seen as symmetrical in a certain way)
758
+
759
+ - Group 2: Figures that are not symmetrical or have a different kind of pattern. - (3) (cloud with rain) - (4) (stop sign) - (5) (clock face)
760
+
761
+ However, upon closer inspection, we can see that the classification in option B aligns well with the pattern of symmetry and non-symmetry: - Group 1: (symmetrical pattern with a central axis), (circle divided into four equal parts), (rectangular box with arrows)
762
+
763
+ - Group 2: (3) (cloud with rain), (4) (stop sign), (5) (clock face)
764
+
765
+ But since 6 is more about symmetry in a different way, it might be better to group it with 1 and 2.
766
+
767
+ Therefore, the correct classification is: B: ①②⑥, ③④⑤</think>
768
+
769
+ Prompt: The option that best fits the given pattern of figures is ( ).
770
+
771
+ ![](images/c5d3b450f964f33a895ec03deaa94b10ff58807019383cc08ab7e0a2bc179f4d.jpg)
772
+
773
+ # Response:
774
+
775
+ <think> To solve this problem, we need to identify the pattern in the circles. Let's analyze the first three circles:
776
+
777
+ 1. In the first circle, the numbers are 3, 6, 5, 2, and 4. The middle number (4) is obtained by adding the numbers on opposite sides: $3 + 5 = 8$ , $6 + 2 = 8$ , and $4 + 4 = 8$ . However, the middle number is 4, which suggests we need to look at the products of the numbers on opposite sides.
778
+ 2. In the second circle, the numbers are 15, 12, 4, 5, and 6. The middle number (6) is obtained by adding the products of the numbers on opposite sides: $15 * 5 = 75$ , $12 * 4 = 48$ , and $6 * 6 = 36$ . However, the middle number is 6, which suggests we need to look at the differences of the numbers on opposite sides.
779
+ 3. In the third circle, the numbers are 24, 6, 14, 5, and 7. The middle number (7) is obtained by adding the differences of the numbers on opposite sides: $24 - 14 = 10$ , $6 - 5 = 1$ , and $7 - 7 = 0$ . However, the middle number is 7, which suggests we need to look at the quotients of the numbers on opposite sides.
780
+ 4. In the fourth circle, the numbers are 1, 5, 4, 12, and ?. The middle number is obtained by adding the quotients of the numbers on opposite sides: $\frac{1}{4} = 0.25$ , $\frac{5}{12} = 0.4167$ , and $\frac{12}{5} = 2.4$ . However, the middle number is 5, which suggests we need to look at the sums of the numbers on opposite sides.
781
+
782
+ Given the pattern is not clear, let's re-evaluate the sums of the numbers on opposite sides:
783
+
784
+ - In the first circle: $3 + 5 = 8$ , $6 + 2 = 8$ , $4 + 4 = 8$ - In the second circle: $15 + 5 = 20$ , $12 + 4 = 16$ , $6 + 6 = 12$
785
+ - In the third circle: $24 + 14 = 38$ , $6 + 5 = 11$ , $7 + 7 = 14$ - In the fourth circle: $1 + 12 = 13$ , $5 + 4 = 9$ , $? + 5 = 5$ The pattern seems to be the sum of the numbers on opposite sides.
786
+
787
+ Therefore, the missing number is $5 - 5 = 0$ , but the options provided are not correct. Let's re-evaluate the pattern: The pattern seems to be the sum of the numbers on opposite sides.
788
+
789
+ Therefore, the missing number is $5 - 5 = 0$ , but the options provided are not correct. Let's re-evaluate the pattern: The pattern seems to be the sum of the numbers on opposite sides.
790
+
791
+ Therefore, the missing number is $5 - 5 = 0$ , but the options provided are not correct.
792
+
793
+ The correct answer is B: 8 </think> $\boxed{B}$
794
+
795
+ # Final Answer: B
796
+
797
+ Figure 22: An example of reflection from the RL-trained baseline. The red parts are the reflective response.
2502.00xxx/2502.00698/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86c6b2b9e992bb54c59840addd9c9306cf36e50def475dd824616a77e98ea82a
3
+ size 668137
2502.00xxx/2502.00698/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00722/597a9475-72e3-4fad-95d4-ebca0143adb7_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1cd25c00e1331f45f86be00e5bb1441b98cbba4524578e6c7dbc0b23b245779
3
+ size 3417849
2502.00xxx/2502.00722/full.md ADDED
@@ -0,0 +1,458 @@
 
 
 
 
1
+ # Demystifying Cost-Efficiency in LLM Serving over Heterogeneous GPUs
2
+
3
+ Youhe Jiang $^{*12}$ Fangcheng Fu $^{*3}$ Xiaozhe Yao $^{*4}$ Guoliang He $^{*1}$ Xupeng Miao $^{5}$ Ana Klimovic $^{4}$ Bin Cui $^{3}$ Binhang Yuan $^{2}$ Eiko Yoneki $^{1}$
4
+
5
+ # Abstract
6
+
7
+ Recent advancements in Large Language Models (LLMs) have led to increasingly diverse requests, accompanied with varying resource (compute and memory) demands to serve them. However, this in turn degrades the cost-efficiency of LLM serving as common practices primarily rely on homogeneous GPU resources. In response to this problem, this work conducts a thorough study about serving LLMs over heterogeneous GPU resources on cloud platforms. The rationale is that different GPU types exhibit distinct compute and memory characteristics, aligning well with the divergent resource demands of diverse requests. Particularly, through comprehensive benchmarking, we discover that the cost-efficiency of LLM serving can be substantially optimized by meticulously determining GPU composition, deployment configurations, and workload assignments. Subsequently, we design a scheduling algorithm via mixed-integer linear programming, aiming at deducing the most cost-efficient serving plan under the constraints of price budget and real-time GPU availability. Remarkably, our approach effectively outperforms homogeneous and heterogeneous baselines under a wide array of scenarios, covering diverse workload traces, varying GPU availabilities, and multi-model serving. This casts new light on more accessible and efficient LLM serving over heterogeneous cloud resources.
8
+
9
+ *Equal contribution $^{1}$ Department of Computer Science, University of Cambridge, Cambridgeshire, UK $^{2}$ Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Hong Kong, China $^{3}$ Department of Computer Science, Peking University, Beijing, China $^{4}$ Department of Computer Science, ETH Zurich, Zürich, Switzerland $^{5}$ Department of Computer Science, Purdue University, West Lafayette, Indiana, US. Correspondence to: Binhang Yuan <biyuan@ust.hk>, Eiko Yoneki <eiko.yoneki@cl.cam.ac.uk>.
10
+
11
+ Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
12
+
13
+ ![](images/50a5d12dfb70cb3bbd75e9168279d763658f0d63d1f3d438c1010a4b95163261.jpg)
14
+ Figure 1: The real-world workload traces from the Swiss AI Center comprise over 500,000 traces collected over one month. We categorize the workload types based on input and output token lengths (inputs longer than 512 tokens and outputs longer than 128 tokens are characterized as long).
15
+
16
+ ![](images/fb2c5d2a10ff1ef3b8d581d24adf9d461859c8427c5fd8d75572c3aaa3803c1f.jpg)
17
+
18
+ # 1. Introduction
19
+
20
+ Large Language Models (LLMs), including GPT-4 (OpenAI, 2024), Gemini (Reid et al., 2024), Llama3 (Dubey et al., 2024), Claude (Anthropic, 2024), Mixtral (Jiang et al., 2024), and DeepSeek-V3 (Liu et al., 2024a), have demonstrated unprecedented performance across a wide range of real-world applications (GitHub, 2024; Jeon & Lee, 2023; Peng et al., 2023), such as chatbots, education, and healthcare, profoundly impacting human lives. In this context, enhancing the cost-efficiency of LLM serving is crucial for democratizing access to these cutting-edge technologies.
21
+
22
+ Currently, predominant practices utilize homogeneous GPU resources to deploy LLMs and serve the incoming requests (Li et al., 2023; Kwon et al., 2023; Agrawal et al., 2024b). However, as application domains broaden, LLM serving faces increasingly varied request patterns, making the serving workloads dynamic and diverse—a phenomenon referred to as workload heterogeneity (Sun et al., 2024; Zhao et al., 2024b). This mismatch makes homogeneous GPU resources a poor fit.
23
+
24
+ To be specific, the requests to be served have varying input and output token lengths, as exemplified by the real-world LLM serving traces at the Swiss AI Center shown in Figure 1. Such differences lead to significantly divergent resource (compute and memory) demands across different types of workloads, owing to the distinct characteristics of the two phases of inference: the prefetch phase is compute-
25
+
26
+ Table 1: GPU Specifications and Pricing
27
+
28
+ <table><tr><td>GPU Type</td><td>Peak FP16 FLOPS</td><td>Memory Access Bandwidth</td><td>Memory Limit</td><td>Price (per GPU)</td></tr><tr><td>A6000</td><td>91 TFLOPS</td><td>960 GB/s</td><td>48 GB</td><td>0.83 $/h</td></tr><tr><td>A40</td><td>150 TFLOPS</td><td>696 GB/s</td><td>48 GB</td><td>0.55 $/h</td></tr><tr><td>L40</td><td>181 TFLOPS</td><td>864 GB/s</td><td>48 GB</td><td>0.83 $/h</td></tr><tr><td>A100</td><td>312 TFLOPS</td><td>1555 GB/s</td><td>80 GB</td><td>1.75 $/h</td></tr><tr><td>H100</td><td>1979 TFLOPS</td><td>3.35 TB/s</td><td>80 GB</td><td>2.99 $/h</td></tr><tr><td>4090</td><td>83 TFLOPS</td><td>1008 GB/s</td><td>24 GB</td><td>0.53 $/h</td></tr></table>
29
+
30
+ bounded as it processes input prompts in a single step, while the decoding phase is memory-bounded as it generates subsequent tokens step by step (Zhong et al., 2024; Patel et al., 2024). Therefore, when using homogeneous GPU resources, it is hard to fit the varying resource demands well.
31
+
32
+ On the contrary, the heterogeneity in resource demands presents a unique opportunity to enhance the overall serving efficiency by leveraging different GPU types. As shown in Table 1, various GPU types offer diverse compute and memory capabilities, making them well-suited for processing different types of workloads. Motivated by this, we explore two questions: Can serving LLMs over heterogeneous GPU resources achieve better cost-efficiency than homogeneous GPU resources? If yes, how can we enhance the cost-efficiency? To this end, this work makes two corresponding technical contributions.
33
+
34
+ The first contribution is a comprehensive benchmarking of LLM serving over various GPU types, which offers a detailed understanding of cost-efficiency with heterogeneous GPU resources. Based on the benchmarking results, we reveal three key factors that are vital to the cost-efficiency:
35
+
36
+ - GPU composition (i.e., the number and types of GPUs that make up a heterogeneous cluster) is essential for optimizing the cost-efficiency of LLM serving. Different GPU types exhibit varying characteristics (e.g., computational capabilities, memory bandwidths, and memory capacities), making them more suitable for distinct workloads and model types. Given the varying types of incoming workloads, we need to strategically optimize GPU composition to improve resource utilization, reduce latency, and enhance overall serving performance.
37
+ - Deployment configurations (i.e., how many model replicas to deploy and the parallelism strategy for each) are necessary for maximizing overall system performance. The optimal configuration is influenced by the model, workload, and GPU type, so using a single, uniform deployment configuration for all replicas is suboptimal. Therefore, we should adaptively optimize the deployment configurations so that system efficiency can be improved.
38
+ - Workload assignment (i.e., allocating incoming workloads to GPUs) becomes crucial as different replicas are deployed with varying configurations (i.e., resources and
39
+
40
+ ![](images/1fb3c5962034fec8a9e565dd8d279c7a917a7c924bb64832845de264573b0f33.jpg)
41
+ Figure 2: The number of different types of GPUs on Vast.ai during a 24-hour period.
42
+
43
+ parallelisms) and different workloads have their preferable resource needs. As a result, to improve resource utilization, it is necessary to assign requests to more suitable replicas while balancing the burden across all replicas.
44
+
45
+ The second contribution is a new LLM serving framework, designed based on the benchmarking insights, which aims at maximizing the cost-efficiency of LLM serving over heterogeneous GPU resources on cloud platforms.
46
+
47
+ Given the three factors above, a straightforward approach is to rent the most suitable GPUs for each workload type and assign requests accordingly. Nevertheless, this is impractical for two reasons. First, due to high demand for cloud GPUs, cloud platforms (e.g., Vast.ai, RunPod, and AWS) offer a variety of GPU types but usually have limited quantities of each type. We present the availability of different GPU types on Vast.ai over a 24-hour period in Figure 2. Second, the user-defined price budget is often constrained, making it impractical to always allocate sufficient GPUs for each workload demand.
48
+
49
+ Consequently, we formulate a scheduling algorithm based on mixed-integer linear programming (MILP), which takes into account both real-time GPU availability on cloud platforms and the user-defined price budget, while co-optimizing how to rent GPUs from the available pool (GPU composition), how to deploy the models over the rented GPUs (deployment configuration), and how to dispatch the workloads among the model replicas (workload assignment). We further incorporate practical heuristics and a binary search mechanism, as well as extend our approach to the multi-model scenario, improving scalability and solving efficiency for large-scale clusters.
50
+
51
+ We empirically evaluate our framework by comparing it with both homogeneous and heterogeneous baselines across a variety of scenarios, covering diverse workload traces, varying GPU availabilities, and multi-model serving. The results demonstrate that, within the same price budget, our approach can achieve up to $41\%$ and on average $20\%$ higher throughput, or reduce the serving latency by up to $54\%$ and on average $20\%$ .
52
+
53
+ # 2. Background
54
+
55
+ LLM inference phase. The inference process in LLMs consists of two main phases: the prefetch phase and the decoding phase. During the prefetch phase, the model processes the input prompt to compute the key-value (KV) cache and generates the first token in a single step. In contrast, the decoding phase uses the last generated token and the KV cache as inputs to generate subsequent tokens in a token-by-token manner. Generally, the prefetch phase is compute-bound, while the decoding phase is memory-bound.
56
+
57
+ Workload heterogeneity. LLMs are designed to support a diverse range of applications, and the inference workloads associated with these applications often exhibit heterogeneity in input and output token lengths, which is called workload heterogeneity. Different workload types exhibit varying characteristics in terms of compute and memory demands. For example, requests from the WildChat dataset (Zhao et al.), with average input and output token lengths of 496 and 510 respectively (classified as short input and long output), typically require more memory resources to handle the memory-bound decoding phase. Conversely, requests from the Azure-Trace dataset (Patel et al., 2024; Azure, 2024), with average input and output token lengths of 2455 and 18 respectively (classified as long input and short output), generally demand more compute resources to manage the compute-bound prefetch phase. Therefore, appropriately allocating resources based on workload demands is critical for optimal performance.
58
+
59
+ Heterogeneous LLM serving. Recent research has explored various approaches for deploying LLM serving in heterogeneous GPU environments to achieve cost-efficient solutions (JIANG et al.; Miao et al., 2024; Griggs et al., 2024; Zhao et al., 2024a; Mei et al., 2024; Borzunov et al., 2023; Yan et al., 2024). HexGen introduces asymmetric partitioning and advanced scheduling techniques to deploy generative inference in decentralized and heterogeneous settings. Mélange frames GPU allocation as a cost-aware bin-packing problem, optimizing cost efficiency for LLM services by effectively leveraging heterogeneous GPU types. Helix formulates the problem of heterogeneous GPU and network connection optimization as a max-flow problem, utilizing mixed-integer linear programming to determine the optimal model deployment. However, existing works typically optimize performance within a predefined heterogeneous cluster, and fail to consider GPU availability and user-defined budget constraints on cloud platforms. In addition, they are generally unaware of the workload heterogeneity, and only consider uniform workload assignment.
60
+
61
+ LLM serving optimization. There are several related works focusing on the optimization of LLM serving (Li et al., 2023; Yu et al., 2022; Kwon et al., 2023). QLM (Patke et al., 2024) focuses on SLO-aware serving and multi-
62
+
63
+ node optimizations by refining request ordering; Sarathi-Serve (Agrawal et al., 2024b) optimizes batching through prefetch chunking to mitigate interference between the prefetch and decoding stages; and Vidur (Agrawal et al., 2024a) develops an accurate simulator for deployment tuning. In contrast, our method is dedicated to achieving heterogeneous, cost-efficient serving in cloud environments.
64
+
65
+ # 3. Observation and Opportunity
66
+
67
+ Given a user with a specified budget (in $/h) who rents GPUs from the cloud to serve certain workload traces, our objective is to deliver a comprehensive serving plan that maximizes the cost-efficiency of the user's serving system. In this section, we first benchmark the cost-efficiency performance of various workload types across different GPU types, model types, and deployment configurations. Then, we present our key observations and opportunities.
68
+
69
+ Benchmark settings. We subsample nine workload types from the ShareGPT (Zheng et al.), WildChat (Zhao et al.), and Azure-Trace datasets (Patel et al., 2024). These workloads are characterized by average input token lengths of $\{2455,824,496\}$ and output token lengths of $\{510,253,18\}$ . Each combination reflects distinct workload characteristics. For example, $\{2455,18\}$ (long input, short output) represents compute-intensive workloads, while $\{496,510\}$ (short input, long output) represents memory-intensive workloads. Based on these workload types, we evaluate two models, Llama3-8B and Llama3-70B, on six commonly used cloud GPUs (A6000, A40, L40, A100, H100, and 4090) with different deployment configurations. The benchmarking metrics include request throughput per unit cost (i.e., throughput divided by GPU cost) and the total cost associated with various latency percentiles (e.g., p5, p10, p15, ..., p95, p100). The total cost for each latency percentile is calculated by multiplying the latency time by the GPU cost. These metrics serve as indicators of cost-efficiency. The GPU costs are listed in Table 1.
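+ As a rough illustration of these two metrics, the sketch below computes throughput per unit cost and the cost of a latency percentile from the Table 1 hourly prices; the latency and throughput numbers in the usage example are placeholders, and converting latencies to hours before multiplying by the hourly price is our own assumption, since the text does not state a unit convention.

```python
import numpy as np

GPU_PRICE_PER_HOUR = {   # $/h, from Table 1
    "A6000": 0.83, "A40": 0.55, "L40": 0.83,
    "A100": 1.75, "H100": 2.99, "4090": 0.53,
}

def throughput_per_dollar(requests_per_s: float, gpu: str, num_gpus: int = 1) -> float:
    """Request throughput divided by the hourly rental cost of the GPUs used."""
    return requests_per_s / (GPU_PRICE_PER_HOUR[gpu] * num_gpus)

def latency_percentile_cost(latencies_s, percentile: float, gpu: str,
                            num_gpus: int = 1) -> float:
    """Cost of one latency percentile: latency (converted to hours) times GPU cost."""
    latency_h = float(np.percentile(latencies_s, percentile)) / 3600.0
    return latency_h * GPU_PRICE_PER_HOUR[gpu] * num_gpus

# Placeholder measurements for illustration only.
print(throughput_per_dollar(requests_per_s=2.0, gpu="A40", num_gpus=4))
print(latency_percentile_cost([3.1, 4.8, 7.5, 12.0], 95, gpu="A40", num_gpus=4))
```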
70
+
71
+ Observation-1: Heterogeneous GPUs are well-suited for managing model and workload diversity in LLM serving. Figure 3 and Figure 11 present the benchmark results for the Llama3-70B and Llama3-8B models across various GPU types and workload types. The observations can be summarized as follows: (i) The H100 and A100 GPUs (data center GPUs) perform well on compute-intensive workloads with the Llama3-70B model, as both GPUs have high computational power to handle intense computational tasks. (ii) The A40, A6000, and L40 GPUs (workstation GPUs) excel in memory-intensive workloads with the Llama3-70B model. Workstation GPUs offer on average $1.2 \times$ higher memory bandwidth and $1.8 \times$ greater memory capacity per unit price than data center GPUs, making them more cost-efficient for memory-intensive workloads that often underutilize H100
72
+
73
+ ![](images/69fc7a35ecc58b80f14bb8120538e46aec208e4ef0f075ccd10b637ceb4c952d.jpg)
74
+ Figure 3: Benchmarked results for the Llama3-70B model with different GPU types on different workload types. The left three columns show throughput results: the x-axis represents GPU types, and the y-axis represents throughput per unit price (i.e., throughput divided by GPU cost). The right three columns show latency results: the x-axis represents the P5-P100 latencies (from left to right, the ticks denote P5, P10, P15, ... latency), and the y-axis represents the total price (i.e., each latency time multiplied by GPU cost). Results for the Llama3-8B model are demonstrated in Appendix A.
75
+
76
+ ![](images/199d2d62b524fd3dba5b959f6d56f60ea592f53cc28fc1bdcd5d713687111832.jpg)
77
+ Figure 4: Throughput results for Llama3-70B model with different deployment configurations on different workloads. The three-element array represents the DP, TP, and PP degrees. Full benchmarking results are listed in Appendix B.
78
+
79
+ and A100 GPUs due to memory constraints. (iii) The 4090 GPUs (consumer GPUs) deliver excellent performance with the Llama3-8B model. This is because smaller models require significantly less compute and memory, and consumer GPUs offer superior memory bandwidth per unit price, approximately $1.9 \times$ higher than that of the A100 and H100 GPUs. Overall, our experimental results demonstrate that selecting the most appropriate GPU type for specific workloads and models can enhance the cost-efficiency performance of LLM serving by up to $2.27 \times$ . These findings underscore the necessity of aligning GPU types with model and workload demands to maximize both performance and cost-efficiency.
80
+
81
+ Observation-2: Optimal deployment configurations are crucial for maximizing cost-efficiency across models,
82
+
83
+ workloads, and GPU types. Figure 4 presents the benchmark results of various deployment configurations across different models, workloads, and GPU types. The observations can be summarized as follows: (i) Optimal configurations vary by workload type for a given GPU type. For instance, on H100 GPUs serving Llama3-70B, tensor parallelism (TP) (Shoeybi et al., 2019) is most effective for compute- and memory-intensive workloads (e.g., {2455, 510}), while a higher degree of data parallelism (DP, i.e., model replication) performs better for less demanding workloads (e.g., {496, 18}). (ii) Optimal configurations vary by GPU type for a given workload type. For instance, in compute-intensive workloads (e.g., {2455, 18}), the L40 GPUs achieve the best performance using pure pipeline par-
84
+
85
+ allelism (PP) (Huang et al., 2019), while the H100 GPUs excel with a combination of DP and TP. (iii) Optimal configurations also depend on model type. For instance, with Llama3-8B models, DP consistently outperforms model parallelism (i.e., TP and PP). Since the Llama3-8B model has lower memory requirements and can run on a single GPU without model parallelism, increasing the number of model replicas (i.e., raising the DP degree) enhances the system's parallel processing capability, thereby improving cost-efficiency. Overall, our experimental results demonstrate that selecting the most effective deployment configurations can improve system performance by up to $2.61 \times$ . These findings highlight the need for optimized deployment configurations to maximize cost-efficiency in LLM serving.
86
+
87
+ Observation-3: The workload assignment should be co-optimized with the heterogeneous GPU composition and deployment configurations. Effective workload assignment is critical for managing workload heterogeneity and achieving optimal performance. It serves two key objectives: (i) Directing requests to GPU types and deployment configurations that best match their resource demands to enhance serving efficiency. As noted in observation-1 and -2, even with an optimal GPU composition and deployment setup, performance may degrade if workloads are assigned to unsuitable GPUs or configurations. This misalignment can lead to inefficiencies and reduced overall performance. (ii) Balancing workloads across GPUs to prevent overloading or underutilization, thereby maximizing resource utilization. Workload balancing is essential for cost-efficient GPU utilization. In some cases, achieving full GPU capacity requires assigning workloads to suboptimal GPUs or configurations. While this may not be ideal for individual requests, it ultimately improves overall system performance.
88
+
89
+ Constraints: Appropriate resource scheduling is crucial under limited resources and budget constraints. Allocating workloads to the most suitable GPUs is a straightforward strategy for cost-efficient LLM serving. However, real-world deployments often face resource availability and budget limitations: (i) On cloud service platforms, GPU availability fluctuates, leading to shortages during peak periods. For instance, A40 availability on RunPod and Vast.ai can range from 3 to 16 and from 0 to 32 GPUs, respectively, depending on the time. (ii) Budget constraints may prevent users from selecting the optimal GPU for each workload, necessitating compromises in resource allocation. These constraints often force workload assignments to suboptimal GPUs, reducing performance and efficiency. To mitigate these challenges, an effective scheduling algorithm is essential. It should account for the user's budget constraints and real-time GPU availability, enabling efficient and adaptive LLM serving even under constrained conditions.
90
+
91
+ Opportunities: Optimization of heterogeneous GPU deployment for cost-efficient LLM serving.
92
+
93
+ Existing systems typically assume a homogeneous GPU cluster for LLM serving or focus on optimizing performance within a predefined heterogeneous cluster. However, adjusting the heterogeneous GPU composition within the serving cluster to align with specific workload demands offers a more cost-efficient alternative. Based on prior observations, we propose optimizing LLM serving by customizing the deployment of heterogeneous GPU types to meet workload requirements. This includes determining the optimal heterogeneous GPU composition (observation-1), selecting the most effective deployment configurations (observation-2), and implementing the most appropriate workload assignment (observation-3). Ultimately, our aim is to deliver a comprehensive LLM serving plan that meets user requirements, adapts to cloud environment constraints, and maximizes cost-efficiency (constraints).
94
+
95
+ # 4. Scheduling Algorithm
96
+
97
+ # 4.1. Problem Formulation
98
+
99
+ Given the LLMs to be served, a set of heterogeneous workloads, a user-defined budget, and GPU availability on the cloud, we seek a cost-efficient serving plan comprising: (1) GPU composition, i.e., selecting the type and number of GPUs to rent from the cloud while meeting budget and resource requirements; (2) deployment configurations, i.e., organizing the rented GPUs into serving groups, each responsible for serving one model replica, and determining their parallelism strategies; and (3) workload assignment, i.e., determining the allocation of incoming workloads across model replicas. Our objective is to minimize the overall makespan for processing all incoming workloads. The resulting plan must ensure that the user obtains the most cost-efficient LLM serving solution under the specified budgetary and resource constraints.
100
+
101
+ # 4.2. Simple Example
102
+
103
+ Experiment setup. We begin by assuming three GPU types, $\{t_1,t_2,t_3\}$ , each with two units available. The hourly rental prices for these types are 4, 2, and 2 $/h, respectively. We consider two workload types $\{w_{1},w_{2}\}$ , which arrive simultaneously with 80 total requests for $w_{1}$ ( $\lambda_{1} = 80$ ) and 20 total requests for $w_{2}$ ( $\lambda_{2} = 20$ ). We denote by $C_{t,w}$ the throughput (in requests per second) of GPU type $t$ on workload $w$ . If each GPU serves one model replica, the throughputs are $C_{1,1} = 1.0$ , $C_{1,2} = 1.2$ , $C_{2,1} = 0.9$ , $C_{2,2} = 0.9$ , $C_{3,1} = 0.3$ , and $C_{3,2} = 0.5$ . Note that $C_{\sim,1}$ and $C_{\sim,2}$ vary with model parallelism. In Cases 1 and 2, we assume the workload is assigned to each GPU in proportion to its processing rate, so the system-wide throughput for each workload is the sum of individual-GPU rates. In Case 3, we allow workload-aware assignment for further optimization.
104
+
105
+ Case 1: GPU composition. We compare two compositions under the same budget of 8 $/h, where each GPU serves one model replica. Composition 1 consists of $1 \times t_1$ , $1 \times t_2$ , and $1 \times t_3$ , achieving a total throughput of (2.2, 2.6) rps on $(w_1, w_2)$ , with a processing time of 44.05 s. Composition 2 consists of $1 \times t_1$ and $2 \times t_2$ , achieving throughputs of (2.8, 3.0) rps on $(w_1, w_2)$ , with a processing time of 35.24 s. Thus, modifying the GPU composition within the same budget results in a $20\%$ speedup.
106
+
107
+ Case 2: Deployment configuration. Focusing on composition 2, we compare two ways to organize these three GPUs. Configuration 1 assigns each GPU to serve a single model replica, resulting in a processing time of $35.24\mathrm{s}$ . Configuration 2 applies TP to the two $t_2$ GPUs, changing their combined throughput to $2.4\mathrm{rps}$ on $w_{1}$ and $1.5\mathrm{rps}$ on $w_{2}$ , reducing the overall processing time to $30.94\mathrm{s}$ . Thus, modifying the deployment configuration improves the processing time by approximately $14\%$ .
108
+
109
+ Case 3: Workload assignment. Finally, we retain the same composition and TP-based configuration but optimize workload assignment. Specifically, we assign $15\%$ of $w_{1}$ and $100\%$ of $w_{2}$ to the replica with $t_{1}$ , and $85\%$ of $w_{1}$ to the replica with TP on $2 \times t_{2}$ . With this assignment, the overall completion time is reduced to $28.67\mathrm{s}$ . Thus, adjusting the workload assignment results in an additional $8\%$ reduction in processing time.
110
+
111
+ This step-by-step example demonstrates the joint optimization of GPU composition, deployment configuration, and workload assignment for optimal performance. A detailed processing time calculation for each case is in Appendix C.
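+ The arithmetic behind the three cases can be reproduced with the short sketch below; all rates and request counts are the values assumed in the setup above, and the TP throughputs (2.4 and 1.5 rps) are the assumed combined rates from Case 2.

```python
# Per-replica throughputs (requests/s) from the setup: C[type][workload].
C = {"t1": {"w1": 1.0, "w2": 1.2},
     "t2": {"w1": 0.9, "w2": 0.9},
     "t3": {"w1": 0.3, "w2": 0.5}}
lam = {"w1": 80, "w2": 20}   # total requests per workload type

def proportional_time(replicas):
    """Cases 1-2: each workload is split in proportion to per-replica rates,
    so the system-wide rate per workload is the sum of the replica rates."""
    return sum(lam[w] / sum(r[w] for r in replicas) for w in lam)

# Case 1: two GPU compositions under the same 8 $/h budget.
print(proportional_time([C["t1"], C["t2"], C["t3"]]))   # ~44.05 s
print(proportional_time([C["t1"], C["t2"], C["t2"]]))   # ~35.24 s

# Case 2: apply TP to the two t2 GPUs (assumed combined rates: 2.4 / 1.5 rps).
tp_t2 = {"w1": 2.4, "w2": 1.5}
print(proportional_time([C["t1"], tp_t2]))              # ~30.94 s

# Case 3: workload-aware assignment; the system finishes when the slower replica does.
t_tp = 0.85 * lam["w1"] / tp_t2["w1"]                                  # 68 / 2.4
t_t1 = 0.15 * lam["w1"] / C["t1"]["w1"] + lam["w2"] / C["t1"]["w2"]    # 12/1.0 + 20/1.2
print(max(t_tp, t_t1))                                  # ~28.67 s
```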
112
+
113
+ # 4.3. MILP Formulation
114
+
115
+ In this section, we introduce a mixed-integer linear programming (MILP) formulation to find a serving plan, i.e., GPU composition, deployment configurations and workload assignment, that minimizes the overall processing time. An overview of the symbols is shown in Table 2.
116
+
117
+ Let there be $N$ types of GPUs, indexed by $n \in \{1, 2, \dots, N\}$ . We denote the decision on how many GPUs of each type to use (i.e., GPU composition) by a vector $\mathbf{D} = [d_1, d_2, \dots, d_N]$ where each $d_n \geq 0$ represents the number of GPUs of the $n$ -th type. These variables are subject to availability constraints encoded by a vector $\mathbf{A} = [a_1, a_2, \dots, a_N]$ , such that $0 \leq d_n \leq a_n$ , $\forall n = 1, 2, \dots, N$ . Each GPU type $n$ has price $p_n$ (e.g., 1.75 $/h for A100, 2.99 $/h for H100), and memory limit $m_n$ (e.g., 48 GB for L40, 80 GB for H100).
118
+
119
+ Configurations. We consider a set $\mathcal{C}$ of feasible configurations (i.e., deployment configurations). Each configuration $c\in \mathcal{C}$ represents the serving plan for a single model replica, which is characterized by $(v_{c},s_{c},o_{c},h_{c,w})$
120
+
121
+ Table 2: Symbols used in MILP.
122
+
123
+ <table><tr><td>Symbol</td><td>Description</td></tr><tr><td>$N$</td><td>number of GPU types</td></tr><tr><td>$W$</td><td>number of workload types</td></tr><tr><td>$\mathcal{C}$</td><td>set of feasible configurations</td></tr><tr><td>$d_n$</td><td>number of type-$n$ GPUs allocated</td></tr><tr><td>$a_n$</td><td>maximum number of available type-$n$ GPUs</td></tr><tr><td>$p_n$</td><td>rental price of type-$n$ GPUs</td></tr><tr><td>$m_n$</td><td>memory limit of type-$n$ GPUs</td></tr><tr><td>$B$</td><td>user-defined total price budget</td></tr><tr><td>$v_c$</td><td>GPU composition of configuration $c$</td></tr><tr><td>$s_c$</td><td>parallelism strategy of configuration $c$</td></tr><tr><td>$o_c$</td><td>price cost of configuration $c$</td></tr><tr><td>$h_{c,w}$</td><td>throughput of configuration $c$ on workload $w$</td></tr><tr><td>$x_{c,w}$</td><td>fraction of workload $w$ assigned to configuration $c$</td></tr><tr><td>$y_c$</td><td>number of activated copies of configuration $c$</td></tr><tr><td>$T$</td><td>makespan of processing all workloads</td></tr><tr><td>$f_w$</td><td>total requests of workload $w$</td></tr></table>
124
+
125
+ (i) A vector $v_{c} = \{d_{n}(c)\}_{n=1}^{N}$ indicating exactly how many GPUs of each type $n$ are used in configuration $c$ . (ii) An array $s_{c} = \{t_{1}, t_{2}, \ldots, t_{S}\}$ indicating the parallelism strategy used in configuration $c$ . The array length $S$ represents the total number of pipeline stages, and the element $t_{s}$ represents the TP degree of the $s$ -th stage. The summation of all $t_{s}$ should be equal to the total GPU count of configuration $c$ , i.e., $\sum_{s=1}^{S} t_{s} = \sum_{n=1}^{N} d_{n}(c)$ . (iii) A cost $o_{c} = \sum_{n=1}^{N} (d_{n}(c) \times p_{n})$ indicating the total price required for configuration $c$ . (iv) A throughput $h_{c,w}$ indicating the rate at which configuration $c$ processes workload type $w$ , which is obtained through a one-time profiling<sup>1</sup>. By optimizing the configurations, we can obtain the GPU composition and deployment configurations mentioned in §4.1.
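+ To make the tuple $(v_{c},s_{c},o_{c},h_{c,w})$ concrete, a minimal sketch of one possible representation is given below; the class and field names are illustrative, with $o_c$ derived from the Table 1 prices and the consistency check following $\sum_{s} t_{s} = \sum_{n} d_{n}(c)$ .

```python
from dataclasses import dataclass, field
from typing import Dict, List

GPU_PRICE = {"A40": 0.55, "H100": 2.99}   # $/h, a subset of Table 1

@dataclass
class ReplicaConfig:
    v: Dict[str, int]             # v_c: number of GPUs of each type in the replica
    s: List[int]                  # s_c: TP degree of each pipeline stage
    h: Dict[str, float]           # h_{c,w}: profiled throughput per workload type
    o: float = field(init=False)  # o_c: hourly price of the configuration

    def __post_init__(self):
        # The parallelism strategy must account for every GPU in the replica.
        assert sum(self.s) == sum(self.v.values()), "TP/PP degrees != GPU count"
        self.o = sum(GPU_PRICE[t] * n for t, n in self.v.items())

# Example: one replica on 4x A40, two pipeline stages with TP degree 2 each.
cfg = ReplicaConfig(v={"A40": 4}, s=[2, 2], h={"w1": 1.8, "w2": 1.1})
print(cfg.o)   # 2.2 ($/h)
```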
126
+
127
+ Workloads and assignment. Let there be $W$ workload types, indexed by $w \in \{1,2,\dots ,W\}$ . Each workload $w$ must be fully served (i.e., 100% coverage). We allow fractional assignment: a fraction $x_{c,w} \in [0,1]$ of workload $w$ may be processed by configuration $c$ . Concretely, $\sum_{c \in \mathcal{C}} x_{c,w} = 1, \forall w = 1,2,\dots ,W$ . We also introduce an integer variable $y_c \in \{0,1,2,\ldots \}$ indicating how many copies of configuration $c$ are chosen (activated). If $y_c = 0$ , then $x_{c,w}$ must be zero for all $w$ . By co-optimizing the workload assignment (the fractions $x_{c,w}$ ) with the activated configurations $(y_c)$ , we determine the final workload assignment as described in §4.1.
128
+
129
+ Budget and GPU constraints. A valid configuration set $\mathcal{C}$ must also satisfy the following constraints: (i) the allocated number of GPUs for each type must not exceed the available number, i.e., $0\leq \sum_{c\in \mathcal{C}}(d_n(c)\times y_c)\leq a_n,\forall n = 1,\ldots ,N;$ (ii) the total cost of all chosen configurations must be within
130
+
131
+ the user-defined budget $B$ , i.e., $\sum_{c \in \mathcal{C}} (o_c \times y_c) \leq B$ .
132
+
133
+ Optimization objective. We define a makespan variable $T \geq 0$ to represent the overall completion time. For a configuration $c$ , if it is instantiated $y_{c}$ times and processes fractions $x_{c,w}$ of workload $w$ , each replica provides a throughput of $h_{c,w}$ . Let $f_{w}$ be the total number of requests for workload $w$ . Consequently, the total effective throughput for $c$ is $y_{c} \times h_{c,w}$ , and the time required on $c$ is given by $T_{c} = \sum_{w=1}^{W} \frac{x_{c,w} \cdot f_{w}}{y_{c} \cdot h_{c,w}}$ . Since all chosen configurations run in parallel, the system completes once the slowest configuration finishes. Thus, we have $T_{c} \leq T$ for all $c \in \mathcal{C}$ . Our optimization objective is to minimize $T$ .
134
+
135
+ MILP formulation. The problem can be summarized as the following Mixed-Integer Linear Program (MILP):
136
+
137
+ $$
+ \arg\min \; T \tag{1}
+ $$
+ 
+ $$
+ \text{s.t.}\quad \sum_{c\in\mathcal{C}} x_{c,w} = 1,\ \forall w \quad \text{(Assignment Constraint)} \tag{2}
+ $$
+ 
+ $$
+ \sum_{w=1}^{W} \frac{x_{c,w}\cdot f_{w}}{y_{c}\cdot h_{c,w}} \leq T,\ \forall c \quad \text{(Makespan)} \tag{3}
+ $$
+ 
+ $$
+ x_{c,w} \leq y_{c},\ \forall c,w \quad \text{(Activation Coupling)} \tag{4}
+ $$
+ 
+ $$
+ \sum_{c\in\mathcal{C}} \left(o_{c}\times y_{c}\right) \leq B \quad \text{(Budget Constraint)} \tag{5}
+ $$
+ 
+ $$
+ \sum_{c\in\mathcal{C}} \left(d_{n}(c)\times y_{c}\right) \leq a_{n},\ \forall n \quad \text{(GPU Availability)} \tag{6}
+ $$
+ 
+ $$
+ y_{c} \in \{0, 1, 2, \dots\} \tag{7}
+ $$
160
+
161
+ This formulation determines which configurations are used $(y_{c})$ and how the workload fractions $(x_{c,w})$ are distributed, subject to memory limit, price budget, and GPU availability constraints, in order to minimize the makespan $T$ . Note that $d_{n}(c)$ is an integer; we enumerate all feasible integer combinations $\{d_n(c)\}_{n = 1}^N$ in a precomputation step. In contrast, $x_{c,w}$ is a continuous variable, and the solver relies on branch-and-bound to systematically narrow the feasible region and converge to an optimal fractional assignment.
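+ A hedged sketch of this formulation using the open-source PuLP modeling library is shown below (the paper does not prescribe a particular solver). For brevity it treats each candidate replica as a separate configuration with a binary activation $y_c \in \{0,1\}$ rather than an integer copy count, which keeps constraint (3) linear; all problem data are placeholders.

```python
# pip install pulp
from pulp import (LpProblem, LpMinimize, LpVariable, lpSum,
                  LpBinary, PULP_CBC_CMD, LpStatus)

# --- Placeholder problem data (illustrative only) ---------------------------
W = ["w1", "w2"]                      # workload types
f = {"w1": 8000, "w2": 2000}          # total requests per workload, f_w
a = {"A40": 8, "H100": 4}             # GPU availability per type, a_n
B = 20.0                              # price budget ($/h)

# Candidate configurations: GPUs used d_n(c), hourly cost o_c,
# and profiled throughput per workload h_{c,w} (requests/s).
configs = {
    "A40x4_tp2pp2": {"d": {"A40": 4},  "o": 2.20, "h": {"w1": 1.6, "w2": 1.0}},
    "H100x2_tp2":   {"d": {"H100": 2}, "o": 5.98, "h": {"w1": 3.0, "w2": 2.4}},
    "H100x1":       {"d": {"H100": 1}, "o": 2.99, "h": {"w1": 1.4, "w2": 1.3}},
}

prob = LpProblem("heterogeneous_serving_plan", LpMinimize)
T = LpVariable("makespan", lowBound=0)
y = {c: LpVariable(f"y_{c}", cat=LpBinary) for c in configs}          # activation
x = {(c, w): LpVariable(f"x_{c}_{w}", lowBound=0, upBound=1)
     for c in configs for w in W}                                     # assignment

prob += T                                                             # Eq. (1)
for w in W:                                                           # Eq. (2)
    prob += lpSum(x[c, w] for c in configs) == 1
for c, cfg in configs.items():
    # Eq. (3); y_c is omitted from the denominator because activation is binary
    # and Eq. (4) already forces x to zero for inactive configurations.
    prob += lpSum(x[c, w] * f[w] / cfg["h"][w] for w in W) <= T
    for w in W:                                                       # Eq. (4)
        prob += x[c, w] <= y[c]
prob += lpSum(cfg["o"] * y[c] for c, cfg in configs.items()) <= B     # Eq. (5)
for n in a:                                                           # Eq. (6)
    prob += lpSum(cfg["d"].get(n, 0) * y[c] for c, cfg in configs.items()) <= a[n]

prob.solve(PULP_CBC_CMD(msg=False))
print(LpStatus[prob.status], "makespan =", T.value(), "s")
for c in configs:
    if y[c].value() > 0.5:
        print(c, {w: round(x[c, w].value(), 2) for w in W})
```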
162
+
163
+ Complexity analysis. The number of integer activation variables $y_{c}$ grows combinatorially with the number of feasible configurations $|\mathcal{C}|$ . In the worst case, $|\mathcal{C}|$ can be on the order of $\prod_{n=1}^{N}(a_{n} + 1)$ . Since MILP solvers (e.g., branch-and-bound) have worst-case running time exponential in the number of integer variables, the theoretical worst-case time complexity scales as $\mathcal{O}\big(\mathrm{poly}(|\mathcal{C}|, W, N) \times 2^{|\mathcal{C}|}\big)$ , where the polynomial factor accounts for the overhead of processing each node in the search tree (e.g., solving continuous relaxations of the MILP). As a result, the solution time escalates rapidly with the number of candidate configurations.
164
+
165
+ Other constraints and heuristics. We introduce additional constraints and heuristics to reduce the search space. Concretely, we enforce a memory constraint to eliminate configurations with insufficient GPU memory, and a connectivity constraint to exclude those involving disconnected GPUs. Additionally, we refine the configuration search by restricting TP to single machines and enabling non-uniform PP
166
+
167
+ layer partitioning based on memory allocation. See Appendix D for details.
168
+
169
+ Binary search. To address the long computation times associated with the MILP solver for large-scale problems, we introduce a binary-search-on-T method to accelerate the search process. Rather than directly minimizing the makespan $T$ , we iteratively check whether a valid serving plan exists for different candidate values of $\hat{T}$ , based on reasonable lower and upper bounds. For a full explanation, refer to Appendix F, and we evaluate the effectiveness of this binary search method in §5.2.
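+ The idea can be sketched as follows; the feasibility oracle is left abstract here (the paper couples it with a knapsack-style approximation, see Appendix F), and the bounds, tolerance, and stand-in check are illustrative.

```python
def binary_search_makespan(lo: float, hi: float, feasible, tol: float = 1.0) -> float:
    """Return (approximately) the smallest candidate makespan T_hat in [lo, hi]
    for which feasible(T_hat) reports that a valid serving plan exists.
    Assumes monotonicity: if T_hat is achievable, so is any larger value."""
    best = hi
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if feasible(mid):
            best, hi = mid, mid   # a plan exists; try to tighten the makespan
        else:
            lo = mid              # infeasible; relax the candidate makespan
    return best

# Illustrative stand-in oracle: pretend any makespan of at least 1200 s is achievable.
print(binary_search_makespan(0.0, 5000.0, feasible=lambda t: t >= 1200.0))
```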
170
+
171
+ Extension to multiple LLM serving. Our MILP formulation can be easily adapted to scenarios involving multiple LLMs, such as simultaneously serving both Llama3-8B and Llama3-70B models. To accommodate this, we introduce a model-type dimension to the decision variables and constraints. This ensures that workload assignments, memory requirements, and other constraints are optimized for each model type. The objective remains to minimize the overall makespan $T$ , while also taking into account GPU availability, budget constraints, and other constraints across all model types. For a detailed explanation of the formulation, please refer to Appendix E. We demonstrate the evaluation of our method for multiple model serving in §5.2.
172
+
173
+ # 5. Experiments
174
+
175
+ # 5.1. Experimental Setup
176
+
177
+ **Environments.** Our experiments are conducted using two types of data center servers (H100 and A100), three types of workstation servers (A40, RTX A6000, and L40), and one type of consumer server (RTX 4090). In data center servers, GPUs are linked by NVLink (300 GB/s), while in workstation and consumer servers, GPUs are linked by PCIe (60 GB/s). Servers are inter-connected via Ethernet with a bandwidth of 5 Gb/s. All experiments are conducted with vLLM (Kwon et al., 2023).
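+ For reference, a single model replica with a chosen tensor-parallel degree can be launched with vLLM's offline Python API roughly as follows; the model path, prompt, and TP degree are placeholders, and this is only a sketch of one deployment configuration, not the paper's serving harness.

```python
from vllm import LLM, SamplingParams

# One replica of a Llama3 model with tensor parallelism across 2 GPUs.
llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct", tensor_parallel_size=2)

outputs = llm.generate(
    ["Summarize the benefits of heterogeneous GPU serving in one sentence."],
    SamplingParams(max_tokens=128),
)
print(outputs[0].outputs[0].text)
```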
178
+
179
+ Baselines. We compare our method, which uses heterogeneous cloud resources, against various homogeneous setups:
180
+
181
+ - Heterogeneous setups: We rent GPUs from Vast.ai, a cloud provider offering a range of GPU types. The rentals are based on real-time GPU availability on the cloud. For our experiments, we randomly selected four different GPU availabilities (shown in Table 4 in Appendix H) under varying price budgets of 15, 30, and 60 $/h.
182
+ - Homogeneous setups: We rent H100 GPUs (representative data center GPUs), RTX A6000 GPUs (workstation GPUs), and RTX 4090 GPUs (consumer GPUs) under different price budgets, with each GPU type representing a homogeneous baseline. For example, a budget of 60 $/h can rent up to 20 H100 GPUs. Note that we fine-tune the deployment configurations and workload assignments us-
183
+
184
+ ing our scheduling algorithm to optimize the performance of each homogeneous baseline.
185
+
186
+ Models and traces. Our evaluation is conducted on the Llama3-8B and Llama3-70B models. We follow prior work to generate workload traces based on real-world data. Our testing traces are subsampled from three sources: real workload traces collected over one month from the Swiss AI Center, the WildChat dataset, and the Azure-Trace production traces. Each trace comprises multiple workload types, with their ratios shown in Table 5 in Appendix I.
187
+
188
+ Evaluation metrics. We focus on the overall system throughput and various percentile latencies (i.e., p10, ..., p90, p100). P90 latency represents the maximum response time within which $90\%$ of all requests are completed.
189
+
190
+ # 5.2. Experimental Results
191
+
192
+ End-to-end system performance. We evaluated our method across various traces, cloud GPU availability scenarios, price budgets, model types, and homogeneous baselines. Experimental results in Figure 5 and Figure 6 show that our method improves system throughput by up to $41\%$ (25% on average) while reducing percentile latencies by up to $54\%$ (20% on average). In traces 1 and 2, H100 (Homo) achieves the best performance among all baselines. In our plan, the GPU composition depends on the budget. With a high budget (60 $/h), data center GPUs are preferred, making up approximately $51\%$ of our rented resources for request processing. In contrast, with a low budget (15 $/h), workstation GPUs are favored due to their lower cost. In trace 3, A6000 (Homo) demonstrates the highest performance among all baselines. In this scenario, our plan primarily relies on workstation GPUs, which constitute approximately $93\%$ of the rented resources for request processing. Additionally, as shown in Figure 15 in Appendix J, the 4090 (Homo) delivers the best performance among all baselines for the Llama3-8B model. In this case, our plan prefers consumer GPUs, which form the majority of our rented resources and handle $53\%$ of overall request processing.
193
+
194
+ Comparison with HexGen. We also compare our method with the state-of-the-art heterogeneous serving framework, HexGen (JIANG et al.). Since HexGen schedules workloads based on a fixed GPU composition, we evaluate it using two setups: (i) a uniform composition, where six GPU types are evenly allocated within the budget, and (ii) the optimal composition used by our method. As shown in Figure 7, HexGen with a uniform composition suffers up to $35\%$ and on average $29\%$ performance degradation due to suboptimal GPU allocation. Even with the optimal composition, our method outperforms HexGen by up to $18\%$ and on average $14\%$ , benefiting from workload-aware scheduling.
195
+
196
+ Comparison with Helix. We conduct additional experiments
197
+
198
+ ![](images/43432f2c3eaac67791f4c3e130278215b3b198bd53d235b9ae82386e6d4a238f.jpg)
199
+ Figure 5: End-to-end throughput results on Llama3-70B model with different setups. We further demonstrate the Llama3-8B results in Appendix J.
200
+
201
+ ![](images/7e3563855caade185b0616c64ccb8665aa6a3bcd30d4aebaab4217b1abe342ae.jpg)
202
+ Figure 6: End-to-end latency results on Llama3-70B model with different setups.
203
+
204
+ ![](images/7d21e7791751031b2413ef42a5fd5e7a58147670256a9fff7362b906bf9aa414.jpg)
205
+ Figure 7: Ours vs. HexGen. The first and second bars in each picture represent HexGen using a uniform and optimal GPU composition.
206
+
207
+ comparing our system with Helix (Mei et al., 2024). Specifically, we compare our method against Helix's single-cluster case (the optimal case reported in their paper) under a price budget of 15 $/h on the Azure-Trace dataset (i.e., trace 2). While Helix optimizes heterogeneous LLM serving using max-flow and MILP scheduling, our method explicitly considers workload heterogeneity and GPU composition optimization, resulting in greater cost efficiency. Experimental
208
+
209
+ ![](images/d740f861aaf2ea66a7678a507226d8d97c43a265249446e6d899703fb445a5b6.jpg)
210
+ Figure 8: Ablation study of Llama3-70B on traces 1 and 2.
211
+
212
+ results show that our system outperforms Helix by $25 - 35\%$ .
213
+
214
+ Table 3: Performance comparison of Helix and our method.
215
+
216
+ <table><tr><td></td><td>Llama-30B</td><td>Llama3-70B</td></tr><tr><td>Helix</td><td>8.49 req/s</td><td>5.72 req/s</td></tr><tr><td>Ours</td><td>11.43 req/s (35%↑)</td><td>7.13 req/s (25%↑)</td></tr></table>
217
+
218
+ Ablation studies. We assess the impact of each optimization target in our scheduling algorithm by systematically disabling them. Three baselines are considered: (i) Uniform GPU composition: GPUs are rented uniformly across six types within the given budget. This evaluates the performance gains from optimized heterogeneous GPU composition. (ii) Uniform deployment configuration: Instead of optimizing deployment per model replica, TP is uniformly applied across all replicas. This measures the impact of deployment configuration optimization. (iii) Rule-based request assignment: Requests are assigned using a Round-Robin strategy based on real trace arrivals, assessing the benefit of heterogeneous-aware workload assignment. As shown in Figure 8, disabling heterogeneous GPU composition reduces throughput by up to $27\%$ (average: $20\%$ ), deployment optimization by up to $34\%$ (average: $33\%$ ), and workload assignment by up to $32\%$ (average: $29\%$ ). These results highlight the necessity of each optimization for high-performance LLM serving in heterogeneous environments.
219
+
220
+ Algorithm efficiency. We evaluate two strategy search methods from §4: (i) MILP and (ii) binary search. As shown in Figure 9, the left plot illustrates their scalability, while the right plot depicts algorithm performance during the search process. Compared to MILP, which exhaustively explores all combinations of heterogeneous GPU compositions, deployment configurations, and workload assignments, the binary search method, enhanced with feasibility checks using knapsack approximation, achieves approximately a $4 \times$ reduction in search time. This improvement comes with only marginal differences in algorithm performance, with deviations of less than $1\%$ .
221
+
222
+ Multi-model extension. We further evaluate our system in a multi-model serving scenario (discussed in §4.3), assuming that $80\%$ of the requests are assigned to the Llama3-
223
+
224
+ ![](images/7e01de20a50b1a3fca00e611f679c813400f682cb80dfa5d4fc308d171a1133f.jpg)
225
+ Figure 9: Algorithm scalability and efficiency.
226
+
227
+ ![](images/896546d815f35e0db132b985ae8edace416637345b9e7a3ccb5a59342189cb6b.jpg)
228
+ Figure 10: End-to-end experiments on multiple model types (Llama3-8B and Llama3-70B) with different setups.
229
+
230
+ 8B model, while the remaining $20\%$ are assigned to the Llama3-70B model. As shown in Figure 10, our method outperforms homogeneous baselines, achieving up to a $35\%$ (average: $23\%$ ) performance gain. In the 60 $/h case, our scheduling algorithm allocates $70\%$ of computing resources to Llama3-70B and $30\%$ to Llama3-8B. In the 30 $/h case, the allocation shifts to $77\%$ and $23\%$ . Our algorithm balances resource allocation based on model demands, enabling efficient multi-model serving in heterogeneous clusters.
231
+
232
+ System performance vs. price budget. We compare our system with homogeneous baselines under different price budgets. The performance gap narrows as the budget increases because our heterogeneous plan becomes capped by cloud GPU availability, whereas the homogeneous baselines are assumed to have an unlimited number of GPUs. See Appendix K for details.
233
+
234
+ Additional discussion and experiments. We provide further discussion and experiments on online scheduling for dynamic workloads, as well as on the trade-offs between cost-efficiency and request latency, in Appendix M.
235
+
236
+ # 6. Conclusion
237
+
238
+ This paper aims to address the questions of why and how heterogeneous cloud resources can be utilized for cost-efficient LLM serving. Specifically, we benchmark the cost-efficiency of LLM serving over heterogeneous GPUs, following which, a novel scheduling algorithm is developed. Experimental results demonstrate that our approach outperforms existing works substantially.
239
+
240
+ # Impact Statement
241
+
242
+ This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
243
+
244
+ # Acknowledgment
245
+
246
+ This work is supported by the HKUST startup grant R9895 from CSE; RGC-ECS project 26218024; RGC-NSFC project CRS_HKUST601/24. This work was supported as part of the Swiss AI Initiative by a grant from the Swiss National Supercomputing Centre (CSCS) on Alps.
247
+
248
+ # References
249
+
250
+ Agrawal, A., Kedia, N., Mohan, J., Panwar, A., Kwatra, N., Gulavani, B. S., Ramjee, R., and Tumanov, A. Vidur: A large-scale simulation framework for llm inference. Proceedings of Machine Learning and Systems, 6:351-366, 2024a.
251
+ Agrawal, A., Kedia, N., Panwar, A., Mohan, J., Kwatra, N., Gulavani, B., Tumanov, A., and Ramjee, R. Taming {Throughput-Latency} tradeoff in {LLM} inference with {Sarathi-Serve}. In 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), pp. 117-134, 2024b.
252
+ Anthropic. The claude 3 model family: Opus, sonnet, haiku, 2024. URL https://www-cdn.anthropic.com/de8ba9b01c9ab7cbabf5c33b80b7bbc618857627/Model_Card_Claude_3.pdf.
253
+ Azure. Azure public dataset, 2024. URL https://github.com/Azure/AzurePublicDataset.
254
+ Borzunov, A., Baranchuk, D., Dettmers, T., Riabinin, M., Belkada, Y., Chumachenko, A., Samygin, P., and Raffel, C. Petals: Collaborative inference and fine-tuning of large models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), pp. 558-568, 2023.
255
+ Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.
256
+ GitHub. The world's most widely adopted ai developer tool, 2024. URL https://github.com/features/copilot.
257
+ Griggs, T., Liu, X., Yu, J., Kim, D., Chiang, W.-L., Cheung, A., and Stoica, I. Mélange: Cost efficient large language model serving by exploiting gpu heterogeneity. arXiv preprint arXiv:2404.14527, 2024.
258
+
259
+ He, G. and Yoneki, E. CuAsmRL: Optimizing GPU SASS schedules via deep reinforcement learning. In Proceedings of the 23rd ACM/IEEE International Symposium on Code Generation and Optimization, CGO '25, pp. 493-506, New York, NY, USA, 2025. Association for Computing Machinery. ISBN 9798400712753. doi: 10.1145/3696443.3708943. URL https://doi.org/10.1145/3696443.3708943.
260
+ Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multitask language understanding. In International Conference on Learning Representations.
261
+ Hu, C., Huang, H., Xu, L., Chen, X., Xu, J., Chen, S., Feng, H., Wang, C., Wang, S., Bao, Y., et al. Inference without interference: Disaggregate llm inference for mixed downstream workloads. arXiv preprint arXiv:2401.11181, 2024.
262
+ Huang, Y., Cheng, Y., Bapna, A., First, O., Chen, D., Chen, M., Lee, H., Ngiam, J., Le, Q. V., Wu, Y., et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems, 32, 2019.
263
+ Jeon, J. and Lee, S. Large language models in education: A focus on the complementary relationship between human teachers and chatgpt. Education and Information Technologies, 28(12):15873-15892, 2023.
264
+ Jiang, A. Q., Sablayrolles, A., Roux, A., Mensch, A., Savary, B., Bamford, C., Chaplot, D. S., Casas, D. d. l., Hanna, E. B., Bressand, F., et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024.
265
+ JIANG, Y., Yan, R., Yao, X., Zhou, Y., Chen, B., and Yuan, B. Hexgen: Generative inference of large language model over heterogeneous environment. In *Forty-first International Conference on Machine Learning*.
266
+ Jiang, Y., Fu, F., Yao, X., Wang, T., Cui, B., Klimovic, A., and Yoneki, E. Thunderserve: High-performance and cost-efficient llm serving in cloud environments. arXiv preprint arXiv:2502.09334, 2025a.
267
+ Jiang, Y., Yan, R., and Yuan, B. Hexgen-2: Disaggregated generative inference of llms in heterogeneous environment. arXiv preprint arXiv:2502.07903, 2025b.
268
+ Kwon, W., Li, Z., Zhuang, S., Sheng, Y., Zheng, L., Yu, C. H., Gonzalez, J., Zhang, H., and Stoica, I. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pp. 611-626, 2023.
269
+
270
+ Li, H., Fu, F., Ge, H., Lin, S., Wang, X., Niu, J., Miao, X., and Cui, B. Hetu v2: A general and scalable deep learning system with hierarchical and heterogeneous single program multiple data annotations. arXiv preprint arXiv:2504.20490, 2025.
271
+ Li, Z., Zheng, L., Zhong, Y., Liu, V., Sheng, Y., Jin, X., Huang, Y., Chen, Z., Zhang, H., Gonzalez, J. E., et al. {AlpaServe}: Statistical multiplexing with model parallelism for deep learning serving. In 17th USENIX Symposium on Operating Systems Design and Implementation (OSDI 23), pp. 663-679, 2023.
272
+ Liu, A., Feng, B., Xue, B., Wang, B., Wu, B., Lu, C., Zhao, C., Deng, C., Zhang, C., Ruan, C., et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.
273
+ Liu, Y., He, H., Han, T., Zhang, X., Liu, M., Tian, J., Zhang, Y., Wang, J., Gao, X., Zhong, T., et al. Understanding llms: A comprehensive overview from training to inference. arXiv preprint arXiv:2401.02038, 2024b.
274
+ Liu, Z., Wang, J., Dao, T., Zhou, T., Yuan, B., Song, Z., Shrivastava, A., Zhang, C., Tian, Y., Re, C., et al. Dejavu: Contextual sparsity for efficient llms at inference time. In International Conference on Machine Learning, pp. 22137-22176. PMLR, 2023.
275
+ Mei, Y., Zhuang, Y., Miao, X., Yang, J., Jia, Z., and Vinayak, R. Helix: Distributed serving of large language models via max-flow on heterogeneous gpus. arXiv preprint arXiv:2406.01566, 2024.
276
+ Miao, X., Shi, C., Duan, J., Xi, X., Lin, D., Cui, B., and Jia, Z. Spotserve: Serving generative large language models on preemptible instances. In Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 2, pp. 1112-1127, 2024.
277
+ Oh, H., Kim, K., Kim, J., Kim, S., Lee, J., Chang, D.-s., and Seo, J. ExeGPT: Constraint-aware resource scheduling for LLM inference. In Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 2, pp. 369-384, 2024.
278
+ OpenAI. Openai gpt-4o, 2024. URL https://platforrm.openai.com/docs/models/gpt-4o.
279
+ Patel, P., Choukse, E., Zhang, C., Shah, A., Goiri, I., Maleki, S., and Bianchini, R. Splitwise: Efficient generative llm inference using phase splitting. In 2024 ACM/IEEE 51st Annual International Symposium on Computer Architecture (ISCA), pp. 118-132. IEEE, 2024.
280
+
281
+ Patke, A., Reddy, D., Jha, S., Qiu, H., Pinto, C., Narayanaswami, C., Kalbarczyk, Z., and Iyer, R. Queue management for slo-oriented large language model serving. In Proceedings of the 2024 ACM Symposium on Cloud Computing, pp. 18-35, 2024.
282
+ Peng, C., Yang, X., Chen, A., Smith, K. E., PourNejatian, N., Costa, A. B., Martin, C., Flores, M. G., Zhang, Y., Magoc, T., et al. A study of generative large language model for medical research and healthcare. NPJ digital medicine, 6(1):210, 2023.
283
+ Peng, Y., Jiang, Y., Wang, C., and Yuan, B. Hexgen-text2sql: Optimizing lvm inference request scheduling for agentic text-to-sql workflow. arXiv preprint arXiv:2505.05286, 2025.
284
+ Qiao, Y., Anzai, S., Yu, S., Ma, H., Wang, Y., Kim, M., and Xu, H. Conserve: Harvesting gpus for low-latency and high-throughput large language model serving. arXiv preprint arXiv:2410.01228, 2024.
285
+ Qin, R., Li, Z., He, W., Zhang, M., Wu, Y., Zheng, W., and Xu, X. Mooncake: Kimi's kvcache-centric architecture for llm serving. arXiv preprint arXiv:2407.00079, 2024.
286
+ Reid, M., Savinov, N., Teplyashin, D., Lepikhin, D., Lillicrap, T., Alayrac, J.-b., Soricut, R., Lazaridou, A., First, O., Schrittwieser, J., et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.
287
+ Shoeybi, M., Patwary, M., Puri, R., LeGresley, P., Casper, J., and Catanzaro, B. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019.
288
+ Stojkovic, J., Zhang, C., Goiri, I., Torrellas, J., and Choukse, E. Dynamollm: Designing llm inference clusters for performance and energy efficiency. arXiv preprint arXiv:2408.00741, 2024.
289
+ Sun, B., Huang, Z., Zhao, H., Xiao, W., Zhang, X., Li, Y., and Lin, W. Llumnix: Dynamic scheduling for large language model serving. In 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), pp. 173-191, 2024.
290
+ Wang, Y., Chen, Y., Li, Z., Kang, X., Tang, Z., He, X., Guo, R., Wang, X., Wang, Q., Zhou, A. C., et al. Burstgpt: A real-world workload dataset to optimize llm serving systems. arXiv preprint arXiv:2401.17644, 2024a.
291
+ Wang, Y., Chen, Y., Li, Z., Tang, Z., Guo, R., Wang, X., Wang, Q., Zhou, A. C., and Chu, X. Towards efficient and reliable llm serving: A real-world workload study. arXiv preprint arXiv:2401.17644, 2024b.
292
+
293
+ Wu, B., Zhong, Y., Zhang, Z., Huang, G., Liu, X., and Jin, X. Fast distributed inference serving for large language models. arXiv preprint arXiv:2305.05920, 2023.
294
+ Yan, R., Jiang, Y., Tao, W., Nie, X., Cui, B., and Yuan, B. Flashflex: Accommodating large language model training over heterogeneous environment. arXiv preprint arXiv:2409.01143, 2024.
295
+ Yu, G.-I., Jeong, J. S., Kim, G.-W., Kim, S., and Chun, B.-G. Orca: A distributed serving system for {Transformer-Based} generative models. In 16th USENIX Symposium on Operating Systems Design and Implementation (OSDI 22), pp. 521-538, 2022.
296
+ Zhang, J., Huang, H., Zhang, P., Wei, J., Zhu, J., and Chen, J. Sageattention2: Efficient attention with thorough outlier smoothing and per-thread int4 quantization. In International Conference on Machine Learning (ICML), 2025a.
297
+ Zhang, J., Wei, J., Zhang, P., Xu, X., Huang, H., Wang, H., Jiang, K., Zhu, J., and Chen, J. Sageattention3: Microscaling fp4 attention for inference and an exploration of 8-bit training. arXiv preprint arXiv:2505.11594, 2025b.
298
+ Zhang, J., Wei, J., Zhang, P., Zhu, J., and Chen, J. Sageattention: Accurate 8-bit attention for plug-and-play inference acceleration. In International Conference on Learning Representations (ICLR), 2025c.
299
+ Zhang, J., Xiang, C., Huang, H., Wei, J., Xi, H., Zhu, J., and Chen, J. Spargeattn: Accurate sparse attention accelerating any model inference. In International Conference on Machine Learning (ICML), 2025d.
300
+ Zhang, J., Xu, X., Wei, J., Huang, H., Zhang, P., Xiang, C., Zhu, J., and Chen, J. Sageattention2++: A more efficient implementation of sageattention2. arXiv preprint arXiv:2505.21136, 2025e.
301
+ Zhao, J., Wan, B., Peng, Y., Lin, H., and Wu, C. LLM-PQ: Serving LLM on heterogeneous clusters with phase-aware partition and adaptive quantization. In Proceedings of the 29th ACM SIGPLAN Annual Symposium on Principles and Practice of Parallel Programming (PPoPP '24), Edinburgh, 2024a.
302
+ Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations.
303
+ Zhao, Y., Yang, S., Zhu, K., Zheng, L., Kasikci, B., Zhou, Y., Xing, J., and Stoica, I. Blendserve: Optimizing offline inference for auto-regressive large models with resource-aware batching. arXiv preprint arXiv:2411.16102, 2024b.
304
+
305
+ Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E., et al. LMSYS-Chat-1M: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations.
306
+ Zhong, Y., Liu, S., Chen, J., Hu, J., Zhu, Y., Liu, X., Jin, X., and Zhang, H. DistServe: Disaggregating prefill and decoding for goodput-optimized large language model serving. In 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), pp. 193-210, 2024.
307
+ Zhou, Z., Wei, X., Zhang, J., and Sun, G. {PetS}: A unified framework for {Parameter-Efficient} transformers serving. In 2022 USENIX Annual Technical Conference (USENIX ATC 22), pp. 489-504, 2022.
308
+
309
+ ![](images/a51f18461edc5154ea1a3f3e545709f2d7a3b43f20ee98a0e8f829a47de6f194.jpg)
310
+ Figure 11: Benchmarked results for Llama3-8B model with different GPU types on different workload types.
311
+
312
+ # A. Benchmarking Results for Llama3-8B
313
+
314
+ We demonstrate the benchmark results for Llama3-8B model in Figure 11.
315
+
316
+ # B. Benchmarking Results of Different Deployment Configurations for Remaining GPUs
317
+
318
+ We demonstrate the benchmark results for different deployment configurations in Figure 12 and Figure 13.
319
+
320
+ # C. Simple Example
321
+
322
+ Experiment setup. We begin by assuming three GPU types, $\{t_1,t_2,t_3\}$ , each with two units available. The hourly rental prices for these types are 4, 2, and 2 $/h, respectively. We consider two workload types $\{w_{1},w_{2}\}$ , which arrive simultaneously with 80 total requests for $w_{1}$ ( $\lambda_{1} = 80$ ) and 20 total requests for $w_{2}$ ( $\lambda_{2} = 20$ ). We denote by $C_{t,w}$ the throughput (in requests per second) of GPU type $t$ on workload $w$ . If each GPU serves one model replica, the throughputs are $C_{1,1} = 1.0$ , $C_{1,2} = 1.2$ , $C_{2,1} = 0.9$ , $C_{2,2} = 0.9$ , $C_{3,1} = 0.3$ , and $C_{3,2} = 0.5$ . Note that $C_{\sim,1}$ and $C_{\sim,2}$ vary with model parallelism. In Cases 1 and 2, we assume the workload is assigned to each GPU in proportion to its processing rate, so the system-wide throughput for each workload is the sum of individual-GPU rates. In Case 3, we allow workload-aware assignment for further optimization.
323
+
324
+ Case 1: GPU composition. We compare two compositions under the same budget of 8 $/h, where each GPU is responsible for serving one model replica. Composition 1 uses $1 \times t_1$ , $1 \times t_2$ , and $1 \times t_3$ . This setup achieves a total throughput of $(1.0 + 0.9 + 0.3) = 2.2$ rps on $w_1$ and $(1.2 + 0.9 + 0.5) = 2.6$ rps on $w_2$ , giving a processing time of $\left(\lambda_1 / C_{\sim,1} + \lambda_2 / C_{\sim,2}\right) = (80 / 2.2 + 20 / 2.6) \approx 44.05$ s. Composition 2 uses $1 \times t_1$ and $2 \times t_2$ , for throughputs of $(1.0 + 0.9 + 0.9) = 2.8$ rps on $w_1$ and $(1.2 + 0.9 + 0.9) = 3.0$ rps on $w_2$ , so $(80 / 2.8 + 20 / 3.0) \approx 35.24$ s. In this case, changing the GPU composition under the same price budget results in a $20\%$ speedup.
325
+
326
+ Case 2: Deployment configuration. Focusing on composition 2, we compare two ways to organize these three GPUs. Configuration 1 keeps all GPUs in a purely DP style (i.e., each GPU is responsible for serving one model replica), summing up to $2.8\mathrm{rps}$ on $w_{1}$ and $3.0\mathrm{rps}$ on $w_{2}$ , matching the $35.24\mathrm{s}$ above. Configuration 2 applies TP to the two $t_{2}$ GPUs, which changes their combined rate, e.g., to $2.4\mathrm{rps}$ on $w_{1}$ and $1.5\mathrm{rps}$ on $w_{2}$ . Together with the single $t_{1}$ GPU $(1.0\mathrm{rps}$ on $w_{1}$ and $1.2\mathrm{rps}$ on $w_{2}$ ), the total throughput becomes $(3.4, 2.7)\mathrm{rps}$ for $(w_{1}, w_{2})$ . The corresponding time $(80/3.4 + 20/2.7) \approx 30.94\mathrm{s}$ . In this case, changing the deployment configuration results in an improvement in the overall processing time of roughly $14\%$ .
327
+
328
+ Case 3: Workload assignment. Finally, we keep the same composition and TP-based configuration but allow workload-aware assignment. Concretely, we assign:
329
+
330
+ Replica ($t_1$): 15% of $w_{1}$, 100% of $w_{2}$;
331
+
332
+ Replica (TP on $2 \times t_{2}$): 85% of $w_{1}$.
333
+
334
+ ![](images/a1d09cace9ad665ec02e5f378afaf46554d3975b8dff7cd23da8f29da5599c99.jpg)
335
+ Figure 12: Throughput and latency results for Llama3-70B model with different deployment configurations on different workloads.
336
+
337
+ Under these fractions, $t_1$ processes 12 requests of $w_1$ at 1.0 rps and 20 requests of $w_2$ at 1.2 rps, while the TP-based replica handles 68 requests of $w_1$ at 2.4 rps. By balancing the load and routing each workload to the preferable replica, i.e., the one with relatively higher throughput for that workload, we reduce the overall completion time from 30.94 s to $\max\left(0.85\lambda_1 / 2.4,\; 0.15\lambda_1 / C_{1,1} + \lambda_2 / C_{1,2}\right) = \max(68/2.4, 12/1.0 + 20/1.2) \approx 28.67$ s, where 2.4 rps is the TP replica's rate on $w_1$ . In this case, changing the workload assignment yields an additional improvement in the overall processing time of approximately $8\%$ .
338
+
339
+ This step-by-step example (also illustrated in Figure 14) shows how all three factors—GPU composition, deployment configuration, and workload assignment—must be jointly optimized to achieve the best performance.
340
+
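+ The arithmetic of the three cases can be checked with a short script. Below is a minimal sketch that reuses the throughput values and request counts from the setup above; the function and variable names are ours and are not part of the system.
+
+ ```python
+ # Reproduce the completion times of Cases 1-3 from the simple example.
+ LAMBDA = {"w1": 80, "w2": 20}          # total requests per workload type
+ C = {                                   # per-replica throughput (req/s)
+     "t1": {"w1": 1.0, "w2": 1.2},
+     "t2": {"w1": 0.9, "w2": 0.9},
+     "t3": {"w1": 0.3, "w2": 0.5},
+ }
+
+ def proportional_time(replicas):
+     """Cases 1 and 2: each workload is spread over all replicas in
+     proportion to their rates, so system throughput is the sum of rates."""
+     return sum(lam / sum(r[w] for r in replicas) for w, lam in LAMBDA.items())
+
+ # Case 1: two compositions under the same $8/h budget.
+ print(proportional_time([C["t1"], C["t2"], C["t3"]]))   # ~44.05 s
+ print(proportional_time([C["t1"], C["t2"], C["t2"]]))   # ~35.24 s
+
+ # Case 2: apply TP to the two t2 GPUs, which changes their combined rate.
+ tp_replica = {"w1": 2.4, "w2": 1.5}
+ print(proportional_time([C["t1"], tp_replica]))          # ~30.94 s
+
+ # Case 3: workload-aware assignment; the makespan is the slower replica.
+ t_tp = 0.85 * LAMBDA["w1"] / tp_replica["w1"]                      # 68/2.4
+ t_t1 = 0.15 * LAMBDA["w1"] / C["t1"]["w1"] + LAMBDA["w2"] / C["t1"]["w2"]
+ print(max(t_tp, t_t1))                                             # ~28.67 s
+ ```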
341
+ # D. Other Constraints and Heuristics
342
+
343
+ We enforce two additional constraints to minimize the overall search space and speed up the search process: (i) We perform an early memory check on each configuration, which ensures that the sum of GPU memories in configuration $c$ is sufficient for a model replica, i.e., $\sum_{n=1}^{N} (d_n(c) \times m_n) \geq M_r$ , where $M_r$ represents the least memory required for serving one model replica (e.g., 140 GB for Llama3-70B model). Configurations that violate this constraint will be eliminated from further evaluation; (ii) we enforce a connectivity constraint within each configuration. If certain GPUs lack interconnection (e.g., they are located in different data centers), those combinations do not appear in each configuration $c$ . Additionally, we use two heuristic methods to facilitate the deployment configuration search: (i) we only adopt TP within a single machine containing multiple GPUs, as TP typically requires high intra-machine communication bandwidth (e.g., PCIe, NVLink) for efficient deployment; (ii) we support non-uniform pipeline layer partitioning for PP, and determine the partition based on the total memory allocated for each stage. For instance, if there are a total of 24 layers and the GPU memory allocated for each stage is 1:2, then we allocate 8 and 16 layers to the first and second stages.
344
+
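+ The sketch below illustrates the early memory check and the memory-proportional pipeline-layer partitioning described above; the GPU memory table and the example configuration are illustrative choices, not values prescribed by the system.
+
+ ```python
+ def memory_ok(gpu_counts, gpu_mem_gb, required_gb):
+     """Early memory check: the total memory of the GPUs in a configuration
+     must cover one model replica (e.g., about 140 GB for Llama3-70B)."""
+     total = sum(gpu_mem_gb[g] * n for g, n in gpu_counts.items())
+     return total >= required_gb
+
+ def partition_layers(num_layers, stage_mem_gb):
+     """Non-uniform PP partitioning: split layers in proportion to the
+     memory allocated to each pipeline stage."""
+     total_mem = sum(stage_mem_gb)
+     layers = [round(num_layers * m / total_mem) for m in stage_mem_gb]
+     layers[-1] += num_layers - sum(layers)    # absorb rounding drift
+     return layers
+
+ gpu_mem = {"H100": 80, "A100": 80, "L40": 48, "4090": 24}   # GB per GPU
+ config = {"L40": 2, "4090": 2}                # a hypothetical configuration
+ print(memory_ok(config, gpu_mem, 140))        # True: 2*48 + 2*24 = 144 GB
+ print(partition_layers(24, [48, 96]))         # [8, 16], as in the 1:2 example
+ ```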
345
+ ![](images/ff7d426ab21657ef622e5d24805733dc7201e37463783e6b9d8f86d409f53112.jpg)
346
+ Figure 13: Throughput and latency results for Llama3-70B model with different deployment configurations on different workloads.
347
+
348
+ # E. Extend to Multiple LLM serving
349
+
350
+ The previous MILP formulation assumes serving a single LLM with multiple model replicas. However, cloud services typically serve multiple LLMs of varying sizes, e.g., the Llama3-8B and Llama3-70B models. To integrate the multi-LLM serving plan search into our MILP, we introduce the following extended formulation.
351
+
352
+ Let there be $M$ model types, indexed by $m \in \{1, 2, \ldots, M\}$ , each with its own workload set and memory requirement. The MILP formulation can be extended to:
353
+
354
+ $$
355
+ \arg \min T \tag {8}
356
+ $$
357
+
358
+ $$
359
+ \begin{array}{l} \text {s . t .} \quad \forall m: \left\{ \begin{array}{l} \sum_ {c \in \mathcal {C} _ {m}} x _ {c, w, m} = 1, \forall w \in W _ {m}, \\ \sum_ {w \in W _ {m}} \frac {x _ {c , w , m} \cdot f _ {w , m}}{y _ {c , m} \cdot h _ {c , w , m}} \leq T, \forall c \in \mathcal {C} _ {m}, \\ x _ {c, w, m} \leq y _ {c, m}, \forall c \in \mathcal {C} _ {m}, \forall w \in W _ {m}, \end{array} \right. \end{array} \tag {9}
360
+ $$
361
+
362
+ $$
363
+ \sum_ {m = 1} ^ {M} \sum_ {c \in \mathcal {C} _ {m}} \left(o _ {c, m} \times y _ {c, m}\right) \leq B, \tag {10}
364
+ $$
365
+
366
+ $$
367
+ \sum_ {m = 1} ^ {M} \sum_ {c \in \mathcal {C} _ {m}} \left(d _ {n} (c, m) \times y _ {c, m}\right) \leq a _ {n}, \forall n, \tag {11}
368
+ $$
369
+
370
+ $$
371
+ y _ {c, m} \in \{0, 1, 2, \dots \}. \tag {12}
372
+ $$
373
+
374
+ In this extended MILP formulation, we introduce an additional model-type dimension to every relevant variable and constraint. Consequently, the problem now accommodates multiple model types (each with its own workload set, throughput profiles, memory requirements, etc.) within a unified optimization framework. The objective remains the same—minimizing
375
+
376
+ ![](images/c08323657f4248067f819c08bce3ea55edf6736bd1eb589b9b42bb9be4aae76a.jpg)
377
+ Figure 14: Illustration of a simple example.
378
+
379
+ Algorithm 1 Binary Search on $T$
380
+ ```txt
381
+ Input: $\underline{T},\overline{T}$ {initial bounds}
382
+ Input: $\tau$ {tolerance}
383
+ Output: Approximate minimal feasible makespan
384
+ while $(\overline{T} - \underline{T}) > \tau$ do
+   $\hat{T} \gets (\underline{T} + \overline{T}) / 2$
+   if FEASIBILITYCHECK($\hat{T}$) is true then
+     $\overline{T} \gets \hat{T}$ {If feasible, try smaller $\hat{T}$}
+   else
+     $\underline{T} \gets \hat{T}$ {If infeasible, increase $\hat{T}$}
385
+ end if
386
+ end while
387
+ return $\overline{T}$
388
+ ```
389
+
390
+ the overall makespan $T$ while jointly enforcing GPU availability, budget, and other constraints across all model types. This ensures that the chosen configuration set and workload assignments meet the demands of every model type while adhering to the total GPU and budget limits.
391
+
392
+ # F. Binary Search
393
+
394
+ For large numbers of model, workload, and GPU types, the MILP solver may take hours to produce a reasonably good solution. To expedite the search process, we incorporate a binary-search-on- $T$ approach into our existing MILP formulation. Specifically, we transform the previous "minimize $T$ " problem into a sequence of feasibility checks: for a given candidate $\hat{T}$ , we ask whether a valid serving plan exists that completes all workloads in at most $\hat{T}$ , subject to budget and GPU constraints. If yes, we try a smaller $\hat{T}$ ; if no, we must increase $\hat{T}$ .
395
+
396
+ Binary search. The lower bound of the makespan, $\underline{T}$ , is identified as the best possible time if infinite GPUs were available with no budget limit (e.g., using the fastest configuration for each workload type). The upper bound, $\overline{T}$ , is the worst-case scenario (e.g., using the slowest feasible configuration to serve all workloads). During the binary search loop, while the difference between the bounds exceeds a certain tolerance $\tau$ (e.g., one second), i.e., $\overline{T} - \underline{T} \geq \tau$ , we compute $\hat{T} = \frac{\underline{T} + \overline{T}}{2}$ and check its feasibility. If a pair $(x_{c,w}, y_{c})$ or $(x_{c,w,m}, y_{c,m})$ (in the extended case) satisfies all constraints in §4.3 or Appendix E, with $\sum_{w \in W} \frac{x_{c,w} \cdot f_{w}}{y_{c} \cdot h_{c,w}} \leq \hat{T}$ , $\forall c \in \mathcal{C}$ , or $\sum_{w \in W_m} \frac{x_{c,w,m} \cdot f_{w,m}}{y_{c,m} \cdot h_{c,w,m}} \leq \hat{T}$ , $\forall c \in \mathcal{C}_m$ , we update $\overline{T} \gets \hat{T}$ . Otherwise, we update $\underline{T} \gets \hat{T}$ . When the loop concludes, the value of $\overline{T}$ (or, equivalently within the tolerance, $\underline{T}$ ) represents the minimal feasible makespan. Note that the feasibility check can be further approximated using a knapsack approximation, which makes the binary search approach more efficient for large-scale MILP problems. We outline the binary search process in Algorithm 1.
397
+
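+ A minimal sketch of the loop in Algorithm 1 is given below. The feasibility check is left abstract: in the full system it solves the MILP of §4.3 (or Appendix E) with the makespan fixed to $\hat{T}$, in which case the makespan constraint can be rewritten linearly in $x$ and $y$.
+
+ ```python
+ def binary_search_makespan(feasible, t_low, t_high, tol=1.0):
+     """Shrink [t_low, t_high] until it is narrower than `tol`; feasible(T)
+     must return True iff some serving plan finishes all workloads within T."""
+     while t_high - t_low > tol:
+         t_mid = (t_low + t_high) / 2.0
+         if feasible(t_mid):
+             t_high = t_mid      # feasible: try a smaller makespan
+         else:
+             t_low = t_mid       # infeasible: allow more time
+     return t_high
+
+ # Toy usage with a stub check whose true minimal makespan is 28.67 s.
+ print(binary_search_makespan(lambda t: t >= 28.67, t_low=10.0, t_high=60.0))
+ ```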
398
+ Other optimizations for speeding up MILP. For extremely large-scale MILP problems (e.g., dozens of model and workload types with hundreds of GPUs), we introduce several optimizations, such as pruning configurations, providing a good starting point, and early stopping based on the lower bound, as detailed in Appendix G. The experimental results presented in §5
399
+
400
+ ![](images/cac530b13f6a737fa61c5f21867b3d71542c33cfd949aa4eaf096d34f54b4339.jpg)
401
+ Figure 15: End-to-end experiments on Llama3-8B model with different setups.
402
+
403
+ ![](images/77c688196baf9146f224b23fe9908e2cf96202b0605d0701f0d0627a65d4c93b.jpg)
404
+ Figure 16: System performance v.s. price budget.
405
+
406
+ demonstrate the efficiency, effectiveness, and scalability of our scheduling algorithm.
407
+
408
+ # G. Other Optimizations for Speeding up MILP
409
+
410
+ For large numbers of GPUs and model types, it might take hours for the MILP solver to provide a relatively good solution. To expedite the search process, we introduce three optimizations to minimize the search space without sacrificing the effectiveness of our scheduling results: (i) for each model type, we prune configurations that are clearly dominated. For example, configurations with high degrees of model parallelism are retained for Llama3-70B, which requires substantial memory for model serving, but are pruned for Llama3-8B to prevent excessive communication overhead; (ii) we pre-estimate the resource requirements for each model type based on incoming workloads and their memory demands, and proportionally allocate resources to provide a good starting point for the MILP solver, thereby expediting the search process; (iii) we establish a theoretical lower bound for the makespan by analyzing the minimum possible processing time across all feasible configurations, which enables the implementation of early stopping criteria during optimization, i.e., the search process stops when it finds a solution that is very close to this lower bound. The minimum possible makespan occurs when all workloads are assigned to the most efficient configuration without considering resource constraints.
411
+
412
+ # H. Real-Time GPU Availability
413
+
414
+ We randomly selected four real-time GPU availabilities on the cloud, as shown in Table 4.
415
+
416
+ # I. Workload Type Ratios for Each Trace
417
+
418
+ We demonstrate the workload type ratios for the three traces in Table 5.
419
+
420
+ Table 4: Real time GPU availabilities on cloud platform.
421
+
422
+ <table><tr><td>GPU Avails</td><td>4090</td><td>A40</td><td>A6000</td><td>L40</td><td>A100</td><td>H100</td></tr><tr><td>Avail 1</td><td>16</td><td>12</td><td>8</td><td>12</td><td>6</td><td>8</td></tr><tr><td>Avail 2</td><td>32</td><td>8</td><td>16</td><td>16</td><td>7</td><td>12</td></tr><tr><td>Avail 3</td><td>32</td><td>16</td><td>8</td><td>8</td><td>32</td><td>8</td></tr><tr><td>Avail 4</td><td>24</td><td>24</td><td>24</td><td>16</td><td>4</td><td>8</td></tr></table>
423
+
424
+ Table 5: Workload type ratios for subsampled traces from the Swiss AI Center (Trace 1), Azure-Trace (Trace 2), and WildGPT dataset (Trace 3). Workloads 1–9 correspond to the nine workload types shown in Figure 4 from left to right.
425
+
426
+ <table><tr><td>Workloads</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td></tr><tr><td>Trace 1 (%)</td><td>33</td><td>7</td><td>8</td><td>7</td><td>27</td><td>6</td><td>6</td><td>3</td><td>3</td></tr><tr><td>Trace 2 (%)</td><td>22</td><td>5</td><td>5</td><td>21</td><td>5</td><td>5</td><td>19</td><td>6</td><td>12</td></tr><tr><td>Trace 3 (%)</td><td>4</td><td>1</td><td>4</td><td>3</td><td>20</td><td>27</td><td>1</td><td>25</td><td>15</td></tr></table>
427
+
428
+ # J. End-to-end Experiment Results for Llama3-8B Model
429
+
430
+ The end-to-end experiments on Llama3-8B model with different setups are shown in Figure 15.
431
+
432
+ # K. System Performance vs Price Budget
433
+
434
+ We further evaluate our system's performance compared to homogeneous baselines under various price budgets. As shown in Figure 16, as the price budget increases (from \$5/h to \$60/h), the performance gap between our approach and the homogeneous setups narrows from approximately $30\%$ to $15\%$ . This is primarily due to the limited availability of cloud resources. In the homogeneous baselines, we assume an unlimited number of GPUs, allowing performance to scale linearly with the price budget. However, in cloud-based scenarios, resource restrictions prevent such linear scaling. When larger price budgets are applied, GPUs that are unsuitable for the current workload may be rented if they are the only available options, further limiting performance scalability.
435
+
436
+ Table 6: Performance of different configurations.
437
+
438
+ <table><tr><td>Diff Configs</td><td>Real</td><td>Estimated</td></tr><tr><td>H100 (2,4)</td><td>0.56 req/s</td><td>0.60 req/s</td></tr><tr><td>H100 (4,2)</td><td>0.44 req/s</td><td>0.47 req/s</td></tr><tr><td>H100 (4,2) (cross machine)</td><td>0.42 req/s</td><td>0.44 req/s</td></tr><tr><td>L40 (2,4)</td><td>0.42 req/s</td><td>0.46 req/s</td></tr><tr><td>L40 (4,2)</td><td>0.21 req/s</td><td>0.22 req/s</td></tr><tr><td>L40 (4,2) (cross machine)</td><td>0.18 req/s</td><td>0.19 req/s</td></tr><tr><td>H100+A100 (4,2) (cross machine)</td><td>0.48 req/s</td><td>0.52 req/s</td></tr></table>
439
+
440
+ # L. One-Time Profiling
441
+
442
+ We employ a one-time profiling strategy that captures the following components, following the profiling method used in Vidur (Agrawal et al., 2024a): (1) Inference prefill latency: We profile the latency for a single transformer layer across varying TP degrees, different workload types, and various GPU types; (2) inference decoding latency: We profile the decoding latency for a single transformer layer under similar variations in TP degrees, workload types, and GPU types; (3) pipeline communication latency: We measure the communication latency between different GPUs across various workload types. Using these measurements, the per-request latency for any configuration is estimated by combining the TP costs (both communication and computation) of all layers—which may be served by different GPUs and at varying TP degrees—with the PP communication cost. Note that our heuristics, as discussed in Section 4.3 and Appendix D, largely reduce the profiling space, e.g., TP is employed only intra-machine. When estimating throughput, the prefill and decoding
443
+
444
+ phases are treated separately: (1) the prefill phase is compute-bound, and its batched processing capacity is determined by the sum of the individual latencies; (2) the decoding phase is memory-bound, with its batched processing capacity determined by a single latency value. This distinction has been validated in several studies (Zhong et al., 2024; Patel et al., 2024).
445
+
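+ As a rough illustration of how the profiled pieces combine, the sketch below sums per-layer latencies over each pipeline stage and adds the PP communication cost between stages; the numbers and the simple additive model are illustrative assumptions, not the exact estimator used in the system.
+
+ ```python
+ def estimate_request_latency(stage_layer_latency_ms, layers_per_stage, pp_comm_ms):
+     """Sum the per-layer TP cost (compute + TP communication) over the layers
+     of each pipeline stage, then add PP communication between stages."""
+     total = 0.0
+     for stage, n_layers in enumerate(layers_per_stage):
+         total += n_layers * stage_layer_latency_ms[stage]
+         if stage < len(layers_per_stage) - 1:
+             total += pp_comm_ms
+     return total
+
+ # Hypothetical profile: two stages with 8 and 16 layers on different GPUs.
+ print(estimate_request_latency([3.0, 2.0], [8, 16], pp_comm_ms=5.0))  # 61.0 ms
+ ```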
446
+ Table 6 demonstrates examples of our cost estimation under a long-input, short-output workload (i.e., workload 1 in Figure 4). In Table 6, the notation (2,4) indicates that the TP degree is 2 and the PP degree is 4. The estimation errors range from $4\%$ to $7\%$ . Although the estimations are not perfectly accurate, they are sufficiently reliable for selecting the optimal configurations.
447
+
448
+ # M. Discussion
449
+
450
+ Online replanning. Online scheduling for dynamic workloads is orthogonal to the approach presented in this work. However, dynamic workloads could be accommodated through a replanning mechanism analogous to the one proposed in DistServe (Zhong et al., 2024). Concretely, the system could (1) monitor the real-time composition of incoming workloads, (2) track GPU resource availability within the cloud environment, and (3) upon detecting a significant shift in the workload distribution (e.g., an increase in the proportion of certain workload types), re-execute the scheduling algorithm, incorporating recent historical data to produce an updated serving plan.
451
+
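+ A hedged sketch of such a replanning loop is shown below; `run_scheduler`, `current_workload_mix`, and `available_gpus` are hypothetical hooks into the system, and the drift test based on total variation distance is our own choice of trigger.
+
+ ```python
+ import time
+
+ def workload_drift(old_mix, new_mix):
+     """Total variation distance between two workload-type distributions."""
+     keys = set(old_mix) | set(new_mix)
+     return 0.5 * sum(abs(old_mix.get(k, 0.0) - new_mix.get(k, 0.0)) for k in keys)
+
+ def replanning_loop(run_scheduler, current_workload_mix, available_gpus,
+                     threshold=0.2, period_s=300):
+     plan_mix = current_workload_mix()
+     plan = run_scheduler(plan_mix, available_gpus())
+     while True:
+         time.sleep(period_s)                                  # (1) monitor periodically
+         mix, gpus = current_workload_mix(), available_gpus()  # (2) track GPU availability
+         if workload_drift(plan_mix, mix) > threshold:         # (3) significant shift
+             plan_mix, plan = mix, run_scheduler(mix, gpus)    #     replan with fresh data
+ ```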
452
+ Table 7: Performance changes in workload and GPU drop.
453
+
454
+ <table><tr><td></td><td>Workload Change</td><td>GPU Drop</td></tr><tr><td>Before</td><td>26.89 req/s</td><td>26.89 req/s</td></tr><tr><td>After</td><td>23.70 req/s (13%↓)</td><td>20.80 req/s (29%↓)</td></tr><tr><td>Replanning</td><td>29.61 req/s (25%↑)</td><td>22.85 req/s (10%↑)</td></tr></table>
455
+
456
+ As shown in Table 7, we test a workload surge in short-output requests on the AzureTrace dataset (i.e., trace 2) with a price budget of \$30 per hour. Before the surge, the optimal GPU composition is {20%, 65%, 15%} across datacenter, workstation, and consumer GPUs, achieving 26.89 req/s. After the workload change, throughput degrades to 23.70 req/s; replanning (shifting the allocation to {63%, 23%, 14%}) boosts it to 29.61 req/s. We also test the case where a GPU drop occurs (4 H100s go down): throughput falls from 26.89 to 20.80 req/s, and replanning raises it to 22.85 req/s.
457
+
458
+ Trade-offs between prioritizing cost-efficiency and request latency. Prioritizing cost-efficiency typically involves using fewer resources (i.e., lower budgets), which can lead to slightly higher response latencies. In contrast, prioritizing latency often requires utilizing more resources (i.e., incurring higher costs). We acknowledge that optimizing for cost efficiency may result in a slight increase in latency. However, inference tasks typically do not require extremely low latency; meeting a predefined latency threshold is usually sufficient. In resource-limited scenarios, where systems are naturally underprovisioned, emphasizing throughput can also indirectly improve latency by reducing queuing delays. Our experimental results in Figure 6 demonstrate that our method achieved the lowest P99 latency among all compared baselines.
2502.00xxx/2502.00722/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d452dd082e7cf318e478e54d426f28fcb2bb712800540712c7db65213e7c2cc5
3
+ size 1701162
2502.00xxx/2502.00722/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00782/fe7680fc-da3e-4a3b-bd9d-6cdcbf7cab47_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1878f0d2bdb8094d1f2a0aeb0c232e39de2468e0282f8a26c60d73ab4b1374c
3
+ size 14621870
2502.00xxx/2502.00782/full.md ADDED
@@ -0,0 +1,761 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Transfer Learning in Physics-Informed Neural Networks: Full Fine-Tuning, Lightweight FineTuning, and Low-Rank Adaptation
2
+ Yizheng Wang, Jinshuai Bai, Mohammad Sadegh Eshaghi, Cosmin Anitescu, Xiaoying Zhuang, Timon Rabczuk, Yinghua Liu
3
+
4
+ ![](images/5b4e5ab928e6a67a1e215bf54cf127c75c6ec8dc10ef18059a5334507f10c02e.jpg)
5
+
6
+ # Transfer Learning in Physics-Informed Neural Networks: Full Fine-Tuning, Lightweight Fine-Tuning, and Low-Rank Adaptation
7
+
8
+ Yizheng Wang $^{a,b}$ , Jinshuai Bai $^{a,e,f}$ , Mohammad Sadegh Eshaghi $^{c}$ , Cosmin Anitescu $^{b}$ , Xiaoying Zhuang $^{c}$ , Timon Rabczuk $^{b}$ , Yinghua Liu $^{a,*}$
9
+
10
+ $^{a}$ Department of Engineering Mechanics, Tsinghua University, Beijing 100084, China
11
+
12
+ $^{b}$ Institute of Structural Mechanics, Bauhaus-Universität Weimar, Marienstr. 15, D-99423 Weimar, Germany
13
+
14
+ Institute of Photonics, Department of Mathematics and Physics, Leibniz University Hannover, Germany
15
+
16
+ Drilling Mechanical Department, CNPC Engineering Technology RD Company Limited, Beijing 102206, China
17
+
18
+ $^{e}$ School of Mechanical, Medical and Process Engineering, Queensland University of Technology, Brisbane, QLD 4000, Australia
19
+
20
+ $^{f}$ ARC Industrial Transformation Training Centre—Joint Biomechanics, Queensland University of Technology, Brisbane, QLD
21
+
22
+ 4000,Australia
23
+
24
+ # Abstract
25
+
26
+ AI for PDEs has garnered significant attention, particularly Physics-Informed Neural Networks (PINNs). However, PINNs are typically limited to solving specific problems, and any changes in problem conditions necessitate retraining. Therefore, we explore the generalization capability of transfer learning in the strong and energy form of PINNs across different boundary conditions, materials, and geometries. The transfer learning methods we employ include full finetuning, lightweight finetuning, and Low-Rank Adaptation (LoRA). The results demonstrate that full finetuning and LoRA can significantly improve convergence speed while providing a slight enhancement in accuracy.
27
+
28
+ Keywords: PINNs, Computational mechanics, Transfer learning, AI for PDEs, AI for science
29
+
30
+ # 1. Introduction
31
+
32
+ Numerous physical phenomena are modeled using PDEs [1]. Solving PDEs is key to understanding these phenomena, making it one of the most critical tasks in computational mathematics, physics, and mechanics [2]. However, traditional PDEs solvers require re-solving the problem whenever boundary conditions, material distributions, or geometries change [3].
33
+
34
+ Recently, AI for PDEs [4, 5], one of the important directions of AI for science, refers to a class of algorithms that use deep learning to solve PDEs. There are three main approaches in AI for PDEs. The first is Physics-Informed Neural Networks (PINNs) [6], which are based on physical equations. The second is data-driven operator learning [7, 8]. The third is hybrid methods that combine data and physics, such as Physics-informed Neural Operator (PINO) [9] and Variational Physics-informed Neural Operator (VINO) [10]. PINNs are a core component of AI for PDEs, but they are limited to solving specific problems. Any changes in boundary conditions, material distributions, or geometries necessitate retraining [11, 12, 13].
35
+
36
+ Transfer learning refers to fine-tuning a pre-trained model for a new related task, often requiring fewer iterations and less data for the new task [14]. Therefore, exploring transfer learning in PINNs for different boundary conditions [15], material distributions [16], and geometries [17] is of great importance. The advantage of transfer learning in PINNs lies in iterative algorithms, as they can inherit parameters from previous training, thereby reducing the number of iterations required for new but similar tasks [4]. Currently, there are two main methods for applying transfer learning in PINNs: The first is full finetuning, where the model parameters
37
+
38
+ are fully inherited from a similar task and the model is then re-trained [18, 19]. The second is lightweight finetuning, where the initial layers of the neural network are frozen and only the latter layers are trained, so that fewer iterations are needed to converge [20, 21]. Recently, LoRA (Low-Rank Adaptation of Large Language Models) [22] has been introduced, which uses a low-rank approximation of the trainable parameters to enable rapid fine-tuning, significantly reducing computational costs. LoRA offers a more generalized and flexible approach compared to full and lightweight finetuning. [23, 24] have applied LoRA to PINNs. However, there has been no systematic study on the performance of different transfer learning methods in both the strong and energy forms of PINNs, particularly for generalization across boundary conditions, geometries, and material distributions. Given that transfer learning is a pivotal technique for enhancing the adaptability of PINNs across varying scenarios, a comprehensive analysis is essential to understand the performance of transfer learning in PINNs.
39
+
40
+ As a result, we systematically tested different transfer learning methods in PINNs for varying boundary conditions, geometries, and material distributions. The transfer learning methods include full finetuning, lightweight finetuning, and LoRA. Our results show that full finetuning and LoRA can significantly improve convergence speed while providing a slight enhancement in accuracy across most scenarios. These findings underscore the potential of transfer learning techniques to optimize the training efficiency and performance of PINNs, particularly in complex physical systems with varying boundary conditions, geometries, and material distributions.
41
+
42
+ The outline of the paper is as follows. Section 2 introduces PINNs, divided into strong and energy form. Section 3 presents different transfer learning methods. Section 4 details numerical experiments, divided into three parts:
43
+
44
+ 1. Transfer learning for different boundary conditions: Solving incompressible fluid dynamics Navier-Stokes (NS) equations with varying boundary conditions using the strong form of PINNs.
45
+ 2. Transfer learning for different materials: Solving heterogeneous problems with functionally graded materials of varying elastic moduli using the energy form of PINNs.
46
+ 3. Transfer learning for different geometries: Solving the square plate with a hole problem with varying geometries using the energy form of PINNs.
47
+
48
+ Finally, Section 5 and Section 6 summarize the characteristics of different transfer learning methods in PINNs and suggest future research directions for transfer learning in PINNs.
49
+
50
+ # 2. Preparatory knowledge
51
+
52
+ Before introducing transfer learning in PINNs, we need to understand two important forms of PINNs: the strong form of PINNs [6] and the energy form [1]. The essential difference between these two forms lies in the mathematical representation of the PDEs. The strong form describes the equations from a differential element perspective, while the energy form uses variational principles to describe the physical system from the overall energy perspective [25]. It is important to note that while the PDEs may have different forms, they solve the same physical problem. The reason for studying different forms of PDEs lies in the fact that, although they represent the same physical problem, their algorithmic accuracy and efficiency differ [26]. Therefore, studying different forms of PINNs is crucial for AI for PDEs [5, 4], which is one of the important aspects of AI for Science [27]. Below, we introduce both the strong form and the energy form.
53
+
54
+ # 2.1. PINNs: Strong Form
55
+
56
+ We begin by discussing the PDEs for boundary value problems, considering the following equations:
57
+
58
+ $$
59
+ \left\{ \begin{array}{l l} \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) = \boldsymbol {f} (\boldsymbol {x}) & \boldsymbol {x} \in \Omega \\ \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x})) = \boldsymbol {g} (\boldsymbol {x}) & \boldsymbol {x} \in \Gamma \end{array} , \right. \tag {1}
60
+ $$
61
+
62
+ where $\pmb{P}$ and $\pmb{B}$ are the domain and boundary operators, respectively, and $\Omega$ and $\Gamma$ represent the domain and boundary. $\pmb{u}(\pmb{x})$ denotes the field of interest that needs to be solved for, where $\pmb{x}$ represents the spatial or temporal coordinates.
63
+
64
+ ![](images/62b6d58031ee943c01b5f03976f137233fa109ec65275b0207ab9b4a470277fe.jpg)
65
+ Fig. 1. The strong form of Physics-informed neural networks (PINNs). The inputs $x_{1}, x_{2}, \dots, t$ are typically spatial coordinates, while the outputs $y_{1}, \dots, y_{n}$ are the network's outputs, usually representing the variables of interest.
66
+
67
+ We use the weighted residual method to transform these equations into their weighted residual form:
68
+
69
+ $$
70
+ \left\{ \begin{array}{l l} \int_ {\Omega} [ \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {f} (\boldsymbol {x}) ] \cdot \boldsymbol {w} (\boldsymbol {x}) d \Omega = 0 & \boldsymbol {x} \in \Omega \\ \int_ {\Gamma} [ \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {g} (\boldsymbol {x}) ] \cdot \boldsymbol {w} (\boldsymbol {x}) d \Gamma = 0 & \boldsymbol {x} \in \Gamma \end{array} , \right. \tag {2}
71
+ $$
72
+
73
+ where $\pmb{w}(\pmb{x})$ is the weight function. Eq. (1) and Eq. (2) are equivalent if $\pmb{w}(\pmb{x})$ is arbitrary. For numerical convenience, we often predefine the form of $\pmb{w}(\pmb{x})$ and obtain the residual form of the PDEs:
74
+
75
+ $$
76
+ \boldsymbol {w} (\boldsymbol {x}) = \left\{ \begin{array}{l l} \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {f} (\boldsymbol {x}) & \boldsymbol {x} \in \Omega \\ \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {g} (\boldsymbol {x}) & \boldsymbol {x} \in \Gamma \end{array} . \right. \tag {3}
77
+ $$
78
+
79
+ As a result, Eq. (2) is transformed into:
80
+
81
+ $$
82
+ \left\{ \begin{array}{l l} \int_ {\Omega} [ \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {f} (\boldsymbol {x}) ] \cdot [ \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {f} (\boldsymbol {x}) ] d \Omega = 0 & \boldsymbol {x} \in \Omega \\ \int_ {\Gamma} [ \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {g} (\boldsymbol {x}) ] \cdot [ \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {g} (\boldsymbol {x}) ] d \Gamma = 0 & \boldsymbol {x} \in \Gamma \end{array} . \right. \tag {4}
83
+ $$
84
+
85
+ Next, we approximate these integrals shown in Eq. (4) numerically, leading to the strong form of PINNs:
86
+
87
+ $$
88
+ \mathcal {L} _ {P I N N s} = \frac {\lambda_ {r}}{N _ {r}} \sum_ {i = 1} ^ {N _ {r}} | | \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x} _ {i}; \boldsymbol {\theta})) - \boldsymbol {f} (\boldsymbol {x} _ {i}) | | _ {2} ^ {2} + \frac {\lambda_ {b}}{N _ {b}} \sum_ {i = 1} ^ {N _ {b}} | | \boldsymbol {B} (\boldsymbol {u} (\boldsymbol {x} _ {i}; \boldsymbol {\theta})) - \boldsymbol {g} (\boldsymbol {x} _ {i}) | | _ {2} ^ {2}. \tag {5}
89
+ $$
90
+
91
+ where $||\cdot ||_2$ is the $L_{2}$ norm.
92
+
93
+ We optimize the above loss function to obtain the neural network approximation of the field variable $\pmb{u}(\pmb{x};\pmb{\theta})$ , where $\pmb{\theta}$ denotes the parameters of the neural network:
94
+
95
+ $$
96
+ \boldsymbol {u} (\boldsymbol {x}; \boldsymbol {\theta}) = \underset {\boldsymbol {\theta}} {\arg \min } \mathcal {L} _ {P I N N s} \tag {6}
97
+ $$
98
+
99
+ Thus, mathematically, the strong form of PINNs [6] is essentially a weighted residual form with the approximation of the function $\pmb{u}(\pmb{x})$ using a neural network, and the weight function given by Eq. (3).
100
+
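+ As a concrete illustration, the following is a minimal PyTorch sketch of the loss in Eq. (5) for a 1D Poisson problem $-u''(x) = f(x)$ on $(0,1)$ with $u(0) = u(1) = 0$; the problem, network size, and training settings are illustrative and are not the examples studied later in this paper.
+
+ ```python
+ import math
+ import torch
+ import torch.nn as nn
+
+ net = nn.Sequential(nn.Linear(1, 32), nn.Tanh(), nn.Linear(32, 32),
+                     nn.Tanh(), nn.Linear(32, 1))
+
+ def pinn_loss(net, f, lam_r=1.0, lam_b=1.0, n_r=100):
+     x = torch.rand(n_r, 1, requires_grad=True)           # collocation points
+     u = net(x)
+     du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
+     d2u = torch.autograd.grad(du.sum(), x, create_graph=True)[0]
+     residual = -d2u - f(x)                               # P(u) - f in the domain
+     boundary = net(torch.tensor([[0.0], [1.0]]))         # B(u) - g with g = 0
+     return lam_r * residual.pow(2).mean() + lam_b * boundary.pow(2).mean()
+
+ f = lambda x: (math.pi ** 2) * torch.sin(math.pi * x)    # exact u = sin(pi x)
+ opt = torch.optim.Adam(net.parameters(), lr=1e-3)
+ for _ in range(1000):
+     opt.zero_grad()
+     pinn_loss(net, f).backward()
+     opt.step()
+ ```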
101
+ ![](images/26bc4a408f38cf2fd80ee53f340076a266885bf695b59ad10f69bbba691ed22e.jpg)
102
+ Fig. 2. The energy form of Physics-informed neural networks (PINNs). The inputs $x, y, z$ are typically spatial coordinates, while the outputs $U_x, U_y,$ and $U_z$ represent the displacement field $\mathbf{u}$ . The admissible displacement $\tilde{\mathbf{u}}(\mathbf{x};\boldsymbol{\theta})$ is the displacement field $\mathbf{u}$ that satisfies the essential boundary conditions in advance.
103
+
104
+ # 2.2. PINNs: Energy Form
105
+
106
+ We introduce the energy form of the Physics-Informed Neural Network (DEM: Deep Energy Method) [1]. We consider $\delta \pmb{u}$ as the weight function $\pmb{w}(\pmb{x})$ in Eq. (2), which leads to the Galerkin form. Eq. (2) can be written as:
107
+
108
+ $$
109
+ \int_ {\Omega} \left[ \boldsymbol {P} (\boldsymbol {u} (\boldsymbol {x})) - \boldsymbol {f} (\boldsymbol {x}) \right] \cdot \delta \boldsymbol {u} d \Omega = 0, \boldsymbol {x} \in \Omega . \tag {7}
110
+ $$
111
+
112
+ For simplicity, we consider a specific Poisson equation to illustrate this:
113
+
114
+ $$
115
+ \left\{ \begin{array}{l l} - \triangle (u (\boldsymbol {x})) = f (\boldsymbol {x}) & \boldsymbol {x} \in \Omega \\ u (\boldsymbol {x}) = \bar {u} (\boldsymbol {x}) & \boldsymbol {x} \in \Gamma^ {u}. \\ \frac {\partial u (\boldsymbol {x})}{\partial \boldsymbol {n}} = \bar {t} (\boldsymbol {x}) & \boldsymbol {x} \in \Gamma^ {t} \end{array} \right. \tag {8}
116
+ $$
117
+
118
+ where $\Gamma^u$ and $\Gamma^t$ are the Dirichlet and Neumann boundary conditions, respectively. For the Poisson equation, the Galerkin form of Eq. (7) can be expressed as:
119
+
120
+ $$
121
+ \int_ {\Omega} \left[ - \triangle (u (\boldsymbol {x})) - f (\boldsymbol {x}) \right] \cdot \delta u d \Omega = 0, \boldsymbol {x} \in \Omega . \tag {9}
122
+ $$
123
+
124
+ Using the Gaussian integration formula, we can transform the above equation to:
125
+
126
+ $$
127
+ \int_ {\Omega} (- u _ {, i i} - f) \delta u d \Omega = \int_ {\Omega} u _ {, i} \delta u _ {, i} d \Omega - \int_ {\Gamma} u _ {, i} n _ {i} \delta u d \Gamma - \int_ {\Omega} f \delta u d \Omega = 0. \tag {10}
128
+ $$
129
+
130
+ By incorporating the boundary conditions from Eq. (8) into Eq. (10), we obtain the Galerkin weak form:
131
+
132
+ $$
133
+ \int_ {\Omega} (- u _ {, i i} - f) \delta u d \Omega = \int_ {\Omega} u _ {, i} \delta u _ {, i} d \Omega - \int_ {\Gamma^ {t}} \bar {t} \delta u d \Gamma - \int_ {\Omega} f \delta u d \Omega = 0. \tag {11}
134
+ $$
135
+
136
+ Since $u(\pmb{x})$ is given on $\Gamma^u$ , the corresponding variation $\delta \pmb{u} = 0$ on $\Gamma^u$ . Here, we observe an interesting phenomenon: we must satisfy $u(\pmb{x}) = \bar{u}(\pmb{x})$ on $\Gamma^u$ in advance, which involves constructing an admissible function. This is crucial for DEM. Additionally, Eq. (11) includes the domain PDEs and the boundary conditions on $\Gamma^t$ . Therefore, solving Eq. (8) is equivalent to solving Eq. (11).
137
+
138
+ We can further use the variational principle to write Eq. (11) as:
139
+
140
+ $$
141
+ \delta \mathcal {L} = \int_ {\Omega} u _ {, i} \delta u _ {, i} d \Omega - \int_ {\Gamma^ {t}} \bar {t} \delta u d \Gamma - \int_ {\Omega} f \delta u d \Omega . \tag {12}
142
+ $$
143
+
144
+ $$
145
+ \mathcal {L} = \frac {1}{2} \int_ {\Omega} u _ {, i} u _ {, i} d \Omega - \int_ {\Gamma^ {t}} \bar {t} u d \Gamma - \int_ {\Omega} f u d \Omega . \tag {13}
146
+ $$
147
+
148
+ $\mathcal{L}$ represents the potential energy, and we can observe that $\delta^2\mathcal{L} > 0$ (excluding zero solutions), indicating that we solve for $u(\pmb{x})$ by minimizing the energy:
149
+
150
+ $$
151
+ u (\boldsymbol {x}) = \underset {u} {\arg \min } \mathcal {L}. \tag {14}
152
+ $$
153
+
154
+ The essence of DEM is to approximate $u(\pmb{x})$ using a neural network $u(\pmb{x};\pmb{\theta})$ , and then optimize Eq. (14):
155
+
156
+ $$
157
+ u (\boldsymbol {x}; \boldsymbol {\theta}) = \underset {\boldsymbol {\theta}} {\arg \min } \mathcal {L} _ {D E M} = \underset {u} {\arg \min } \left\{\frac {1}{2} \int_ {\Omega} u (\boldsymbol {x}; \boldsymbol {\theta}), _ {i} u (\boldsymbol {x}; \boldsymbol {\theta}), _ {i} d \Omega - \int_ {\Gamma^ {t}} \bar {t} u (\boldsymbol {x}; \boldsymbol {\theta}) d \Gamma - \int_ {\Omega} f u (\boldsymbol {x}; \boldsymbol {\theta}) d \Omega \right\}. \tag {15}
158
+ $$
159
+
160
+ Therefore, the core of DEM lies in the integration of the domain energy and boundary energy, as well as the construction of the admissible function. Integration strategies can use numerical analysis methods, such as simple Monte Carlo integration or more accurate methods like Gaussian integration or Simpson's Rule.
161
+
162
+ Here, we emphasize the construction of the admissible function. We use the concept of a distance network for this purpose:
163
+
164
+ $$
165
+ u (\boldsymbol {x}) = u _ {p} (\boldsymbol {x}; \boldsymbol {\theta} _ {p}) + D (\boldsymbol {x}) \cdot u _ {g} (\boldsymbol {x}; \boldsymbol {\theta} _ {g}), \tag {16}
166
+ $$
167
+
168
+ where $u_{p}(\boldsymbol{x};\boldsymbol{\theta}_{p})$ is the particular solution network that fits the Dirichlet boundary condition, such that it outputs $\bar{u}(\boldsymbol{x})$ when the input points are on $\Gamma^{u}$ , and outputs any value elsewhere. The parameters $\boldsymbol{\theta}_{p}$ are optimized by:
169
+
170
+ $$
171
+ \boldsymbol {\theta} _ {p} = \underset {\boldsymbol {\theta} _ {p}} {\arg \min } \operatorname {M S E} \left(u _ {p} (\boldsymbol {x}; \boldsymbol {\theta} _ {p}), \bar {u} (\boldsymbol {x})\right), \quad \boldsymbol {x} \in \Gamma^ {u}. \tag {17}
172
+ $$
173
+
174
+ $D(\pmb{x})$ is the distance network, which we approximate using radial basis functions [28]. Other fitting functions can also be used. The effect is to output the minimum distance to the Dirichlet boundary:
175
+
176
+ $$
177
+ D (\boldsymbol {x}) = \min _ {\boldsymbol {y} \in \Gamma^ {u}} \sqrt {(\boldsymbol {x} - \boldsymbol {y}) \cdot (\boldsymbol {x} - \boldsymbol {y})}. \tag {18}
178
+ $$
179
+
180
+ $u_{g}(\pmb {x};\pmb{\theta}_{g})$ is a standard neural network. When using the minimum potential energy principle, we only optimize $\pmb{\theta}_{g}$
181
+
182
+ $$
183
+ u (\boldsymbol {x}; \boldsymbol {\theta} _ {p}, \boldsymbol {\theta} _ {g}) = \underset {\boldsymbol {\theta} _ {g}} {\arg \min } \mathcal {L} _ {D E M}. \tag {19}
184
+ $$
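+ The following is a minimal sketch of the DEM loss in Eq. (15) together with the admissible-function construction of Eq. (16), again for a 1D problem $-u''(x) = f(x)$ on $(0,1)$ with homogeneous Dirichlet conditions. To keep the sketch short we assume $u_p \equiv 0$ and use the closed-form distance $D(x) = \min(x, 1-x)$ instead of a particular-solution network and a distance network.
+
+ ```python
+ import math
+ import torch
+ import torch.nn as nn
+
+ u_g = nn.Sequential(nn.Linear(1, 32), nn.Tanh(), nn.Linear(32, 1))
+
+ def admissible_u(x):
+     d = torch.minimum(x, 1.0 - x)          # distance to the Dirichlet boundary
+     return d * u_g(x)                      # u = u_p + D * u_g with u_p = 0
+
+ def dem_loss(f, n=256):
+     x = torch.rand(n, 1, requires_grad=True)             # Monte Carlo points
+     u = admissible_u(x)
+     du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
+     return (0.5 * du.pow(2) - f(x) * u).mean()           # potential energy
+
+ f = lambda x: (math.pi ** 2) * torch.sin(math.pi * x)
+ opt = torch.optim.Adam(u_g.parameters(), lr=1e-3)        # only theta_g is trained
+ for _ in range(1000):
+     opt.zero_grad()
+     dem_loss(f).backward()
+     opt.step()
+ ```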
185
+
186
+ # 3. Method: Transfer learning in PINNs
187
+
188
+ Transfer learning has been a hot topic in deep learning research. In this manuscript, we conduct a systematic evaluation of transfer learning in the strong and energy forms of PINNs.
189
+
190
+ Given that there are various transfer learning methods, as shown in Fig. 3, we focus on the classical Parameter-based transfer learning and evaluate its performance in PINNs. The source domain refers to the pre-trained source problem, while the target domain refers to the target problem. The core of transfer learning lies in leveraging the parameters pre-trained on the source domain to achieve faster convergence on the target domain. Below, we introduce three common approaches for Parameter-based transfer learning.
191
+
192
+ ![](images/a0450f150814221ce1f4b6ba018aef64a858b230eb15f1d44ba75258eac318e7.jpg)
193
+ Fig. 3. Categories of Transfer Learning [14]
194
+
195
+ # 3.1. Full Fine-Tuning
196
+
197
+ The most common method for adapting pretrained models to downstream tasks is full fine-tuning, where all model parameters are fine-tuned [29]. However, this results in a full copy of fine-tuned model parameters for each task, which becomes prohibitively expensive when the models have a large number of trainable parameters [30]. Full fine-tuning is easy to understand, as shown in Fig. 4a. The core of full fine-tuning is to initialize the neural network parameters with those from the old task. Then, on the new task, the parameters from the old task are fine-tuned:
198
+
199
+ $$
200
+ \boldsymbol {\theta} ^ {n e w} = \arg \min _ {\boldsymbol {\theta}} \mathcal {L} \left(D ^ {n e w}; \boldsymbol {\theta} ^ {o l d}\right), \tag {20}
201
+ $$
202
+
203
+ where $\pmb{\theta}^{old}$ is the parameter set after the old task is completed, and $\pmb{\theta}^{new}$ is the result of optimizing the loss function $\mathcal{L}$ on the new dataset $D^{new}$ .
204
+
205
+ # 3.2. Lightweight Fine-Tuning
206
+
207
+ Lightweight fine-tuning freezes most of the pretrained parameters and only tunes a smaller subset of parameters [29], as shown in Fig. 4b. The key question is how to decide which subset of pretrained parameters to tune.
208
+
209
+ In the context of AI for PDEs, a common method for lightweight fine-tuning is to freeze the early layers of the neural network and only train the later layers to adapt to the new task [20, 21, 15]. The motivation for this approach is that shallow layers typically extract basic features, while retraining deeper layers enables the transfer of abstract knowledge. Due to the significant reduction in trainable parameters, the computational costs are substantially reduced.
210
+
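+ A short sketch contrasting the two schemes for an MLP of the kind used in this paper is given below; the source network here is untrained and merely stands in for a model pretrained on the source domain.
+
+ ```python
+ import torch.nn as nn
+
+ def make_mlp():
+     return nn.Sequential(nn.Linear(3, 100), nn.Tanh(), nn.Linear(100, 100),
+                          nn.Tanh(), nn.Linear(100, 2))
+
+ source_net = make_mlp()                                # pretrained on the source task
+ target_net = make_mlp()
+ target_net.load_state_dict(source_net.state_dict())   # inherit the parameters
+
+ # Full fine-tuning: every parameter of target_net stays trainable (the default).
+
+ # Lightweight fine-tuning: freeze all layers except the final linear layer.
+ for p in target_net.parameters():
+     p.requires_grad = False
+ for p in target_net[-1].parameters():
+     p.requires_grad = True
+
+ trainable = [n for n, p in target_net.named_parameters() if p.requires_grad]
+ print(trainable)   # ['4.weight', '4.bias'] -- only the last layer is tuned
+ ```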
211
+ # 3.3. LoRA: Low-Rank Adaptation
212
+
213
+ LoRA (Low-Rank Adaptation) [22] proposes adding the product of two low-rank matrices, $\mathbf{A}\mathbf{B}$ (where $\mathbf{A} \in \mathbb{R}^{d \times r}$ , $\mathbf{B} \in \mathbb{R}^{r \times m}$ ), to the pretrained parameters $\mathbf{W} \in \mathbb{R}^{d \times m}$ , as shown in Fig. 4c. The parameter computation for LoRA is as follows:
214
+
215
+ $$
216
+ \boldsymbol {W} ^ {*} = \boldsymbol {W} + \alpha \boldsymbol {A} \boldsymbol {B}, \tag {21}
217
+ $$
218
+
219
+ ![](images/3338d64b4c256107f81812d83f1aa789c775dbf34b3d8135b5659b4974f02ac3.jpg)
220
+ (a) Full fine-tuning
221
+
222
+ ![](images/dc1aba6461dea735e5f95ac3b372d8cd0a933e7b2650031a5cdcb33368fdbe13.jpg)
223
+ (b) Lightweight fine-tuning
224
+
225
+ ![](images/ee7dc2315bd2995a41ecefa567dc7434e2653f4516103a52ca2ad13e75b8f205.jpg)
226
+ (c) LoRA: Low-rank adaptation
227
+ Fig. 4. Three common methods for Parameter-based transfer learning: (a) Full fine-tuning: fine-tune all parameters of the model, with the red arrows indicating the parameters that need fine-tuning. (b) Lightweight fine-tuning: fine-tune a subset of the model's parameters, with the red arrows indicating the parameters that need fine-tuning. (c) LoRA: the blue matrix $W$ represents fixed parameters (pretrained parameters from the previous task). The yellow matrices are the trainable parameters $A$ and $B$ , where $AB$ is a low-rank matrix with rank $r$ . During training on the new dataset, only $A$ and $B$ are adjusted. $W^{*} = W + AB$ represents the neural network weights during testing.
228
+
229
+ where $r$ is the rank, a hyperparameter of LoRA. In general, $r \ll \min(d, m)$ ; we discuss the choice of $r$ in Section 5.1.
230
+
231
+ It is important to note that $\mathbf{W}$ represents the fixed pretrained parameters from the old task (source domain). Only the product $\mathbf{A}\mathbf{B}$ is trained in the target domain, with the number of trainable parameters being $r \times (d + m)$ . The number of trainable parameters in $\mathbf{A}\mathbf{B}$ is much smaller than the total number of parameters in $\mathbf{W}$ , which is $d \times m$ . The scaling factor $\alpha$ is used to control the weight between the pretrained $\mathbf{W}$ and the LoRA-trained parameters $\mathbf{A}\mathbf{B}$ , with a default value of 1. $\mathbf{A}$ and $\mathbf{B}$ are initialized using a Gaussian distribution. Other initialization methods for $\mathbf{A}$ and $\mathbf{B}$ are also possible. In this work, we employ a Gaussian distribution with a mean of 0 and a standard deviation of 0.02.
232
+
233
+ In essence, LoRA can be seen as a more flexible version of lightweight fine-tuning. Additionally, full fine-tuning can be considered a limiting case of LoRA, when $r = \min(d, m)$ . The core idea of LoRA is to decompose part of the weight matrices in the model into the product of two low-rank matrices, thereby reducing the number of parameters that need to be trained.
234
+
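+ The following is a minimal PyTorch sketch of a LoRA-wrapped linear layer implementing Eq. (21), with $\mathbf{W}$ frozen and only $\mathbf{A}$ and $\mathbf{B}$ trainable; initializing both factors from a Gaussian with standard deviation 0.02 follows the choice described above.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class LoRALinear(nn.Module):
+     def __init__(self, linear, r=4, alpha=1.0):
+         super().__init__()
+         self.linear = linear                        # pretrained W (and bias)
+         for p in self.linear.parameters():
+             p.requires_grad = False                 # W stays fixed
+         d, m = linear.out_features, linear.in_features
+         self.A = nn.Parameter(torch.randn(d, r) * 0.02)
+         self.B = nn.Parameter(torch.randn(r, m) * 0.02)
+         self.alpha = alpha
+
+     def forward(self, x):
+         # y = x W^T (+ bias) + alpha * x (A B)^T, i.e. W* = W + alpha * A B
+         return self.linear(x) + self.alpha * (x @ (self.A @ self.B).t())
+
+ layer = LoRALinear(nn.Linear(100, 100), r=4)
+ y = layer(torch.randn(8, 100))
+ print(sum(p.numel() for p in layer.parameters() if p.requires_grad))  # 800
+ ```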
235
+ # 4. Results
236
+
237
+ In this section, we evaluate the performance of the three transfer learning schemes introduced in Section 3 within the context of PINNs. We conduct systematic and in-depth tests to examine the transfer effects across different boundary conditions, material distributions, and geometric configurations.
238
+
239
+ # 4.1. Transfer learning for different boundary conditions
240
+
241
+ Since PINNs solve only a specific case, they must be retrained once the boundary or initial conditions change. In this section, we evaluate the performance of transfer learning in the strong form of PINNs when the boundary and initial conditions change. We perform the validation with the strong form of PINNs on the incompressible fluid dynamics Navier-Stokes (NS) equations, a 2D unsteady, nonlinear PDE system:
242
+
243
+ $$
244
+ u _ {i, i} = 0
245
+ $$
246
+
247
+ $$
248
+ \frac {\partial u _ {i}}{\partial t} + u _ {j} u _ {i, j} = - p _ {, i} + \frac {1}{R e} u _ {i, j j}, \tag {22}
249
+ $$
250
+
251
+ where the pressure $p$ and velocity field $\mathbf{u}$ (with $u$ and $v$ the velocity components in the $x$ - and $y$ -directions) are strongly coupled. Note that Eq. (22) cannot be solved by the energy form of PINNs (for reasons detailed in [26]), so we focus here on validating the strong form of PINNs.
252
+
253
+ The SIMPLE algorithm [31] is a widely recognized and effective method for solving the Navier-Stokes equations; without such a scheme, discretizing Eq. (22) directly typically yields a large, irregular, and stiff sparse linear system. Solving this poorly conditioned linear system is computationally expensive and converges slowly, so fully coupled solution methods are rarely used in practice. In fact, when solving the 2D incompressible NS equations, we often use the vorticity-streamfunction formulation:
254
+
255
+ $$
256
+ \frac {\partial \Omega}{\partial t} + v _ {i} \Omega_ {, i} = \frac {1}{R e} \nabla^ {2} \Omega , \tag {23}
257
+ $$
258
+
259
+ $$
260
+ \nabla^ {2} \psi = \Omega
261
+ $$
262
+
263
+ where $\Omega$ and $\psi$ are the vorticity and streamfunction, both of which are related to the velocity field:
264
+
265
+ $$
266
+ \Omega = u_{y} - v_{x}, \qquad \frac{\partial \psi}{\partial x} = -v, \qquad \frac{\partial \psi}{\partial y} = u. \tag {24}
267
+ $$
276
+
277
+ It is easy to verify that the streamfunction automatically satisfies the incompressibility condition $u_{i,i} = 0$ . To continue solving for the pressure, we need to solve the Poisson equation for the pressure:
278
+
279
+ $$
280
+ \begin{array}{l} \nabla^ {2} p = - \left[ \left(\frac {\partial u}{\partial x}\right) ^ {2} + 2 \frac {\partial v}{\partial x} \frac {\partial u}{\partial y} + \left(\frac {\partial v}{\partial y}\right) ^ {2} \right] \tag {25} \\ = 2 \left[ \frac {\partial u}{\partial x} \frac {\partial v}{\partial y} - \frac {\partial u}{\partial y} \frac {\partial v}{\partial x} \right]. \\ \end{array}
281
+ $$
282
+
283
+ Note that if the pressure field is not of interest, solving Eq. (8) is not necessary for solving Eq. (23). By examining Eq. (23) and Eq. (25), we observe that the pressure $p$ and the velocity field $\pmb{u}$ are decoupled.
284
+
285
+ We solve the Taylor-Green vortex problem shown in Fig. 5. The Taylor-Green vortex is a classic analytical solution to the incompressible Navier-Stokes equations, commonly used to verify the accuracy of computational
286
+
287
+ ![](images/ff58213f7a7b070ed69cf599aaac233ebd31511628f498fcb9e5006c2d3afe98.jpg)
288
+ Fig. 5. Introduction to the Taylor-Green vortex: the velocity vector field at different values of frequency $w$ . The flow pattern of the Taylor-Green vortex typically involves multiple vortex structures, where the size and distribution of the vortices exhibit high symmetry. Over time, the vortices decay and eventually disappear.
289
+
290
+ ![](images/b3ebc514ee5905271c3d56c9ff41fa827d31975d4e32c6d64284947f8e6f7a8c.jpg)
291
+
292
+ ![](images/6ee922e6ac865051b139993bb47221e034126124ee6c59a2f3f4e767da7d588c.jpg)
293
+
294
+ fluid dynamics algorithms. The analytical solution is given by:
295
+
296
+ $$
297
+ \psi = \frac {1}{w} \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \cos (w y)
298
+ $$
299
+
300
+ $$
301
+ \Omega = u _ {y} - v _ {x} = - 2 w \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \cos (w y)
302
+ $$
303
+
304
+ $$
305
+ u = \frac {\partial \psi}{\partial y} = - \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \sin (w y) \quad , \tag {26}
306
+ $$
307
+
308
+ $$
309
+ v = - \frac {\partial \psi}{\partial x} = \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \sin (w x) \cos (w y)
310
+ $$
311
+
312
+ $$
313
+ p = - \frac {1}{2} \exp \left(- \frac {4 w ^ {2}}{R e} t\right) \left[ \cos^ {2} (w x) + \cos^ {2} (w y) \right]
314
+ $$
315
+
316
+ where $w$ is a constant that controls the frequency of the Taylor-Green vortex. It is easy to verify that Eq. (26) satisfies Eq. (23).
317
+
318
+ The initial and boundary conditions for the Taylor-Green vortex are set according to the analytical solution and are detailed in Appendix A.
319
+
320
+ The loss function for solving Equations (23) to (25) using the strong form of PINNs is:
321
+
322
+ $$
323
+ \begin{array}{l} \mathcal {L} _ {p i n n s} = \frac {\lambda_ {p}}{N _ {p}} \sum_ {i = 1} ^ {N _ {p}} \left[ \left| \frac {\partial \Omega (\pmb {x} _ {i})}{\partial t} + u \Omega (\pmb {x} _ {i}) _ {, x} + v \Omega (\pmb {x} _ {i}) _ {, y} - \frac {1}{R e} \nabla^ {2} \Omega (\pmb {x} _ {i}) \right| ^ {2} + \left| \nabla^ {2} \psi (\pmb {x} _ {i}) - \Omega (\pmb {x} _ {i}) \right| ^ {2} \right] \\ + \frac {\lambda_ {\psi} ^ {b}}{N _ {\psi} ^ {b}} \sum_ {i = 1} ^ {N _ {\psi} ^ {b}} \left| \psi (\boldsymbol {x} _ {i}) - \bar {\psi} (\boldsymbol {x} _ {i}) \right| ^ {2} + \frac {\lambda_ {\Omega} ^ {b}}{N _ {\Omega} ^ {b}} \sum_ {i = 1} ^ {N _ {\Omega} ^ {b}} \left| \Omega (\boldsymbol {x} _ {i}) - \bar {\Omega} (\boldsymbol {x} _ {i}) \right| ^ {2} \\ + \frac {\lambda_ {u} ^ {b}}{N _ {u} ^ {b}} \sum_ {i = 1} ^ {N _ {u} ^ {b}} | u (\boldsymbol {x} _ {i}) - \bar {u} (\boldsymbol {x} _ {i}) | ^ {2} + \frac {\lambda_ {v} ^ {b}}{N _ {v} ^ {b}} \sum_ {i = 1} ^ {N _ {v} ^ {b}} | v (\boldsymbol {x} _ {i}) - \bar {v} (\boldsymbol {x} _ {i}) | ^ {2} \tag {27} \\ + \frac {\lambda_ {\psi} ^ {i}}{N _ {\psi} ^ {i}} \sum_ {i = 1} ^ {N _ {\psi} ^ {i}} \left| \psi (\boldsymbol {x} _ {i}) - \bar {\psi} (\boldsymbol {x} _ {i}) \right| ^ {2} + \frac {\lambda_ {\Omega} ^ {i}}{N _ {\Omega} ^ {i}} \sum_ {i = 1} ^ {N _ {\Omega} ^ {i}} \left| \Omega (\boldsymbol {x} _ {i}) - \bar {\Omega} (\boldsymbol {x} _ {i}) \right| ^ {2} \\ + \frac {\lambda_ {u} ^ {i}}{N _ {u} ^ {i}} \sum_ {i = 1} ^ {N _ {u} ^ {i}} | u (\boldsymbol {x} _ {i}) - \bar {u} (\boldsymbol {x} _ {i}) | ^ {2} + \frac {\lambda_ {v} ^ {i}}{N _ {v} ^ {i}} \sum_ {i = 1} ^ {N _ {v} ^ {i}} | v (\boldsymbol {x} _ {i}) - \bar {v} (\boldsymbol {x} _ {i}) | ^ {2}, \\ \end{array}
324
+ $$
325
+
326
+ where $N_{p}$ is the total number of collocation points for the PDEs loss in the domain; $N_{\psi}^{b}$ , $N_{\Omega}^{b}$ , $N_{u}^{b}$ , and $N_{v}^{b}$ are the total numbers of collocation points for the boundary conditions of $\psi$ , $\Omega$ , $u$ , and $v$ , respectively; and $N_{\psi}^{i}$ , $N_{\Omega}^{i}$ , $N_{u}^{i}$ , and $N_{v}^{i}$ are the total numbers of collocation points for the initial conditions of $\psi$ , $\Omega$ , $u$ , and $v$ , respectively. All collocation points are uniformly and randomly distributed. We set $N_{p} = 1000$ , $N_{\psi}^{b} = 100$ , $N_{\Omega}^{b} = 100$ , $N_{u}^{b} = 100$ , and $N_{v}^{b} = 100$ , while $N_{\psi}^{i} = 100$ , $N_{\Omega}^{i} = 100$ , $N_{u}^{i} = 100$ , and $N_{v}^{i} = 100$ .
327
+
328
+ The weights $\lambda_{p},\lambda_{\psi}^{b},\lambda_{\Omega}^{b},\lambda_{u}^{b}$ , and $\lambda_v^b$ correspond to the PDEs loss in the domain and the boundary condition losses for $\psi, \Omega, u$ , and $v$ , respectively. Similarly, $\lambda_{\psi}^{i},\lambda_{\Omega}^{i},\lambda_{u}^{i}$ , and $\lambda_v^i$ are the weights for the initial condition losses of $\psi, \Omega, u$ , and $v$ , respectively. Although there are techniques for adjusting hyperparameters [32, 33, 34], determining the optimal hyperparameters remains challenging. Therefore, we manually set all hyperparameters $\lambda_p,\lambda_\psi^b,\lambda_\Omega^b,\lambda_u^b,\lambda_v^b,\lambda_\psi^i,\lambda_\Omega^i,\lambda_u^i$ , and $\lambda_v^i$ to 1.
329
+
330
+ The MLP architecture is [3, 100, 100, 100, 100, 2], where the inputs are the coordinates $x$ , $y$ , and $t$ , and the outputs are the stream function $\psi$ and the vorticity $\Omega$ . The hidden layers of the neural network use the tanh activation function, while the final layer has no activation function. For objective comparison, we use the Adam optimizer with a learning rate of 0.001.
331
+
332
+ In the loss function Eq. (27), note that the velocities $u$ and $v$ are not separate network outputs but are derived from the stream function via Eq. (24).
333
+
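+ A small sketch of this step is shown below: the stream function is the first network output, and the velocities of Eq. (24) are recovered by automatic differentiation; the collocation points here are placeholders.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # [3, 100, 100, 100, 100, 2] MLP: inputs (x, y, t), outputs (psi, Omega).
+ net = nn.Sequential(nn.Linear(3, 100), nn.Tanh(), nn.Linear(100, 100),
+                     nn.Tanh(), nn.Linear(100, 100), nn.Tanh(),
+                     nn.Linear(100, 100), nn.Tanh(), nn.Linear(100, 2))
+
+ xyt = torch.rand(1000, 3, requires_grad=True)        # collocation points
+ psi, omega = net(xyt).unbind(dim=1)
+ grads = torch.autograd.grad(psi.sum(), xyt, create_graph=True)[0]
+ u, v = grads[:, 1], -grads[:, 0]                      # u = psi_y, v = -psi_x
+ ```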
334
+ Fig. 6 shows a comparison of the exact solution and the predicted stream function from PINNs at different times (0.3, 0.6, 1.0) and different $w$ values. Fig. 7 demonstrates a comparison of the exact solution and the predicted vorticity from PINNs at different times (0.3, 0.6, 1.0). We can observe that PINNs can simulate the Taylor Green vortex accurately.
335
+
336
+ To more accurately demonstrate the performance of PINNs on the Taylor Green vortex, we need to observe the evolution of the relative error $\mathcal{L}_2$ as the iterations progress. The calculation formula for $\mathcal{L}_2$ is as follows:
337
+
338
+ $$
339
+ \mathcal {L} _ {2} = \frac {\left\| \boldsymbol {u} _ {\text {e x a c t}} - \boldsymbol {u} _ {\text {p r e d}} \right\| _ {2}}{\left\| \boldsymbol {u} _ {\text {e x a c t}} \right\| _ {2}} \tag {28}
340
+ $$
341
+
342
+ where $||\cdot ||_2$ is the $L_{2}$ norm. Fig. 8 shows the evolution of the relative error of PINNs as the iterations progress. We can clearly see that as $w$ increases, the accuracy of PINNs decreases. This is due to the spectral bias of fully connected neural networks [33]. Spectral bias refers to the phenomenon where fully connected neural networks converge more readily on low frequencies than on high frequencies. Moreover, $w$ corresponds to the frequency of the target solution, so a larger $w$ means a higher-frequency solution, which fully connected neural networks fit less well.
343
+
344
+ ![](images/bacd7f7ef321e457c387929affafe9f7a1a0ae1d0118c7d0a45e9871efbd9d81.jpg)
345
+ Fig. 6. Performance of PINNs in the strong form on the stream function: for different $w$ values ( $w = 1.0 * \pi$ , $w = 2.0 * \pi$ , and $w = 3.0 * \pi$ ), at different times ( $t = 0.3$ , $t = 0.6$ , and $t = 1.0$ )
346
+
347
+ ![](images/3d01743ce818706cfd2d6d9512516c76908a16a109ebc02089d9bce87c9a4ea9.jpg)
348
+ Fig. 7. Performance of PINNs in the strong form on vorticity: for different $w$ values ( $w = 1.0 * \pi$ , $w = 2.0 * \pi$ , and $w = 3.0 * \pi$ ), at different times ( $t = 0.3$ , $t = 0.6$ , and $t = 1.0$ )
349
+
350
+ ![](images/2cbf772b8ea0a2dc21e6adba211203c997a0048b6ca2ec03f95877505544fa79.jpg)
351
+ Fig. 8. Evolution of the relative error $\mathcal{L}_2$ of PINNs in the strong form on the stream function and vorticity
352
+
353
+ ![](images/fd42346d6ec96d5e71f8c50233917609cac65938969629063bffa3c1b76c7df7.jpg)
354
+
355
+ Next, we are naturally curious about the generalization performance of transfer learning on the boundary conditions in PINNs' strong form. For boundary condition generalization, we adopt a transfer learning scheme where $w$ is randomly selected from $\{\pi, 2\pi, 3\pi\}$ as the source domain, and the parameters are transferred to the other two $w$ values as the target domain. For example, PINNs are trained on $w = \pi$ , and then the parameters are directly transferred to $w = 2\pi$ or $w = 3\pi$ as the initial values. Here, we follow the transfer learning schemes described in Section 3: full finetuning, lightweight finetuning, and LoRA. Fig. 9 and Fig. 10 show the evolution trends of the relative error of the different transfer learning methods on different boundary conditions. No transfer learning refers to the relative error of PINNs trained from scratch. Full finetuning refers to fully fine-tuning the pre-trained parameters. We adopt the lightweight finetuning method described in [20, 21, 15], which only trains the final layer of the neural network. However, we found that the lightweight approach performed poorly. This is because the neural network of PINNs does not have the hierarchical features typical of CNNs used in computer vision [35]. LoRA trains only the hidden layers in the middle, without touching the input and output layers, i.e., of the architecture [3, 100, 100, 100, 100, 2], LoRA adapts only the $100 \times 100$ hidden-layer weight matrices; we set the rank of LoRA to 4. Section 5.1 shows the effects of different ranks in LoRA.
356
+
357
+ It is worth noting that full finetuning and lightweight finetuning start from the same parameters at epoch 0 of transfer learning. However, because a curve-averaging smoothing technique is used, the starting points of full finetuning and lightweight finetuning in Fig. 9 and Fig. 10 may differ slightly. In addition, the initial value of LoRA in transfer learning differs from full finetuning and lightweight finetuning, because LoRA introduces the additional initialized parameters $\mathbf{A}$ and $\mathbf{B}$ shown in Eq. (21).
358
+
359
+ Fig. 9 and Fig. 10 show that lightweight finetuning performs the worst: the change between problems is large, while the number of parameters the network is allowed to learn is too small to accommodate it. Additionally, we observe an interesting phenomenon: when the pre-trained $w$ is larger than the target $w$, LoRA generally performs better than when the pre-trained $w$ is smaller than the target $w$ (the pre-trained $w$ refers to the $w$ on which PINNs are initially trained, and the target $w$ refers to the $w$ to which the parameters are transferred). This may be because the transfer between low and high frequencies in fully connected neural networks is not symmetric; that is, high-to-low frequency transfer is more effective than low-to-high frequency transfer. This could also be caused by spectral bias: when the pre-trained $w$ is low-frequency, the neural network has already converged to a stable loss value, and if we transfer it to a high-frequency problem, the well-converged network cannot easily escape its local minimum. When pre-trained on high frequencies, however, the network has not converged to a stable local minimum, owing to its poorer fitting ability for high frequencies, and can therefore transfer more readily to lower frequencies. In summary, full finetuning
360
+
361
+ ![](images/6e4514a4e92a0e8884fb26f0c58552ffcdc02c962aaabbaa9dad5b9cf13b8dfc.jpg)
362
+
363
+ ![](images/e9fb625abd9bacc7d0d4af6d6b419cc71928a1b80512eb9c5b2c8f8a65ccb58c.jpg)
364
+
365
+ ![](images/038fb7b0787b7e189e0dfff1c4d4d35bdebcc1518e18c26db2870a6f58fc6010.jpg)
366
+
367
+ ![](images/11b45065c818b74a87463d5fde41b5e0b02a621e5102a4d777e8e659aaaca958.jpg)
368
+ Fig. 9. Evolution of the relative error $\mathcal{L}_2$ of transfer learning on the stream function in PINNs' strong form: $w = X\rightarrow Y$ indicates that in PINNs, pre-training is performed on the source domain $w = X$ , and then transfer learning is applied to the target domain $w = Y$ .
369
+
370
+ ![](images/1614f7b1a416f6fe6e859c049368c94fb7646f2443999924b899a09e53274673.jpg)
371
+
372
+ ![](images/263f447d25fd132cd73bf21ea387498d59bd9a52fd161734816a115ed284e489.jpg)
373
+
374
+ performs well in all scenarios and outperforms the case with no transfer learning.
375
+
376
+ Although full finetuning generally performs best, it has more learnable parameters than lightweight finetuning and LoRA, which makes each epoch take more time. Table 1 shows the efficiency and the number of learnable parameters for the different transfer learning methods. All computations are carried out on a single Nvidia RTX 4060 Ti GPU with 16GB memory. We note a counterintuitive result: the efficiency of lightweight finetuning is not significantly better than that of full finetuning. This is because, when PINNs approximate the differential operators with automatic differentiation [39], new computation graphs are constructed, which erodes the gains of lightweight finetuning during backpropagation; we explain this in Appendix B. Moreover, LoRA turns out to be less efficient than full finetuning: although LoRA has fewer learnable parameters, it performs more internal matrix computations than full finetuning, as we explain in Appendix C. We also tested the rank in LoRA at two extreme values, $r = 1$ and $r = 100$. The results in Table 1 show that for $r = 100$ the $\mathcal{L}_2$ error is close to that of full finetuning, since at $r = 100$ the low-rank update has full rank and is effectively equivalent to full finetuning.
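As a cross-check, the trainable-parameter counts in Table 1 are reproduced by a short calculation under one reading of the setup above, namely that LoRA replaces the three $100\times100$ hidden-to-hidden weight matrices while their bias vectors remain directly trainable (this interpretation is ours):

```python
# Parameter counts for the [3, 100, 100, 100, 100, 2] network of Section 4.1.
def full_trainable(layers=(3, 100, 100, 100, 100, 2)) -> int:
    # weights + biases of every layer
    return sum(a * b + b for a, b in zip(layers[:-1], layers[1:]))

def lora_trainable(rank: int, width: int = 100, n_hidden_weights: int = 3) -> int:
    # A (width x r) and B (r x width) per adapted weight, plus the trainable hidden biases
    return n_hidden_weights * (width * rank + rank * width) + n_hidden_weights * width

print(full_trainable())                            # 30902, the "No transfer" / "Full_finetuning" columns
print([lora_trainable(r) for r in (1, 4, 100)])    # [900, 2700, 60300], the LoRA columns
```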
377
+
378
+ # 4.2. Transfer learning for different materials
379
+
380
+ In this subsection, we verify the generalization capability of transfer learning on different materials using functionally graded porous beams as a numerical example. The characteristic of functionally graded porous beams is that the material density is an inhomogeneous field, as shown in Fig. 11. The elastic modulus and shear modulus of the functionally graded porous beams are proportional to the density.
381
+
382
+ The elastic modulus and shear modulus for the symmetric porosity distribution are given by:
383
+
384
+ $$
385
+ \begin{array}{l} E(x, y) = E_{\max} - \cos \left[ \pi \left(\frac{y}{H} - \frac{1}{2}\right) \right] \left(E_{\max} - E_{\min}\right) \tag{29} \\ G(x, y) = G_{\max} - \cos \left[ \pi \left(\frac{y}{H} - \frac{1}{2}\right) \right] \left(G_{\max} - G_{\min}\right) \end{array}
386
+ $$
387
+
388
+ ![](images/c23822aac97c6fa8e509574a8fdb614154ea6215b8cd50308c085f8d712ec27a.jpg)
389
+
390
+ ![](images/caf84af4bcccdd4930fc81a2eb76d7c84b32ab793c42e4071e899df3874af54b.jpg)
391
+
392
+ ![](images/d13d2e0021e89833079b1874c4901aca7171452bd6b09e7ca77c7429d8dbabce.jpg)
393
+
394
+ ![](images/f2ea3eb01aff18dcdb4746745a315feb4766dbed0f383989102de02a2910211f.jpg)
395
+ Fig. 10. Evolution of the relative error $\mathcal{L}_2$ of transfer learning on the vorticity in PINNs' strong form: $w = X \to Y$ indicates that in PINNs, pre-training is performed on the source domain $w = X$ , and then transfer learning is applied to the target domain $w = Y$ .
396
+
397
+ ![](images/408fc9a8e89ef1d0e72471791b3e11026ee75debf378ecf95831c2cf8ecf38de.jpg)
398
+
399
+ ![](images/01937bb0fe096798a0db8cfa0a3785a91f5f2536dda53452826a01f8c220dceb.jpg)
400
+
401
+ Table 1 Accuracy and efficiency of different transfer learning methods: The relative error $\mathcal{L}_2$ is recorded at the 30,000th iteration. $w = X\rightarrow Y$ indicates that PINNs are pre-trained on $w = X$ as the source domain, then transfer learning is performed on $w = Y$ as the target domain. The bold numbers represent the best solutions among all transfer learning solutions.
402
+
403
+ <table><tr><td></td><td>No transfer</td><td>Full_finetuning</td><td>Lightweight_finetuning (last)</td><td>LoRA (r=1)</td><td>LoRA (r=4)</td><td>LoRA (r=100)</td></tr><tr><td>Time (s, 1000 Epochs)</td><td>14.6</td><td>14.6</td><td>12.3</td><td>14.6</td><td>14.8</td><td>14.9</td></tr><tr><td>Trainable parameters</td><td>30902</td><td>30902</td><td>202</td><td>900</td><td>2700</td><td>60300</td></tr><tr><td>Vorticity: L2(w=π→2π)</td><td>0.014540</td><td>0.010842</td><td>0.27463</td><td>0.10166</td><td>0.021114</td><td>0.014770</td></tr><tr><td>Vorticity: L2(w=π→3π)</td><td>0.039597</td><td>0.037837</td><td>1.1711</td><td>1.2465</td><td>0.21432</td><td>0.079696</td></tr><tr><td>Vorticity: L2(w=2π→π)</td><td>0.0078504</td><td>0.0051498</td><td>0.38377</td><td>0.0096263</td><td>0.0029064</td><td>0.0039211</td></tr><tr><td>Vorticity: L2(w=2π→3π)</td><td>0.039597</td><td>0.033586</td><td>1.51422</td><td>0.28875</td><td>0.070175</td><td>0.0040570</td></tr><tr><td>Vorticity: L2(w=3π→π)</td><td>0.0078504</td><td>0.0049677</td><td>0.21606</td><td>0.017968</td><td>0.0035354</td><td>0.0049142</td></tr><tr><td>Vorticity: L2(w=3π→2π)</td><td>0.014540</td><td>0.0098385</td><td>0.21788</td><td>0.032850</td><td>0.010454</td><td>0.0065526</td></tr><tr><td>Stream function: L2(w=π→2π)</td><td>0.088086</td><td>0.047082</td><td>0.41158</td><td>0.60451</td><td>0.060649</td><td>0.10954</td></tr><tr><td>Stream function: L2(w=π→3π)</td><td>0.43473</td><td>0.36305</td><td>3.2088</td><td>15.529</td><td>4.0337</td><td>3.0178</td></tr><tr><td>Stream function: L2(w=2π→π)</td><td>0.018961</td><td>0.010571</td><td>0.65634</td><td>0.021319</td><td>0.023206</td><td>0.01343</td></tr><tr><td>Stream function: L2(w=2π→3π)</td><td>0.43473</td><td>0.33061</td><td>3.5122</td><td>1.5075</td><td>0.25494</td><td>0.42985</td></tr><tr><td>Stream function: L2(w=3π→π)</td><td>0.018961</td><td>0.012136</td><td>0.64111</td><td>0.097851</td><td>0.019430</td><td>0.011005</td></tr><tr><td>Stream function: L2(w=3π→2π)</td><td>0.088086</td><td>0.057240</td><td>0.58412</td><td>0.62227</td><td>0.060155</td><td>0.021474</td></tr></table>
404
+
405
+ ![](images/e762d260b9df460fb38b138dfee5aecba24c74e6aa356d9571d6372a68b36cf5.jpg)
406
+ Fig. 11. Schematic of functionally graded porous beams: Both ends are clamped. The upper surface of the beam is subjected to a uniform load $f = 1N / m$ . The symmetric and asymmetric porosity distributions are two different material distributions for functionally graded porous beams.
407
+
408
+ ![](images/56e9182f8eae36f3e7516f6048e4f5dd55cc7cd57443319ae281710578db010d.jpg)
409
+
410
+ For the asymmetric porosity distribution, the elastic modulus and shear modulus are:
411
+
412
+ $$
413
+ \begin{array}{l} E(x, y) = E_{\max} - \cos \left(\pi \frac{y}{2 H}\right) \left(E_{\max} - E_{\min}\right) \tag{30} \\ G(x, y) = G_{\max} - \cos \left(\pi \frac{y}{2 H}\right) \left(G_{\max} - G_{\min}\right), \end{array}
414
+ $$
415
+
416
+ where $E_{max}$ and $E_{min}$ represent the maximum and minimum elastic moduli, respectively, and $G_{max}$ and $G_{min}$ represent the maximum and minimum shear moduli. Note that the material density field only depends on $y$ and not on $x$ , which means the mass density varies only in the $y$ direction. The symmetric porosity distribution has the highest density at the ends ( $y = 0$ and $y = H$ ) and the lowest at the center ( $y = H/2$ ), while the asymmetric porosity distribution has the highest density at the top and the lowest at the bottom.
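Read as code, Eq. (29) and Eq. (30) are two scalar fields of $y$ alone. A small sketch with the $E_{max} = 200$, $E_{min} = 100$ values used later in this section (the beam height $H = 1$ is our assumption, consistent with the $401\times101$ grid of spacing 0.01 described below):

```python
import numpy as np

E_MAX, E_MIN, H = 200.0, 100.0, 1.0   # H is an assumed beam height for illustration

def E_symmetric(y):
    """Eq. (29): stiffest at y = 0 and y = H, most porous at mid-height."""
    return E_MAX - np.cos(np.pi * (y / H - 0.5)) * (E_MAX - E_MIN)

def E_asymmetric(y):
    """Eq. (30): stiffest at the top (y = H), most porous at the bottom (y = 0)."""
    return E_MAX - np.cos(np.pi * y / (2.0 * H)) * (E_MAX - E_MIN)

y = np.linspace(0.0, H, 5)
print(E_symmetric(y))    # 200 at the ends, 100 at y = H/2
print(E_asymmetric(y))   # 100 at y = 0, 200 at y = H
```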
417
+
418
+ The functionally graded porous beams are governed by the classical elastic mechanics PDEs, with the governing equations given by:
419
+
420
+ $$
421
+ \begin{array}{l} \sigma_ {\alpha \beta , \beta} + f _ {\alpha} = 0 \\ \sigma_ {\alpha \beta} = 2 G \varepsilon_ {\alpha \beta} + \lambda \varepsilon_ {k k} \delta_ {\alpha \beta}, \tag {31} \\ \varepsilon_ {\alpha \beta} = \frac {1}{2} \left(u _ {\alpha , \beta} + u _ {\beta , \alpha}\right) \\ \end{array}
422
+ $$
423
+
424
+ where $\alpha, \beta = x, y$ . We solve the plane stress problem, thus $\varepsilon_{zz} = -\frac{v}{1 - v}\left(\varepsilon_{xx} + \varepsilon_{yy}\right)$ . The Lamé coefficients are given by:
425
+
426
+ $$
427
+ \lambda = \frac {v E}{(1 + v) (1 - 2 v)}, \tag {32}
428
+ $$
429
+
430
+ $$
431
+ G = \frac {E}{2 (1 + v)}
432
+ $$
433
+
434
+ where $E$ is the elastic modulus and $v$ is Poisson's ratio.
435
+
436
+ Eq. (31) is the strong form of the PDEs. Solving it with the strong form of PINNs requires tuning many hyperparameters, and the highest derivative order is two [28]. Since the variational principle converts Eq. (31) into a corresponding energy form, solving with the PINNs energy form (Deep Energy Method) is more suitable here, because it significantly reduces the number of hyperparameters and lowers the highest derivative order to one. The energy form of Eq. (31) is:
437
+
438
+ $$
439
+ \mathcal {L} = \int_ {\Omega} \frac {1}{2} \sigma_ {\alpha \beta} \varepsilon_ {\alpha \beta} d \Omega - \int_ {\Omega} f _ {\alpha} u _ {\alpha} d \Omega - \int_ {\Gamma^ {t}} \bar {t} _ {\alpha} u _ {\alpha} d \Gamma , \tag {33}
440
+ $$
441
+
442
+ where $\bar{t}_{\alpha}$ is the force boundary condition on the Neumann boundary $\Gamma^t$ . It is easy to verify that $\delta \mathcal{L} = 0$ is equivalent to Eq. (31).
443
+
444
+ ![](images/ce2eab8612270b4ec0632c2dbec051f6ecf1ffd263c5f955d4aa94e1f32a6d02.jpg)
445
+ Fig. 12. Prediction contour maps of the displacement field $u_{y}$ for symmetric (first column) and asymmetric (second column) porosity distributions using the PINNs energy form: The first row shows the reference solution obtained from isogeometric analysis; the second row shows the DEM prediction; the third row shows the absolute error.
446
+
447
+ In the DEM, we set $\mathcal{L}$ in Eq. (33) as the loss function. It is worth noting that the displacement field in the DEM must satisfy the essential boundary conditions in advance. Considering the essential boundary conditions shown in Fig. 11, which are clamped, the displacement field is assumed to be:
448
+
449
+ $$
450
+ u (x, y) = N N (x, y; \boldsymbol {\theta}) * x * (L - x), \tag {34}
451
+ $$
452
+
453
+ where $NN(x,y;\theta)$ is an MLP neural network, and $\theta$ represents the learnable parameters of the network. The structure of the MLP is [2, 100, 100, 100, 100, 2], with tanh as the activation function in the hidden layers, and no activation function in the final layer. It is easy to verify that Eq. (34) satisfies the clamped boundary conditions at both ends ( $x = 0$ and $x = L$ ).
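A PyTorch sketch of this hard enforcement of the clamped ends is given below; the layer sizes follow the [2, 100, 100, 100, 100, 2] structure stated above, while the beam length is an illustrative value (the $401\times101$ grid with 0.01 spacing suggests $L = 4$), and everything else is an assumption rather than the released implementation.

```python
import torch
import torch.nn as nn

L_BEAM = 4.0  # assumed beam length for illustration

class DisplacementNet(nn.Module):
    def __init__(self, sizes=(2, 100, 100, 100, 100, 2)):
        super().__init__()
        layers = []
        for a, b in zip(sizes[:-1], sizes[1:]):
            layers += [nn.Linear(a, b), nn.Tanh()]
        self.net = nn.Sequential(*layers[:-1])   # drop the activation on the output layer

    def forward(self, xy: torch.Tensor) -> torch.Tensor:
        x = xy[:, :1]
        # Eq. (34): the multiplier x * (L - x) enforces u = 0 on both clamped ends exactly.
        return self.net(xy) * x * (L_BEAM - x)

u = DisplacementNet()(torch.rand(16, 2))   # (N, 2) tensor of (u_x, u_y)
```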
454
+
455
+ In Eq. (29) and Eq. (30), $E_{max}$ and $E_{min}$ are set to 200 and 100, respectively, and Poisson's ratio is 1/3. For an objective comparison, we use the Adam optimizer with a learning rate of 0.001 in all cases. Since, under this load, the $y$-direction displacement $u_y$ is the dominant displacement component, the predicted contour map of the DEM displacement field $u_y$ is shown in Fig. 12. We observe that the DEM-predicted displacement field aligns well with the exact solution. Note that we use isogeometric analysis (IGA) as the reference solution [36]. The collocation points for both DEM and IGA are arranged on a uniform grid with a spacing of 0.01, resulting in a $401 \times 101$ grid. The integration scheme in DEM is the trapezoidal rule rather than traditional Monte Carlo integration, because the integration scheme significantly affects the accuracy of DEM [26]; Monte Carlo integration does not perform as well as trapezoidal, Simpson's, or triangular integration in DEM [37].
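Since the collocation points form a uniform grid, the domain integrals of Eq. (33) can be evaluated with a composite trapezoidal rule rather than Monte Carlo sampling. A minimal sketch of that step, assuming a recent PyTorch that provides `torch.trapezoid` and taking the energy density as a grid of values:

```python
import torch

def integrate_2d(density: torch.Tensor, hx: float = 0.01, hy: float = 0.01) -> torch.Tensor:
    """Composite trapezoidal rule over a uniform (ny, nx) grid of density values."""
    return torch.trapezoid(torch.trapezoid(density, dx=hx, dim=-1), dx=hy, dim=-1)

# Sanity check: integrating the constant 1 over the 4.0 x 1.0 beam returns its area.
ones = torch.ones(101, 401)   # the 401 x 101 grid with spacing 0.01
print(integrate_2d(ones))     # ~4.0
```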
456
+
457
+ Fig. 13 shows the relative errors of the DEM displacement $\mathcal{L}_2$ and Von Mises stress $\mathcal{H}_1$ as they evolve with iterations. The Von Mises stress is calculated as:
458
+
459
+ $$
460
+ M i s e s = \sqrt {\frac {3}{2} \left(\sigma_ {i j} - \frac {1}{3} \sigma_ {k k} \delta_ {i j}\right) \left(\sigma_ {i j} - \frac {1}{3} \sigma_ {m m} \delta_ {i j}\right)}. \tag {35}
461
+ $$
462
+
463
+ We observe that the relative error of the Von Mises stress $\mathcal{H}_1$ is larger than the displacement error $\mathcal{L}_2$, owing to the stress singularity at the corner points; numerical stress solutions near such points always carry some error. Additionally, since the stress requires the first derivative of the displacement field, the stress error is naturally higher than the displacement error (the higher the derivative order in PINNs, the larger the error [38]).
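For reference, under the plane-stress assumption used here ($\sigma_{zz} = \sigma_{xz} = \sigma_{yz} = 0$), Eq. (35) reduces to a simple expression in the in-plane components:

```python
import numpy as np

def von_mises_plane_stress(sxx, syy, sxy):
    """Eq. (35) specialized to plane stress (sigma_zz = sigma_xz = sigma_yz = 0)."""
    return np.sqrt(sxx**2 + syy**2 - sxx * syy + 3.0 * sxy**2)

# Uniaxial tension recovers the applied stress, as expected.
print(von_mises_plane_stress(100.0, 0.0, 0.0))   # 100.0
```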
464
+
465
+ Next, we demonstrate the transfer learning effects for different materials using the PINNs energy form. The idea behind our transfer learning approach is to use the symmetric porosity distribution as the source domain,
466
+
467
+ ![](images/40bb6cbfea26b1f5662cde10f5bca1b8b5df7345292fe107a2fd30b371f764a3.jpg)
468
+ Fig. 13. Comparison of relative displacement $u_y$ error $\mathcal{L}_2$ (left) and Von Mises stress error $\mathcal{H}_1$ (right) for symmetric and asymmetric porosity distributions using the PINNs energy form.
469
+
470
+ ![](images/cfb7971ae3bb26803b912b4ce9afcea3ac17899c3e5ba3689cbfd2a4b8cc0164.jpg)
471
+
472
+ Table 2 Accuracy and efficiency of different transfer learning approaches for material generalization: The relative errors $\mathcal{L}_2(u_y)$ and $\mathcal{H}_1$ (Von Mises) are computed at 100,000 iterations. $Sym\rightarrow Asym$ indicates that the symmetric porosity distribution is the source domain, and the asymmetric porosity distribution is the target domain. The reverse is also true.
473
+
474
+ <table><tr><td></td><td>No transfer</td><td>Full_finetuning</td><td>Lightweight_finetuning (last)</td><td>LoRA (r=1)</td><td>LoRA (r=4)</td><td>LoRA (r=100)</td></tr><tr><td>Time (s, 1000 Epochs)</td><td>32.41</td><td>32.41</td><td>30.30</td><td>31.77</td><td>31.81</td><td>32.3</td></tr><tr><td>Trainable parameters</td><td>30802</td><td>30802</td><td>202</td><td>900</td><td>2700</td><td>60300</td></tr><tr><td>Displacement L2 relative error: Sym → Asym</td><td>0.017427</td><td>0.0066538</td><td>0.013616</td><td>0.0044375</td><td>0.0026861</td><td>0.0035537</td></tr><tr><td>Displacement L2 relative error: Asym → Sym</td><td>0.017698</td><td>0.0082514</td><td>0.020277</td><td>0.0087151</td><td>0.0050296</td><td>0.0086635</td></tr><tr><td>Stress H1 relative error: Sym → Asym</td><td>0.086220</td><td>0.050413</td><td>0.072096</td><td>0.039482</td><td>0.028944</td><td>0.03093</td></tr><tr><td>Stress H1 relative error: Asym → Sym</td><td>0.086337</td><td>0.059366</td><td>0.094390</td><td>0.061203</td><td>0.047018</td><td>0.057451</td></tr></table>
475
+
476
+ i.e., the parameters of the DEM model after 100,000 epochs are used as the initial values for the neural network parameters. The asymmetric porosity distribution is then treated as the target domain, and vice versa. Fig. 14 and Fig. 15 show the effects of different transfer learning methods on the generalization of the materials. In the case of lightweight fine-tuning, only the last layer of the neural network is trained, while the other parameters remain fixed. In the LoRA method, we set the rank to 4. We observe that among all the transfer learning approaches, LoRA yields the best performance.
477
+
478
+ To further compare the precision and efficiency of different transfer learning approaches for material generalization, we vary the rank in LoRA to two extreme values: 1 and 100. Table 2 presents the results of various transfer learning strategies. We observe that the lowest errors for material generalization are achieved when the rank in LoRA is set to 4. The performance of full fine-tuning and LoRA with rank 100 is similar, as a higher rank in LoRA approaches the effect of full fine-tuning, as shown in Fig. 4c. Additionally, lightweight fine-tuning performs poorly, which is consistent with the results in Table 1, and in some cases, it even increases the relative errors $\mathcal{L}_2$ and $\mathcal{H}_1$ compared to no transfer learning.
479
+
480
+ # 4.3. Transfer learning for different geometries
481
+
482
+ This numerical example explores the generalization capability of transfer learning across different geometries. The example we adopt is a square plate with a hole, as shown in Fig. 16. The square plate with a hole is a classic benchmark for stress concentration in solid mechanics, where stress concentration occurs at the edge of the hole. The material is linear elastic, with an elastic modulus of $E = 1000$ MPa and a Poisson's ratio of 0.3. The governing equations are the same as in Eq. (31).
483
+
484
+ ![](images/c03f4946f0218c6b4f2c4490e00b0bcfa167c6d209ad156f3b12269a385b234c.jpg)
485
+ Fig. 14. Relative error evolution of $\mathcal{L}_2(u_y)$ for material generalization in the PINNs energy form: Sym and Asym represent the symmetric and asymmetric porosity distributions, respectively. $Sym \rightarrow$ Asym indicates that the symmetric porosity distribution is the source domain, and the asymmetric porosity distribution is the target domain. Lightweight finetuning only trains the last layer of the neural network, while LoRA finetuning uses a rank of 4.
486
+
487
+ ![](images/2699f8bf27c8dfa57ab5790883351faa1d397129684118faf3e2082df7280055.jpg)
488
+
489
+ ![](images/cbf4758cf24c2515f7273e39badef9f5a8743877c544cdbe336cd9c653f8de65.jpg)
490
+ Fig. 15. Relative error evolution of $\mathcal{H}_1$ (Von Mises) for material generalization in the PINNs energy form: Sym and Asym represent the symmetric and asymmetric porosity distributions, respectively. $Sym \rightarrow Asym$ indicates that the symmetric porosity distribution is the source domain, and the asymmetric porosity distribution is the target domain. Lightweight finetuning only trains the last layer of the neural network, while LoRA finetuning uses a rank of 4.
491
+
492
+ ![](images/3452eae803316326a79fb053cde9c4a2be7c30f406bcaf42fe0ef7f4368579dc.jpg)
493
+
494
+ ![](images/9417d668c236433152007e12444a1207423eb7693ddc60b7d4e86a94d3516ed4.jpg)
495
+ Fig. 16. Schematic of the square plate with a hole: The left figure shows a square plate with a circular hole of radius $5\mathrm{mm}$ and the plate has a side length of $20\mathrm{mm}$ . The load is a uniform load of $100N / mm$ applied to the right boundary of the plate. The right figure shows a square plate with an elliptical hole, where the major and minor axes of the ellipse are $10\mathrm{mm}$ and $5\mathrm{mm}$ , respectively. The plate has a side length of $20\mathrm{mm}$ , and the load is a uniform load of $100N / mm$ applied to the right boundary of the plate.
496
+
497
+ ![](images/291e820551125ed41af4e23d0c144556a06eea31dde40c191722ae35de228e92.jpg)
498
+
499
+ Since this is an elastic mechanics problem, there exists a corresponding energy form of PINNs (DEM), and the problem can also be solved using the strong form of PINNs [26]. Because the strong form of PINNs involves too many hyperparameters for this elastic mechanics problem, we only use DEM here. DEM requires the essential boundary conditions to be satisfied in advance, so the neural network constructs the admissible displacement field as:
500
+
501
+ $$
502
+ u _ {x} = x \cdot N N \left(\frac {\boldsymbol {x}}{L}; \boldsymbol {\theta}\right) \tag {36}
503
+ $$
504
+
505
+ $$
506
+ u _ {y} = y \cdot N N \left(\frac {\boldsymbol {x}}{L}; \boldsymbol {\theta}\right),
507
+ $$
508
+
509
+ where $L = 20$ is the side length of the square plate, and $x / L$ is used for normalization. Since the original size of the input coordinates is much larger than $1$ ( $L \gg 1$ ), using the original scale would lead to the vanishing gradient problem in the neural network. This occurs because large input values cause the tanh activation function to operate in its saturation region, where its derivative becomes very small. As a result, during backpropagation, the gradients of the trainable parameters diminish significantly, causing the learning process to stagnate. The accuracy of DEM largely depends on the integration scheme. For DEM, we use triangular integration, as it performs better than traditional Monte Carlo integration in DEM [26].
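The saturation argument can be checked with a one-line calculation: at an unnormalized coordinate of order $L = 20$ the derivative of tanh effectively vanishes, whereas at the normalized value it stays of order one. The numbers below are only a caricature of the first-layer pre-activation, but they illustrate why the inputs are rescaled by $1/L$.

```python
import numpy as np

def dtanh(z):
    """Derivative of the tanh activation."""
    return 1.0 - np.tanh(z) ** 2

print(dtanh(20.0))         # 0.0 to double precision: the gradient path is numerically dead
print(dtanh(20.0 / 20.0))  # ~0.42: normalized inputs keep the gradient usable
```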
510
+
511
+ Fig. 17 shows the predicted displacement field $u_{mag}$ and Von Mises stress results from DEM (after 60,000 epochs), where the displacement field is $u_{mag} = \sqrt{u_x^2 + u_y^2}$ , and the Von Mises stress is as shown in Eq. (35). The reference solution is obtained using the finite element software Abaqus. The plate with a circular hole has 18,000 CPS8R elements, and the plate with an elliptical hole has 14,993 CPS8R elements. Both the circular and elliptical cases have undergone finite element convergence analysis, ensuring the reliability of the reference solution. For DEM, the plate with a circular hole and the plate with an elliptical hole use 75,309 and 71,403 uniformly distributed triangular integration points, respectively.
512
+
513
+ To quantify the accuracy of DEM, Fig. 18 shows the evolution of the relative error for DEM in the square plate with a hole problem. The results show that DEM can converge well to the reference solution. Next, we test the effect of transfer learning on geometry generalization.
514
+
515
+ ![](images/11bfad10ba5022bbeda82c601d85ccdb9b910e035db40c58e8854e9863235784.jpg)
516
+ Fig. 17. Predicted displacement field $(u_{mag})$ and Von Mises stress for the square plate with a hole: The first row shows the reference solution computed using Abaqus, the second row shows the predicted solution from DEM, and the third row shows the absolute error.
517
+
518
+ ![](images/a10901c4c70c0940ce2b8581f6293e703b97fb803758540db97142073666f4d0.jpg)
519
+ (a)
520
+
521
+ ![](images/2637d8e7db98ea5a2232f7687826bc8960ec4cf7b4f6caec1a91e0f38f2ecbdd.jpg)
522
+ (b)
523
+ Fig. 18. Evolution of the relative error for the displacement field $\mathcal{L}_2$ $u_{mag}$ (a) and Von Mises stress $\mathcal{H}_1$ (b) for the square plate with a hole.
524
+
525
+ ![](images/9526234b99c8a07ccc87d74b79ef55ecdda2ba0604edea1fc83e5a6bf3e43233.jpg)
526
+
527
+ ![](images/157a22ca43d75b1b47623d23ebd1ade9778e5898451709e985be27029ded8ada.jpg)
528
+
529
+ ![](images/85b9ef5090ec39df5e50702179a74052d64ee1d3d6e16144c31cfa2a81c7cbfc.jpg)
530
+
531
+ ![](images/bddf19a9ef1980826fa32e6da48fbed0a7b2a08467695d207e9040b6345ba761.jpg)
532
+ Fig. 19. Performance of transfer learning on the lines $x = 0$ and $y = 0$ for the square plate with a hole: (a, b, c) Transfer from the elliptical hole plate to the circular hole plate, where the "circle" curve represents the result without transfer learning. (d, e, f) Transfer from the circular hole plate to the elliptical hole plate, where the "ellipse" curve represents the result without transfer learning. The first column shows the $y$ -direction displacement $u_y$ on the line $x = 0$ ; the second column shows the Von Mises stress on the line $x = 0$ ; the third column shows the $x$ -direction displacement $u_x$ on the line $y = 0$ .
533
+
534
+ ![](images/47df18a2a01d6981e2b1662b499224285f6a262c85e95575b364d86e84e69857.jpg)
535
+
536
+ ![](images/46c60c057e6c716c0c96a65616a9b96e65003e9b268b503608ed63d2f899706a.jpg)
537
+
538
+ Table 3 Accuracy and efficiency of different transfer learning schemes for geometry generalization: The relative errors $\mathcal{L}_2$ ( $u_{mag}$ ) and $\mathcal{H}_1$ (Von Mises) are measured after 60,000 iterations. $Circle \rightarrow Ellipse$ refers to the case where the source domain is the plate with a circular hole, and the target domain is the plate with an elliptical hole. The reverse is also true.
539
+
540
+ <table><tr><td></td><td>No transfer</td><td>Full_finetuning</td><td>Lightweight_finetuning (last)</td><td>LoRA (r=1)</td><td>LoRA (r=4)</td><td>LoRA (r=100)</td></tr><tr><td>Time (s, 1000 Epochs)</td><td>46.82</td><td>46.82</td><td>43.48</td><td>46.53</td><td>46.77</td><td>46.80</td></tr><tr><td>Trainable parameters</td><td>30802</td><td>30802</td><td>202</td><td>900</td><td>2700</td><td>60300</td></tr><tr><td>Relative error L2 for displacement: Circle → Ellipse</td><td>0.0021302</td><td>0.0023966</td><td>0.054191</td><td>0.0020030</td><td>0.0026557</td><td>0.0024090</td></tr><tr><td>Relative error L2 for displacement: Ellipse → Circle</td><td>0.0031061</td><td>0.0031083</td><td>0.013176</td><td>0.0031666</td><td>0.0038253</td><td>0.00086261</td></tr><tr><td>Relative error H1 for stress: Circle → Ellipse</td><td>0.012843</td><td>0.011563</td><td>0.11855</td><td>0.012162</td><td>0.010285</td><td>0.012158</td></tr><tr><td>Relative error H1 for stress: Ellipse → Circle</td><td>0.0080823</td><td>0.010230</td><td>0.034274</td><td>0.0093254</td><td>0.0088848</td><td>0.0080786</td></tr></table>
541
+
542
+ The characteristic of the square plate with a hole problem is that the maximum $y$ -direction displacement $u_y$ and Von Mises stress occur on the line $x = 0$ , while the maximum $x$ -direction displacement $u_x$ occurs on the line $y = 0$ . Therefore, we explore the performance of transfer learning on the lines $x = 0$ and $y = 0$ . We adopt three transfer learning schemes: full finetuning, lightweight finetuning (only training the last layer), and LoRA with a rank of 4. Fig. 19 shows the performance of transfer learning on the lines $x = 0$ and $y = 0$ . The results indicate that lightweight finetuning performs the worst, while full finetuning and LoRA perform the best. Note that the results are obtained after 60,000 epochs for both the source domain and target domain.
543
+
544
+ To quantify the accuracy of transfer learning, Fig. 20 shows the evolution of the relative error for different transfer learning schemes. The results demonstrate that transfer learning can significantly improve the convergence speed. Table 3 provides the specific efficiency and accuracy of different transfer learning schemes. The results show that LoRA achieves the highest accuracy, although the improvement is not significant.
545
+
546
+ ![](images/13a845a04014aa42223c7e2309ae7393e7389ef864eedc8999489b6941e32aaa.jpg)
547
+
548
+ ![](images/a01889ae77903015f0e2c10e5501aafdff0c552b89cadde822142ce9901e0fee.jpg)
549
+
550
+ ![](images/90198dfa774f535a084dbe1cf96130e04c2c6360fae8855cc6aa94edf6e746f5.jpg)
551
+ Fig. 20. Evolution of the relative error for displacement $(u_{mag})$ and Von Mises stress under different transfer learning schemes for the square plate with a hole: (a) Evolution of the relative error for the displacement field of the circular hole plate. (b) Evolution of the relative error for the Von Mises stress of the circular hole plate. (c) Evolution of the relative error for the displacement field of the elliptical hole plate. (d) Evolution of the relative error for the Von Mises stress of the elliptical hole plate. (a, b) The source domain is the elliptical hole plate, and the target domain is the circular hole plate. (c, d) The source domain is the circular hole plate, and the target domain is the elliptical hole plate.
552
+
553
+ ![](images/4c82ff9bd0bd330d840ce123afdebaf8069479da4db0059438cb9025c9e405f8.jpg)
554
+
555
+ ![](images/c56c417af3b8abe035db9eb5f195febc8c805148edb1dd836c7636df1356b1f4.jpg)
556
+
557
+ ![](images/1bc3d58d44901c07766eccb1e8a46a71fa6e5b09108a96286278e65f9be22405.jpg)
558
+
559
+ ![](images/a995b577ce6f5ee50b867ea7c1119113f90b1a95d54f02010754ee5d13841c34.jpg)
560
+
561
+ ![](images/eef2fe14b1d5e6cf15fadbbd4ca2a6ef3c6acdd6e31a65b16ef23aa8392f6f88.jpg)
562
+ Fig. 21. Performance of different ranks in LoRA for PINNs: We vary the rank in LoRA as $1,4,8,12,\ldots,96,100$ and observe the relative error $\mathcal{L}_2$ for the problem in Section 4.1. $w = X\rightarrow Y$ indicates that PINNs are pre-trained on $w = X$ as the source domain and then fine-tuned on $w = Y$ as the target domain. The relative error $\mathcal{L}_2$ is averaged over the last 1,000 iterations.
563
+
564
+ ![](images/df3c09574ffb9e716d4e99a37374215af1ab85872b4477d0fc59eefb4e21318a.jpg)
565
+
566
+ ![](images/1d8407eb978d4f8491ce2383f6b51258c6964c7abc3b1233fd464801834a0d6c.jpg)
567
+
568
+ # 5. Discussion
569
+
570
+ # 5.1. The Rank in LoRA
571
+
572
+ Theoretically, LoRA is a well-suited technique for fine-tuning PINNs, and it serves as a more general form of full finetuning and lightweight finetuning. However, in this manuscript all experiments assume a predetermined rank for LoRA, with most cases using $r = 4$. Therefore, finding an appropriate method to determine the rank in LoRA is crucial. Fig. 21 illustrates the results of different ranks in LoRA for the case described in Section 4.1. LoRA is applied only to the hidden-to-hidden layers of width 100 in the fully connected structure [3, 100, 100, 100, 100, 2], with all LoRA layers sharing the same rank. It is evident that the larger the discrepancy between the source domain and the target domain, the higher the optimal rank. For example, when the source domain is $\pi$ and the target domain is $2\pi$, the optimal rank is 12. However, when the source domain is $\pi$ and the target domain is $3\pi$, the optimal rank increases to 56. This suggests that the rank in LoRA can be determined based on the similarity between the source and target domains.
573
+
574
+ The similarity can be determined based on geometry, material, boundary conditions, and PDE type, as shown in Fig. 22. For instance, we can encode the geometry, material, boundary conditions, and PDE type into feature vectors $\mathbf{V}$ and compute the similarity using the cosine similarity:
575
+
576
+ $$
577
+ \text{similarity} = \frac{\boldsymbol{V}_{s} \cdot \boldsymbol{V}_{t}}{\|\boldsymbol{V}_{s}\| \, \|\boldsymbol{V}_{t}\|}, \tag{37}
578
+ $$
579
+
580
+ where $\mathbf{V}_s$ is the feature vector of the source domain, and $\mathbf{V}_t$ is the feature vector of the target domain. Note that the feature vectors are determined by geometry, material, boundary conditions, and PDEs types.
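A minimal sketch of Eq. (37), together with one possible, purely heuristic mapping from similarity to rank, is shown below. How the feature vectors $\mathbf{V}$ should actually be constructed is left open in this work, so both the encoding and the mapping are illustrative assumptions:

```python
import numpy as np

def cosine_similarity(v_s: np.ndarray, v_t: np.ndarray) -> float:
    """Eq. (37): cosine similarity between source and target feature vectors."""
    return float(v_s @ v_t / (np.linalg.norm(v_s) * np.linalg.norm(v_t)))

def suggest_rank(similarity: float, r_min: int = 4, r_max: int = 100) -> int:
    """Heuristic (our assumption): the less similar the domains, the higher the rank."""
    return int(round(r_min + (1.0 - similarity) * (r_max - r_min)))

# Purely illustrative encodings of geometry, material, boundary conditions, and PDE type.
v_source = np.array([1.0, 0.5, 0.2, 1.0])
v_target = np.array([1.0, 0.4, 0.6, 1.0])
print(suggest_rank(cosine_similarity(v_source, v_target)))
```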
581
+
582
+ This work does not explore how to automatically determine the rank in LoRA, in particular how to encode geometry, material, boundary conditions, and PDE type into the abstract vectors $\mathbf{V}$ of Eq. (37). Additionally, all layers share the same rank, without adaptive variation from layer to layer. This remains a promising direction for transfer learning in AI for PDEs.
583
+
584
+ # 5.2. Feature Fusion in Transfer Learning
585
+
586
+ We observed an interesting phenomenon in our experiments: learning across different scenarios can improve the accuracy of PINNs. For example, as shown in Section 4.2, Fig. 23 demonstrates that training for 100,000
587
+
588
+ ![](images/b88967b94c03bd37dd40ae6047df5d2cea327d1d754f67c5ce703d1778cca961.jpg)
589
+ Fig. 22. Schematic for determining the rank $r$ in LoRA
590
+
591
+ iterations in the source domain followed by 100,000 iterations in the target domain yields higher accuracy than training for 200,000 iterations without transfer learning. Note that the total number of iterations remains the same for both cases (100,000 in the source domain and 100,000 in the target domain for transfer learning, versus 200,000 iterations without transfer learning).
592
+
593
+ This indicates that fusing different scenarios can enhance the performance of PINNs. In the future, exploring scenario fusion in PINNs, as illustrated in Fig. 24, could further improve PINNs accuracy. We emphasize that the total number of iterations remains unchanged, so efficiency is preserved while accuracy improves. Future work should investigate which scenario fusions enhance PINNs' accuracy and how many scenarios should be fused.
594
+
595
+ # 6. Conclusion
596
+
597
+ We investigated the application of transfer learning in PINNs, focusing on two key forms: the strong form and the energy form. Considering that PINNs require retraining when faced with new boundary conditions, materials, or geometries, we explored the effectiveness of transfer learning across various cases, including generalization to different boundary conditions, materials, and geometries. The results demonstrate that LoRA and full finetuning significantly improve convergence speed and accuracy in most cases, while lightweight finetuning performs poorly in most scenarios. Additionally, the rank in LoRA can be determined based on the similarity between the source and target domains.
598
+
599
+ Although we validated the performance of transfer learning in PINNs, AI for PDEs [5, 4] includes two other key approaches: operator learning [8, 7] and Physics-Informed Neural Operators (PINO) [9, 10]. In the future, we will further explore the effectiveness of transfer learning in operator learning and PINO within AI for PDEs. Moreover, determining the low-rank approximation in LoRA is crucial. While we found that the optimal rank in LoRA is related to the similarity between domains, future work should develop methods to determine the rank effectively and to adapt the rank of each hidden layer based on the similarity between the source and target domains. In addition, by leveraging Green's functions, which allow boundary conditions to be incorporated rapidly through integration, transfer learning could be used to generalize and accelerate PINNs computations across different scenarios.
600
+
601
+ We believe that transfer learning is a key technology for future large-scale computational mechanics models. For example, the recently proposed ICON [11] integrates multiple operators. In the future, transfer learning
602
+
603
+ ![](images/9284e7d3b0772d236b706e4b0069ca06d6a9c2fa58099e7a319255c612238ce8.jpg)
604
+ Fig. 23. Scenarios showing the evolution of relative errors for displacement (left) and stress (right): Symmetric refers to training without transfer learning for 200,000 iterations. Asy $\rightarrow$ Sym refers to pre-training on the asymmetric porosity distribution (source domain) for 100,000 iterations, followed by training on the symmetric porosity distribution (target domain) for another 100,000 iterations. Note that the total number of iterations is the same in both cases: 100,000 in the source domain plus 100,000 in the target domain for transfer learning, versus 200,000 iterations without transfer learning.
605
+
606
+ ![](images/341cae4e1e2073498f25a88cc04e78a41cd0e9fd4fcef37fec207d36f52fae6d.jpg)
607
+
608
+ ![](images/a950c22abbe77ff14130e844fd1c9c2d4981555f9d7c26778af50ee935963426.jpg)
609
+ Fig. 24. Schematic of scenario fusion: The left figure divides the total iterations required for PINNs into $N$ scenarios, with the required iterations being $E_{1}$ , $E_{2}$ , ..., $E_{N-1}$ , and $E_{N}$ . The last scenario $N$ is the target domain, while the first $N-1$ scenarios are sequentially trained as source domains. Note that we control $E_{PINNs} = \sum_{i=1}^{N} E_{i}$ . The right figure shows the loss function landscape of the target domain. By leveraging different source domains, the optimization process resembles jumping, allowing it to bypass many local optima.
610
+
611
+ ![](images/f97149ab04ea07877bcc8d27a7c933234d38421e2b56c897ecfea2a70e120a73.jpg)
612
+
613
+ will play a particularly important role in fine-tuning pre-trained large-scale computational mechanics models on smaller, personalized datasets.
614
+
615
+ # Declaration of competing interest
616
+
617
+ The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
618
+
619
+ # Acknowledgement
620
+
621
+ The study was supported by the Key Project of the National Natural Science Foundation of China (12332005) and a scholarship from Bauhaus-Universität Weimar.
622
+
623
+ # Appendix A. Taylor Green Vortex boundary and initial conditions setup
624
+
625
+ For the Taylor Green Vortex, we apply the initial and boundary conditions based on the analytical solution Eq. (26). The initial conditions are as follows:
626
+
627
+ $$
628
+ \psi = \frac {1}{w} \cos (w x) \cos (w y)
629
+ $$
630
+
631
+ $$
632
+ \Omega = - 2 w \cos (w x) \cos (w y)
633
+ $$
634
+
635
+ $$
636
+ u = - \cos (w x) \sin (w y) \quad . \tag {A.1}
637
+ $$
638
+
639
+ $$
640
+ v = \sin (w x) \cos (w y)
641
+ $$
642
+
643
+ $$
644
+ p = - \frac {1}{2} [ \cos^ {2} (w x) + \cos^ {2} (w y) ]
645
+ $$
646
+
647
+ Our simulation domain is $[0,1]^2$ , so we obtain the boundary conditions as:
648
+
649
+ $$
650
+ \left\{ \begin{array}{l l} \psi = \frac {1}{w} \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) & , x \in [ 0, 1 ], y = 0 \\ \Omega = - 2 w \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) & , x \in [ 0, 1 ], y = 0 \\ u = 0 & , x \in [ 0, 1 ], y = 0 \\ v = \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \sin (w x) & , x \in [ 0, 1 ], y = 0 \end{array} \right., \tag {A.2}
651
+ $$
652
+
653
+ $$
654
+ \left\{ \begin{array}{l l} \psi = \frac {1}{w} \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \cos (w) & , x \in [ 0, 1 ], y = 1 \\ \Omega = - 2 w \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \cos (w) & , x \in [ 0, 1 ], y = 1 \\ u = - \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w x) \sin (w) & , x \in [ 0, 1 ], y = 1 \\ v = \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \sin (w x) \cos (w) & , x \in [ 0, 1 ], y = 1 \end{array} \right., \tag {A.3}
655
+ $$
656
+
657
+ $$
658
+ \left\{ \begin{array}{l l} \psi = \frac {1}{w} \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w y) & , x = 0, y \in [ 0, 1 ] \\ \Omega = - 2 w \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w y) & , x = 0, y \in [ 0, 1 ] \\ u = - \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \sin (w y) & , x = 0, y \in [ 0, 1 ] \\ v = 0 & , x = 0, y \in [ 0, 1 ] \end{array} \right. \tag {A.4}
659
+ $$
660
+
661
+ $$
662
+ \left\{ \begin{array}{l l} \psi = \frac {1}{w} \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w) \cos (w y) & , x = 1, y \in [ 0, 1 ] \\ \Omega = - 2 w \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w) \cos (w y) & , x = 1, y \in [ 0, 1 ] \\ u = - \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \cos (w) \sin (w y) & , x = 1, y \in [ 0, 1 ] \\ v = \exp \left(- \frac {2 w ^ {2}}{R e} t\right) \sin (w) \cos (w y) & , x = 1, y \in [ 0, 1 ] \end{array} . \right. \tag {A.5}
663
+ $$
664
+
665
+ ![](images/f2dee0e0d38ffc2fdcda1c2e8c042a53d59efec6bd929d1aa0eddc6295aec85a.jpg)
666
+ Fig. B.25. Computation graph of PINNs illustration
667
+
668
+ # Appendix B. Computation graph of PINNs
669
+
670
+ We found that the efficiency of lightweight finetuning did not significantly improve compared to full finetuning. This is because PINNs require differentiation with respect to the input, which approximates the differential operator. This step creates new computation graphs. To illustrate the issue, let us consider a simple PDE:
671
+
672
+ $$
673
+ \nabla^ {2} T = 0. \tag {B.1}
674
+ $$
675
+
676
+ Fig. B.25 illustrates the computation graph of PINNs. When constructing the differential operator, new computation graphs are generated. Consider the PDE in Eq. (B.1), where the neural network is $T = NN(\pmb{x};\pmb{\theta})$, with $T$ as the network output, $\pmb{x}$ as the input coordinates, and $\pmb{\theta}$ as the trainable parameters. If the computation graph for $\pmb{x} \rightarrow T$ has $C$ steps, then the computation graph for $\pmb{x} \rightarrow \partial T / \partial \pmb{x}$ has $2C$ steps, and the computation graph for $\pmb{x} \rightarrow \partial^2 T / \partial \pmb{x}^2$ has $4C$ steps. Therefore, if the highest derivative order of the PDEs is $M$, the computation graph has at most $2^M C$ steps, where $C$ is the number of steps for the network's basic computation from coordinates to the target field.
677
+
678
+ Thus, for full finetuning, the maximum computation graph has $2^{M}C$ steps; for lightweight finetuning, where only the last layer is trained, the maximum computation graph has $(2^{M} - 1)C + 1$ steps. The ratio of computation graphs between lightweight finetuning, $C_{light}$ , and full finetuning, $C_{full}$ , is given by:
679
+
680
+ $$
681
+ \frac {C _ {\text {l i g h t}}}{C _ {\text {f u l l}}} = \frac {(2 ^ {M} - 1) C + 1}{2 ^ {M} C} = 1 - \frac {C - 1}{2 ^ {M} C}. \tag {B.2}
682
+ $$
683
+
684
+ As a result, the efficiency gain of lightweight finetuning during backpropagation, when computing gradients of the trainable parameters, decreases as the highest derivative order $M$ of the PDEs increases.
685
+
686
+ In conclusion, PINNs inherently involve the computation of differential operators, so when using PyTorch's Automatic Differentiation [39], more computation graphs are inevitably created. Therefore, during backpropagation, the number of graphs that lightweight finetuning must traverse is not significantly smaller than that of full finetuning.
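The growth of the computation graph with the derivative order can be observed directly in PyTorch: each call to `torch.autograd.grad` with `create_graph=True` stacks a new graph on top of the previous one, and backpropagating the residual must traverse all of them regardless of how many parameters are later updated. The following is a minimal sketch for the Laplace problem of Eq. (B.1), with an arbitrary small network:

```python
import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(2, 16), nn.Tanh(), nn.Linear(16, 1))  # T = NN(x; theta)
x = torch.rand(32, 2, requires_grad=True)

T = net(x)                                                          # graph: x -> T
grad_T = torch.autograd.grad(T.sum(), x, create_graph=True)[0]      # graph roughly doubles
laplacian = sum(
    torch.autograd.grad(grad_T[:, i].sum(), x, create_graph=True)[0][:, i]
    for i in range(2)
)                                                                   # and doubles again

# Backpropagation of the residual loss walks through every graph built above,
# even if only the last layer's parameters were set to be trainable.
loss = (laplacian ** 2).mean()
loss.backward()
```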
687
+
688
+ # Appendix C. Computation of LoRA
689
+
690
+ Let's recall the parameters of LoRA:
691
+
692
+ $$
693
+ \boldsymbol {W} ^ {*} = \boldsymbol {W} + \alpha \boldsymbol {A} \boldsymbol {B}. \tag {C.1}
694
+ $$
695
+
696
+ When LoRA is not used, the gradient of the trainable parameters is:
697
+
698
+ $$
699
+ \frac {\partial \mathcal {L}}{\partial \boldsymbol {W}} = \frac {\partial \mathcal {L}}{\partial W _ {i j}}. \tag {C.2}
700
+ $$
701
+
702
+ When considering LoRA, the gradient of the trainable parameters is:
703
+
704
+ $$
705
+ \begin{array}{l} \frac{\partial \mathcal{L}}{\partial \boldsymbol{A}} = \frac{\partial \mathcal{L}}{\partial \boldsymbol{W}^{*}} \frac{\partial \boldsymbol{W}^{*}}{\partial \boldsymbol{A}} = \frac{\partial \mathcal{L}}{\partial W_{ij}^{*}} \frac{\partial \left(W_{ij} + \alpha A_{im} B_{mj}\right)}{\partial A_{kl}} \\ = \frac{\partial \mathcal{L}}{\partial W_{ij}^{*}} \, \alpha \, \delta_{ik} \delta_{ml} B_{mj} = \alpha \frac{\partial \mathcal{L}}{\partial W_{kj}^{*}} B_{lj} \end{array} \tag{C.3}
706
+ $$
707
+
708
+ $$
709
+ \begin{array}{l} \frac{\partial \mathcal{L}}{\partial \boldsymbol{B}} = \frac{\partial \mathcal{L}}{\partial \boldsymbol{W}^{*}} \frac{\partial \boldsymbol{W}^{*}}{\partial \boldsymbol{B}} = \frac{\partial \mathcal{L}}{\partial W_{ij}^{*}} \frac{\partial \left(W_{ij} + \alpha A_{im} B_{mj}\right)}{\partial B_{kl}} \\ = \frac{\partial \mathcal{L}}{\partial W_{ij}^{*}} \, \alpha \, A_{im} \delta_{mk} \delta_{jl} = \alpha \frac{\partial \mathcal{L}}{\partial W_{il}^{*}} A_{ik} \end{array}
710
+ $$
711
+
712
+ Since $W^{*}$ and $W$ have the same shape, the computation cost of $\frac{\partial\mathcal{L}}{\partial W_{ij}}$ is the same as $\frac{\partial\mathcal{L}}{\partial W_{ij}^*}$ . Eq. (C.3) shows that LoRA introduces an additional matrix multiplication, which means that while the number of trainable parameters is reduced in LoRA, the internal computation cost for calculating the gradient of trainable parameters is actually increased compared to not using LoRA.
713
+
714
+ This leads to the longer computation time of LoRA compared to full finetuning, as shown in Table 1.
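Eq. (C.3) can also be written in matrix form, $\partial\mathcal{L}/\partial\boldsymbol{A} = \alpha\,(\partial\mathcal{L}/\partial\boldsymbol{W}^{*})\boldsymbol{B}^{\mathsf{T}}$ and $\partial\mathcal{L}/\partial\boldsymbol{B} = \alpha\,\boldsymbol{A}^{\mathsf{T}}(\partial\mathcal{L}/\partial\boldsymbol{W}^{*})$, which makes the extra matrix product explicit. A quick numerical check of these identities (with illustrative shapes and an arbitrary loss):

```python
import torch

n, r, alpha = 6, 2, 0.5
W = torch.randn(n, n)
A = torch.randn(n, r, requires_grad=True)
B = torch.randn(r, n, requires_grad=True)

W_star = W + alpha * A @ B                     # Eq. (C.1)
W_star.retain_grad()                           # keep dL/dW* for the comparison below
loss = (W_star @ torch.randn(n)).pow(2).sum()  # arbitrary differentiable loss
loss.backward()

G = W_star.grad                                # dL/dW*
print(torch.allclose(A.grad, alpha * G @ B.t()))   # True: first line of Eq. (C.3)
print(torch.allclose(B.grad, alpha * A.t() @ G))   # True: second line of Eq. (C.3)
```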
715
+
716
+ # Appendix D. Supplementary code
717
+
718
+ The code of this work will be available at https://github.com/yizheng-wang/Research-on-Solving-Partial-Differential-Equations-of-Solid-Mechanics-Based-on-PINN after acceptance.
719
+
720
+ # References
721
+
722
+ [1] E. Samaniego, C. Anitescu, S. Goswami, V. M. Nguyen-Thanh, H. Guo, K. Hamdia, X. Zhuang, T. Rabczuk, An energy approach to the solution of partial differential equations in computational mechanics via machine learning: Concepts, implementation and applications, Computer Methods in Applied Mechanics and Engineering 362 (2020) 112790.
723
+ [2] G. E. Karniadakis, I. G. Kevrekidis, L. Lu, P. Perdikaris, S. Wang, L. Yang, Physics-informed machine learning, Nature Reviews Physics 3 (6) (2021) 422-440. doi:10.1038/s42254-021-00314-5.
724
+ [3] S. Wang, H. Wang, P. Perdikaris, Learning the solution operator of parametric partial differential equations with physics-informed deeponets, Science advances 7 (40) (2021) eabi8605.
725
+ [4] Y. Wang, J. Bai, Z. Lin, Q. Wang, C. Anitescu, J. Sun, M. S. Eshaghi, Y. Gu, X.-Q. Feng, X. Zhuang, et al., Artificial intelligence for partial differential equations in computational mechanics: A review, arXiv preprint arXiv:2410.19843 (2024).
726
+ [5] W. Yizheng, Z. Xiaoying, T. Rabczuk, L. Yinghua, Ai for pdes in solid mechanics: A review, Advances in Mechanics 54 (3) (2024) 1-57.
727
+ [6] M. Raissi, P. Perdikaris, G. E. Karniadakis, Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations, Journal of Computational Physics 378 (2019) 686-707.
728
+ [7] L. Lu, P. Jin, G. Pang, Z. Zhang, G. E. Karniadakis, Learning nonlinear operators via deeponet based on the universal approximation theorem of operators, Nature Machine Intelligence 3 (3) (2021) 218-229. doi:10.1038/s42256-021-00302-5.
729
+ [8] Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations, arXiv preprint arXiv:2010.08895 (2020).
730
+ [9] Z. Li, H. Zheng, N. Kovachki, D. Jin, H. Chen, B. Liu, K. Azizzadenesheli, A. Anandkumar, Physics-informed neural operator for learning partial differential equations, ACM/IMS Journal of Data Science 1 (3) (2024) 1-27.
731
+ [10] M. S. Eshaghi, C. Anitescu, M. Thombre, Y. Wang, X. Zhuang, T. Rabczuk, Variational physics-informed neural operator (vino) for solving partial differential equations, arXiv preprint arXiv:2411.06587 (2024).
732
+
733
+ [11] L. Yang, S. Liu, T. Meng, S. J. Osher, In-context operator learning with data prompts for differential equation problems, Proceedings of the National Academy of Sciences 120 (39) (2023) e2310142120.
734
+ [12] S. Desai, M. Mattheakis, H. Joy, P. Protopapas, S. Roberts, One-shot transfer learning of physics-informed neural networks, arXiv preprint arXiv:2110.11286 (2021).
735
+ [13] Y. Gao, K. C. Cheung, M. K. Ng, Svd-pinns: Transfer learning of physics-informed neural networks via singular value decomposition, in: 2022 IEEE Symposium Series on Computational Intelligence (SSCI), IEEE, 2022, pp. 1443-1450.
736
+ [14] F. Zhuang, Z. Qi, K. Duan, D. Xi, Y. Zhu, H. Zhu, H. Xiong, Q. He, A comprehensive survey on transfer learning, Proceedings of the IEEE 109 (1) (2020) 43-76.
737
+ [15] C. Xu, B. T. Cao, Y. Yuan, G. Meschke, Transfer learning based physics-informed neural networks for solving inverse problems in engineering structures under different loading scenarios, Computer Methods in Applied Mechanics and Engineering 405 (2023) 115852.
738
+ [16] H. Guo, X. Zhuang, P. Chen, N. Alajlan, T. Rabczuk, Analysis of three-dimensional potential problems in non-homogeneous media with physics-informed deep collocation method using material transfer learning and sensitivity analysis, Engineering with Computers 38 (6) (2022) 5423-5444.
739
+ [17] A. Chakraborty, C. Anitescu, X. Zhuang, T. Rabczuk, Domain adaptation based transfer learning approach for solving pdes on complex geometries, Engineering with Computers 38 (5) (2022) 4569-4588.
740
+ [18] X. Chen, C. Gong, Q. Wan, L. Deng, Y. Wan, Y. Liu, B. Chen, J. Liu, Transfer learning for deep neural network-based partial differential equations solving, Advances in Aerodynamics 3 (2021) 1-14.
741
+ [19] E. Haghighat, M. Raissi, A. Moure, H. Gomez, R. Juanes, A physics-informed deep learning framework for inversion and surrogate modeling in solid mechanics, Computer Methods in Applied Mechanics and Engineering 379 (2021) 113741. doi: 10.1016/j.cma.2021.113741.
742
+ [20] S. Goswami, C. Anitescu, S. Chakraborty, T. Rabczuk, Transfer learning enhanced physics informed neural network for phase-field modeling of fracture, Theoretical and Applied Fracture Mechanics 106 (2020) 102447.
743
+ [21] S. Chakraborty, Transfer learning based multi-fidelity physics informed deep neural network, Journal of Computational Physics 426 (2021) 109942.
744
+ [22] E. J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen, Lora: Low-rank adaptation of large language models, arXiv preprint arXiv:2106.09685 (2021).
745
+ [23] R. Majumdar, V. Jadhav, A. Deodhar, S. Karande, L. Vig, V. Runkana, Hyperlora for pdes, arXiv preprint arXiv:2308.09290 (2023).
746
+ [24] W. Cho, K. Lee, D. Rim, N. Park, Hypernetwork-based meta-learning for low-rank physics-informed neural networks, Advances in Neural Information Processing Systems 36 (2023) 11219-11231.
747
+ [25] Y. C. Fung, Foundations of Solid Mechanics, Prentice-Hall, Englewood Cliffs, NJ, 1965.
748
+ [26] Y. Wang, J. Sun, J. Bai, C. Anitescu, M. S. Eshaghi, X. Zhuang, T. Rabczuk, Y. Liu, Kolmogorov arnold informed neural network: A physics-informed deep learning framework for solving forward and inverse problems based on kolmogorov-arnold networks, Computer Methods in Applied Mechanics and Engineering 433 (2025) 117518.
749
+ [27] X. Zhang, L. Wang, J. Helwig, Y. Luo, C. Fu, Y. Xie, M. Liu, Y. Lin, Z. Xu, K. Yan, et al., Artificial intelligence for science in quantum, atomistic, and continuum systems, arXiv preprint arXiv:2307.08423 (2023).
750
+ [28] Y. Wang, J. Sun, W. Li, Z. Lu, Y. Liu, Cenn: Conservative energy method based on neural networks with subdomains for solving variational problems involving heterogeneous and complex geometries, Computer Methods in Applied Mechanics and Engineering 400 (2022) 115491.
751
+ [29] J. He, C. Zhou, X. Ma, T. Berg-Kirkpatrick, G. Neubig, Towards a unified view of parameter-efficient transfer learning, arXiv preprint arXiv:2110.04366 (2021).
752
+ [30] A. Radford, J. Wu, R. Child, D. Luan, D. Amodei, I. Sutskever, et al., Language models are unsupervised multitask learners, OpenAI blog 1 (8) (2019) 9.
753
+ [31] S. V. Patankar, D. B. Spalding, A calculation procedure for heat, mass and momentum transfer in three-dimensional parabolic flows, in: Numerical prediction of flow, heat transfer, turbulence and combustion, Elsevier, 1983, pp. 54-73.
754
+ [32] S. Wang, Y. Teng, P. Perdikaris, Understanding and mitigating gradient flow pathologies in physics-informed neural networks, SIAM Journal on Scientific Computing 43 (5) (2021) A3055-A3081.
755
+ [33] S. Wang, H. Wang, P. Perdikaris, On the eigenvector bias of fourier feature networks: From regression to solving multi-scale pdes with physics-informed neural networks, Computer Methods in Applied Mechanics and Engineering 384 (2021) 113938. doi:10.1016/j.cma.2021.113938.
756
+ [34] S. Wang, X. Yu, P. Perdikaris, When and why pinns fail to train: A neural tangent kernel perspective, Journal of Computational Physics 449 (2022) 110768.
757
+ [35] M. D. Zeiler, R. Fergus, Visualizing and understanding convolutional networks, in: Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part I 13, Springer, 2014, pp. 818-833.
758
+ [36] M. S. Eshaghi, M. Bamdad, C. Anitescu, Y. Wang, X. Zhuang, T. Rabczuk, Applications of scientific machine learning for the analysis of functionally graded porous beams, Neurocomputing 619 (2025) 129119.
759
+ [37] V. M. Nguyen-Thanh, X. Zhuang, T. Rabczuk, A deep energy method for finite deformation hyperelasticity, European Journal of Mechanics-A/Solids 80 (2020) 103874.
760
+ [38] Y. Wang, J. Sun, T. Rabczuk, Y. Liu, Dcem: A deep complementary energy method for solid mechanics, International Journal for Numerical Methods in Engineering (2024). doi:10.1002/nme.7585.
761
+ [39] A. Paszke, S. Gross, S. Chintala, G. Chanan, E. Yang, Z. DeVito, Z. Lin, A. Desmaison, L. Antiga, A. Lerer, Automatic differentiation in pytorch (2017).
2502.00xxx/2502.00782/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:063746dd8d332adfb897fb89769a3138d3e7b04096134952fbd4d53c0844a0cd
3
+ size 2867661
2502.00xxx/2502.00782/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00792/0b69818d-930d-4d27-bd99-411f78543698_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fa988bb12d659afdfaa66d66f508e9ff37d08ce3594954d8b6b7a950d6525c1
3
+ size 1036020
2502.00xxx/2502.00792/full.md ADDED
@@ -0,0 +1,558 @@
 
 
 
 
1
+ # RTBAgent: A LLM-based Agent System for Real-Time Bidding
2
+
3
+ Leng Cai*
4
+
5
+ caileng1923@gmail.com
6
+
7
+ South China University of Technology
8
+
9
+ Guangzhou, China
10
+
11
+ Junxuan He*
12
+
13
+ hejunxuan30@gmail.com
14
+
15
+ Shanghai University
16
+
17
+ Shanghai, China
18
+
19
+ Yikai Li
20
+
21
+ ykli82086@gmail.com
22
+
23
+ South China University of Technology
24
+
25
+ Guangzhou, China
26
+
27
+ Junjie Liang
28
+
29
+ 19jliang22@gmail.com
30
+
31
+ South China University of Technology
32
+
33
+ Guangzhou, China
34
+
35
+ Yuanping Lin
36
+
37
+ linyuanping@pazhoulab.cn
38
+
39
+ Pazhou Lab
40
+
41
+ Guangzhou, China
42
+
43
+ Ziming Quan
44
+
45
+ dragonquan1112@gmail.com
46
+
47
+ South China University of Technology
48
+
49
+ Guangzhou, China
50
+
51
+ Yawen Zeng†
52
+
53
+ yawenzeng11@gmail.com
54
+
55
+ ByteDance
56
+
57
+ Beijing, China
58
+
59
+ # ABSTRACT
60
+
61
+ Real-Time Bidding (RTB) enables advertisers to place competitive bids on impression opportunities instantaneously, striving for cost-effectiveness in a highly competitive landscape. Although RTB has widely benefited from the utilization of technologies such as deep learning and reinforcement learning, the reliability of related methods often encounters challenges due to the discrepancies between online and offline environments and the rapid fluctuations of online bidding. To handle these challenges, RTBAgent is proposed as the first RTB agent system based on large language models (LLMs), which synchronizes real competitive advertising bidding environments and obtains bidding prices through an integrated decision-making process. Specifically, obtaining reasoning ability through LLMs, RTBAgent is further tailored to be more professional for RTB via involved auxiliary modules, i.e., click-through rate estimation model, expert strategy knowledge, and daily reflection. In addition, we propose a two-step decision-making process and multi-memory retrieval mechanism, which enables RTBAgent to review historical decisions and transaction records and subsequently make decisions more adaptive to market changes in real-time bidding. Empirical testing with real advertising datasets demonstrates that RTBAgent significantly enhances profitability. The RTBAgent code will be publicly accessible at: https://github.com/CaiLeng/RTBAgent.
62
+
63
+ # CCS CONCEPTS
64
+
65
+ Computing methodologies $\rightarrow$ Artificial intelligence.
66
+
67
+ *Both authors contributed equally to this research.
68
+
69
+ † Corresponding author.
70
+
71
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
72
+
73
+ WWW Companion '25, April 28-May 2, 2025, Sydney, NSW, Australia
74
+
75
+ © 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.
76
+
77
+ ACM ISBN 979-8-4007-1331-6/25/04
78
+
79
+ https://doi.org/10.1145/3701716.3715259
80
+
81
+ Jin $\mathrm{Xu}^{\dagger}$
82
+
83
+ jinxu@scut.edu.cn
84
+
85
+ South China University of Technology
86
+
87
+ Pazhou Lab
88
+
89
+ Guangzhou, China
90
+
91
+ <table><tr><td>Method</td><td>Scheme</td><td>Tools</td><td>Adaptability</td><td>Explainability</td></tr><tr><td>Rule-based Methods</td><td>Hyperparameter Tuning</td><td>X</td><td>X</td><td>X</td></tr><tr><td>RL-based Models</td><td>Model Training</td><td>X</td><td>✓</td><td>X</td></tr><tr><td>Chat with LLMs</td><td>LLM API</td><td>X</td><td>X</td><td>✓</td></tr><tr><td>RTBAgent (ours)</td><td>Integrated Intelligence</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
92
+
93
+ Table 1: Comparison of three existing RTB decision approaches (Rule-based Methods, RL-based Models, and Chat with LLMs) with our RTBAgent.
94
+
95
+ # KEYWORDS
96
+
97
+ Real-Time Bidding, Bid Optimization, Large Language Models, Bidding Agents
98
+
99
+ # ACM Reference Format:
100
+
101
+ Leng Cai, Junxuan He, Yikai Li, Junjie Liang, Yuanping Lin, Ziming Quan, Yawen Zeng, and Jin Xu. 2025. RTBAgent: A LLM-based Agent System for Real-Time Bidding. In Companion Proceedings of the ACM Web Conference 2025 (WWW Companion '25), April 28-May 2, 2025, Sydney, NSW, Australia. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3701716.3715259
102
+
103
+ # 1 INTRODUCTION
104
+
105
+ The prominence of online advertising within the broader advertising industry is well-established, serving as a pivotal channel for reaching consumers in the digital era. In 2022, the digital advertising industry in the United States achieved a historic milestone, with total revenue exceeding \$200 billion. Programmatic advertising revenue grew by 10.5% year-on-year, reaching \$109.4 billion, underscoring its expanding significance in the digital landscape [11]. A notable advancement in online display advertising has been Real-Time Bidding (RTB), which enables the real-time buying and selling of ad impressions during a user's visit. RTB's main advantage is its ability to automate and scale the purchasing process by aggregating extensive inventory from various publishers [22]. It allows for precise targeting of individual users based on real-time behavior, marking a significant shift in digital marketing strategies.
106
+
107
+ A key challenge within RTB is the development of effective bidding strategies for advertisers. An optimal bidding strategy should
108
+
109
+ ![](images/ae42e1137e04398866255fa54780cb05d576aac7ccefb1803a22b27c74b2902f.jpg)
+
+ Figure 1: The workflow of our RTBAgent, which is equipped with 4 tools, 3 types of memory, and two-step decision-making to execute actions and output bidding prices. The panels show the bidding environment (datetime, traffic volume, bid price, given and remaining budget), incoming RTB requests with their attribute features, and the agent's tools (CTR predictor, bidding strategies), actions, and memories (bidding, environment, and reflection memory), which are updated after every step.
+
146
+ promote products to targeted users without disrupting their experience and enhance revenue for publishers. As illustrated in Table 1, traditional rule-based bidding strategies are often too rigid and fail to adapt to the dynamic nature of the market. While reinforcement learning (RL) approaches [5] offer better adaptability, they face issues such as the need for extensive training data, difficulties in achieving training convergence, and a lack of interpretability in decision-making, which affect their applicability, trustworthiness, and stability. Consequently, there is a need for more advanced machine-learning models in the RTB domain.
147
+
148
+ Recent advancements in artificial intelligence, mainly through large language models (LLMs), have introduced innovative solutions to various fields, including knowledge-based question answering [3, 15, 16, 29, 34]. However, advertising bidding tasks possess unique competitive characteristics that require heightened awareness and dynamic adjustment. While useful as assistants for bidding and answering, basic LLMs face limitations when directly applied to advertising bidding scenarios. LLM-based agent systems have recently gained attention for their ability to emulate human-like behavior and decision-making. For example, research from Stanford University [17] demonstrates how intelligent agent systems can effectively plan and execute complex tasks.
149
+
150
+ Our paper introduces RTBAgent, a novel agent framework designed to address the challenges of competitive advertising bidding environments. RTBAgent is equipped with 4 tools, 3 types of memory, and a two-step decision-making process to execute actions. As outlined in Figure 1, it simulates real-world advertising agency scenarios and enhances real-time bidding tasks through tools such as CTR predictors and various bidding strategies, integrating expert knowledge with insights into impression value and market conditions. RTBAgent features a versatile multi-memory retrieval system that updates and focuses on relevant data, minimizing noise and adapting swiftly to market dynamics. Its two-step decision-making
151
+
152
+ approach enables it to determine optimal bidding prices in real-time. It has been proven that RTBAgent can increase profitability and has great flexibility in LLM selection. Furthermore, RTBAgent stands out in interpretability, providing transparent insights into its decision-making process, which is a significant advantage over conventional methods that often operate as black boxes. With these capabilities, RTBAgent enables more informed and strategically advantageous decisions, helping advertisers achieve better return on investment in highly competitive markets. The combination of high performance and transparency of RTBAgent can not only be superior to traditional models in effect but also provide a new perspective for the interpretable research of bidding tasks in the field of computational advertising.
153
+
154
+ Our contributions are summarized as follows:
155
+
156
+ - To our knowledge, our research proposes the first bidding agency system based on LLMs, aimed at solving the bidding optimization problem in online display advertising under budget constraints.
157
+ - We innovatively propose a two-step decision-making method for RTB, integrating CTR estimation model, expert strategy knowledge, multi-memory retrieval system, and daily reflection to dynamically adjust bidding strategies to cope with the real-time changing market environment.
158
+ - Extensive experiments validate that our framework performs exceptionally well across all metrics, achieving a significant overall return.
159
+
160
+ # 2 RELATED WORK
161
+
162
+ # 2.1 LLM-based Agent Systems
163
+
164
+ LLMs are making significant strides towards achieving Artificial General Intelligence (AGI) by enhancing the capabilities of intelligent agents. These models improve autonomy, responsiveness, and social interaction skills, enabling agents to handle complex
165
+
166
+ tasks such as natural language processing, knowledge integration, information retention, logical reasoning, and strategic planning. Recent developments in intelligent agent frameworks, such as AutoGPT [25] and Metagpt [9], have advanced multi-agent collaboration by incorporating standardized operating procedures (SOPs). These frameworks facilitate research by streamlining agent system integration [7, 10, 13, 24, 32]. For instance, EduAgent [24] integrates cognitive science principles to guide LLMs, enhancing their ability to model and understand diverse learning behaviors and outcomes. Additionally, Agent Hospital [13] utilizes a large-scale language model to simulate hospital environments, enabling medical agents to adapt and improve their treatment strategies through interactive learning.
167
+
168
+ # 2.2 Bidding Optimization in RTB
169
+
170
+ RTB has been a critical focus in online advertising [21], aiming to maximize the value of ad placements within a given budget. Traditional methods employ static parameters [18, 26, 27] to optimize revenue, often using historical bid data to set bidding parameters. [27] use linear programming to address these optimization problems. Such methods usually fall short in dynamic bidding environments. Researchers have increasingly framed RTB as a sequential decision problem to overcome these limitations, applying RL techniques to enhance automated bidding strategies [2, 8, 23, 33]. DRLB [23] approaches budget-constrained bidding as a Markov decision process, offering a model-free RL framework for optimization. USCB [8] introduces an RL method that dynamically adjusts parameters for optimal performance, improving convergence rates through recursive optimization. Despite these advancements, RL faces challenges such as training complexity and interpretability, indicating a need for more robust machine learning models in RTB strategies.
171
+
172
+ # 3 PROPOSED METHOD
173
+
174
+ # 3.1 Preliminaries
175
+
176
+ 3.1.1 Problem Formulation. RTB offers various pricing schemes catering to diverse advertiser needs within the online advertising ecosystem. In a second-price auction [20], the advertiser pays the second-highest bid, denoted as $c_{i}$ , for the privilege of displaying their ad after winning the bid with $b_{i}$ as the highest bidding price. Our study focuses exclusively on the scenario within a second-price auction mechanism. It aims to maximize the achieved objective value under a given budget, as this is the most common business requirement in the industry. W.l.o.g., we consider clicks to be our primary aim value, although other key performance indicators (KPIs), such as conversions, can also be adopted. Thus, the advertiser's strategic challenge is to maximize the cumulative value of clicks, subject to budget constraints. Let $N$ represent the total number of ad impression opportunities during a specific time period, such as one day, with each impression opportunity indexed by $i$ . The optimization problem can be mathematically expressed as follows:
177
+
178
+ $$
179
+ \max \sum_{i=1}^{N} w_i \cdot v_i \quad \text{s.t.} \quad \sum_{i=1}^{N} w_i \cdot c_i \leq B, \tag{1}
180
+ $$
181
+
182
+ where $v_{i}$ and $c_{i}$ represent the value and cost of impression $i$, respectively, $w_{i}$ is a binary variable indicating whether impression $i$ is won, and $B$ is the total bidding budget. Zhang et al. [30] prove that in
183
+
184
+ a second-price auction, the optimal bid is a function of a scaling factor $\lambda$ , which governs the bid price $b_{i}$ as:
185
+
186
+ $$
187
+ b _ {i} = \lambda \cdot v _ {i}. \tag {2}
188
+ $$
189
+
190
+ Unfortunately, all participating bidders are dynamic and the auction environment is usually highly non-stationary, which makes $\lambda$ difficult to determine naively. Thus, the key to our method is to dynamically adjust $\lambda$ to adapt to the ever-changing market environment.
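+ To make this concrete, the following minimal Python sketch (ours, not the authors' released code) prices each impression as $b_i = \lambda \cdot v_i$ per Eq. (2) and replays a budget-constrained second-price auction in the spirit of Eq. (1); the impression tuples (estimated value, market price, click label) are hypothetical inputs.
+
+ ```python
+ # Minimal sketch of Eqs. (1)-(2): linear bids replayed through a second-price auction.
+ def replay_auction(impressions, lam, budget):
+     """impressions: iterable of (value v_i, market price c_i, clicked flag)."""
+     clicks, spend = 0, 0.0
+     for v, c, clicked in impressions:
+         bid = lam * v                      # Eq. (2): bid proportional to estimated value
+         if bid >= c and spend + c <= budget:
+             spend += c                     # second price: the winner pays the market price
+             clicks += int(clicked)         # objective of Eq. (1): value of won impressions
+     return clicks, spend
+ ```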
191
+
192
+ 3.1.2 Our Solution Paradigm. RTB can be viewed as a series of random events where each bid is influenced by uncertain factors [1]. To address this, we model the RTB scenario using a Markov Decision Process (MDP), a mathematical framework that captures decision-making under probabilistic state transitions. This MDP is characterized by a set of states $S$ that represent the advertising status of a campaign and an action space $\mathcal{A}$ including the adjustment parameter $a_{t}$ of the feasible bidding factor $\lambda$ . At each time step $t \in \{1, \dots, T\}$ , the agent performs actions $a_{t} \in \mathcal{A}$ based on the current state $s_{t} \in S$ to update $\lambda$ according to its policy $\pi: S \mapsto \mathcal{A}$ . The state then transitions to a new state according to the transition dynamics $\mathcal{T}: S \times \mathcal{A} \mapsto \Omega(S)$ , where $\times$ denotes the Cartesian product and $\Omega(S)$ is the set of probability distributions over $S$ . The environment provides an immediate reward to the agent based on a function of the current state and the agent's actions, denoted as $r_{t}: S \times \mathcal{A} \mapsto \mathcal{R} \subseteq \mathbb{R}$ , where $\mathcal{R}$ is the reward space.
193
+
194
+ To this end, the objective is to discover a policy $\pi$ that links states to actions, intending to maximize the total discounted reward within a set time frame, all while considering budget limitations. The policy $\pi^{*}$ that we seek is the one that maximizes the expected cumulative reward, as shown in Eq.(3):
195
+
196
+ $$
197
+ \pi_ {\theta} ^ {*} = \arg \max _ {\pi_ {\theta}} \mathbb {E} _ {\pi_ {\theta}} \left[ \sum_ {i = 0} ^ {T} \gamma^ {i} r _ {t + i} \mid s _ {t} = s \right]. \tag {3}
198
+ $$
199
+
200
+ We extend this optimization challenge to our RTBAgent, where the policy $\pi_{\theta}^{*}$ is defined as:
201
+
202
+ $$
203
+ \pi_ {\theta} ^ {*} = \arg \max _ {\pi_ {\theta}} \mathbb {E} _ {\pi_ {\theta}} \left[ \sum_ {i = 0} ^ {T} \gamma^ {i} r _ {t + i} \mid s _ {t} = s, \rho_ {t} = \rho \right], \tag {4}
204
+ $$
205
+
206
+ where $\rho (\cdot)$ is a specialized module that encapsulates beneficial internal reasoning processes, and the action policy for the RTBAgent is given by:
207
+
208
+ $$
209
+ \pi_{\mathrm{RTBAgent}}\left(a_t \mid s_t, \rho_t\right) = G\left(s_t, \rho_t\right)
210
+ $$
211
+
212
+ $$
213
+ \rho_t = \rho\left(s_t, F_t^{sum}, F_t^{tool}, F_t^{ins}, F_t^{act}, F_t^{ref}\right), \tag{5}
214
+ $$
215
+
216
+ where $G(\cdot)$ is an operation parsing function used to perform compatible formal operations in the environment. The RTBAgent, powered by LLMs, refines the inference information $\rho_t$ to include various operations, i.e., the summary of memories $F_t^{sum}$ , tools $F_t^{tool}$ , insights $F_t^{ins}$ , actions $F_t^{act}$ , and reflections $F_t^{ref}$ , which will be explained in detail later.
217
+
218
+ Due to the inherent limitations of LLMs, such as their weakness in producing continuous numerical outputs and their insensitivity to numbers, our method refines the operation based on the basic factor $\lambda_{base}$ obtained from expert bidding strategies. The optimal scaling factor $\lambda_{t}$ at each time step $t$ is determined through the adjustment action
219
+
220
+ $a_{t}$ provided by the strategy $\pi$ , i.e., $\lambda_{t} = \lambda_{base} \cdot (1 + a_{t})$ . Integrating these operations allows the RTBAgent to continuously interact with the bidding environment during training, driving it towards the optimization goal as expressed in the following equation:
221
+
222
+ $$
223
+ \pi_{\mathrm{RTBAgent}}^{*} = \arg\max_{\pi} \mathbb{E}_{\pi}\left[\sum_{i=0}^{T} \gamma^{i} r_{t+i} \mid s_t = s, \rho_t = \rho\right] \tag{6}
224
+ $$
225
+
226
+ $$
227
+ \text{s.t.}\quad \pi(a_t \mid s_t, \rho_t) = G\left(s_t, \rho_t\right) \text{ with Eq. (5)}, \quad \forall t,
228
+ $$
229
+
230
+ where the reward of RTBAgent comes from comprehensive information on decision outcomes and self-reflection, achieving self-alignment [28]. This approach ensures that RTBAgent's actions are meticulously aligned with the policy that delivers the highest expected return on bids, taking into account the current state and decision insights at each juncture.
231
+
232
+ # 3.2 Overall Framework
233
+
234
+ The RTBAgent framework, as shown in Figure 1, mirrors the operational structure of a real-world bidding firm. It integrates a comprehensive set of bidding analysis tools $\mathcal{H}$ , alongside a well-defined profile, action set $\mathcal{A}$ , and memory set $\mathcal{M}$ . The RTBAgent is guided by a configuration file that imbues it with the acumen of a bidding specialist, grounded in a context crafted for its role. It leverages bidding analysis tools to meticulously evaluate the potential value of ad impression requests, drawing on current bidding conditions and request data to proffer expert-guided bidding strategies. The memory module is designed to offer a robust, multi-dimensional retrieval system. It segments and updates information incrementally, ensuring the agent has access to accurate and relevant data. Additionally, the RTBAgent features a reflection module that it uses for regular decision reviews, thereby cultivating valuable insights for enhancing subsequent actions. Central to the RTBAgent is the action module, which employs a two-step decision-making process to formulate and execute well-considered actions. This process is pivotal for determining the optimal bidding price, ensuring that the agent's actions are strategically aligned with the goal of achieving the highest return on investment in the bidding process.
235
+
236
+ # 3.3 Environment
237
+
238
+ The dynamics of a bidding environment are characterized by the continuous emergence of new data following each round of bidding, which is a concrete representation of the current state $s_t$ . This data encompasses a variety of metrics, such as the current volume of bids, historical success rates in securing bids, prevailing market prices, the average cost per bid, the total budget allocated for bidding, and the remaining budget. These elements are crucial as they provide a comprehensive snapshot of the bidding landscape at any given time. Including such detailed environmental information is instrumental for the RTBAgent's decision-making process. It allows the system to not only assess the immediate bidding scenario but also to anticipate and adapt to potential shifts in the market dynamics. This real-time analysis and understanding of the bidding environment are essential for the RTBAgent to make informed and strategic decisions.
239
+
240
+ # 3.4 Components of Agent
241
+
242
+ 3.4.1 Profile. To help LLMs understand the bidding process, we define the profile of our RTBAgent as follows,
243
+
244
+ You are a senior data analyst specializing in in-depth research and strategy development in the field of real-time bidding (RTB) advertising placement. You use advanced data analysis tools and algorithms to guide advertisers to gain an advantage in fierce market competition...
245
+
246
+ By presenting the problem background, the agent's role, and its action goals in textual form, the LLM can better perform the reasoning process for the specific task.
247
+
248
+ 3.4.2 Tools. We incorporate a diverse tool set $\mathcal{H}$ for RTBAgent, including a click-through rate (CTR) prediction model and bidding decision strategies. We use Factorization Machines (FM) [19] as the CTR prediction model, which is widely used and can estimate the value for each impression stream. A separate CTR prediction model is trained for each advertiser within this framework. Additionally, we use a series of rule-based strategies to complement expert knowledge, including MCPC [12], LIN [18], and LP [4], which will be further discussed in the Overall Comparison section. These methods are widely used in the industry, are grounded in prior knowledge, and demonstrate high referential value in an offline environment. It is important to note that the bidding decision-making process only needs to rely on one of these bidding strategies to assist its decisions.
249
+
250
+ 3.4.3 Actions. Due to the uncontrollable and unreliable nature of generative LLMs in directly predicting continuous bidding prices, we instead let the LLMs predict an adjustment factor. Specifically, we define an adjustment space with an observation range from -0.5 to 0.5. This enables the RTBAgent to adjust the decisions suggested by expert knowledge based on the current state and historical decisions in order to better adapt to the dynamic environment.
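+ As an illustration (the JSON reply format and function names below are our assumptions, not the paper's exact parser), the adjustment returned by the LLM can be clamped to the $[-0.5, 0.5]$ action space and applied to the expert factor as $\lambda_t = \lambda_{base} \cdot (1 + a_t)$:
+
+ ```python
+ # Sketch: map the LLM's textual action to a bounded adjustment and scale lambda_base.
+ import json
+
+ def parse_adjustment(llm_reply: str, low: float = -0.5, high: float = 0.5) -> float:
+     try:
+         a_t = float(json.loads(llm_reply).get("adjustment", 0.0))
+     except (ValueError, TypeError, AttributeError):
+         a_t = 0.0                          # fall back to the expert strategy unchanged
+     return max(low, min(high, a_t))        # keep the action inside [-0.5, 0.5]
+
+ def scaling_factor(lambda_base: float, a_t: float) -> float:
+     return lambda_base * (1.0 + a_t)       # lambda_t used to price impressions
+ ```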
251
+
252
+ 3.4.4 Memories. In the RTBAgent, we design three types of memory: environment memory $\mathcal{M}^{env}$ , bidding memory $\mathcal{M}^{bid}$ , and reflection memory $\mathcal{M}^{ref}$ , where $\{\mathcal{M}^{env}, \mathcal{M}^{bid}, \mathcal{M}^{ref}\} \subset \mathcal{M}$ . Specifically, $\mathcal{M}^{env}$ stores the market environment after each decision, allowing the RTBAgent to refer to historical data to make wiser decisions when facing new bidding opportunities. $\mathcal{M}^{bid}$ records the bidding behaviors and reasons in different market environments. By analyzing $\mathcal{M}^{bid}$ , the RTBAgent can identify which strategies are more effective in specific situations, optimizing and adjusting future bids. Additionally, $\mathcal{M}^{bid}$ can help the RTBAgent recognize potential patterns and trends, such as specific bidding strategies that are more likely to succeed under certain conditions. $\mathcal{M}^{ref}$ is the RTBAgent's self-assessment mechanism, recording the reflection process and results after each decision. $\mathcal{M}^{ref}$ enables the RTBAgent to understand why some decisions did not achieve the expected effects, thus avoiding similar mistakes in the future. The core of $\mathcal{M}^{ref}$ lies in continuous learning and improvement, ensuring that the RTBAgent can maintain competitiveness in the ever-changing market environment.
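+ A minimal sketch of these three stores (class and method names are ours, not a released API) is shown below; each decision step appends a record, and retrieval returns the most recent entries for prompting.
+
+ ```python
+ # Sketch: environment, bidding, and reflection memories with append and recency retrieval.
+ from dataclasses import dataclass, field
+
+ @dataclass
+ class AgentMemory:
+     env: list = field(default_factory=list)   # M^env: market state after each decision
+     bid: list = field(default_factory=list)   # M^bid: actions taken and their reasons
+     ref: list = field(default_factory=list)   # M^ref: daily self-assessments
+
+     def add(self, kind: str, record: dict) -> None:
+         getattr(self, kind).append(record)
+
+     def recent(self, kind: str, k: int = 5) -> list:
+         return getattr(self, kind)[-k:]        # the k most recent records of one type
+ ```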
253
+
254
+ # 3.5 Workflow of RTBAgent
255
+
256
+ This section elucidates the operational sequence of the RTBAgent, encompassing three pivotal stages: information gathering, two-step decision-making, and daily reflection.
257
+
258
+ 3.5.1 Information Gathering. At the heart of RTBAgent's functionality is the aggregation of pertinent data. It should be noted that each action and environmental feedback record will be saved in real time to generate memory. Throughout the bidding process, extensive logs are generated, encompassing decisions, reflections, and environmental contexts. The synthesis of these logs into a summarized memory is pivotal for informed decision-making. The summarized memory at any given time step $t$ , denoted as $F_{t}^{sum}$ , is formulated by integrating information from bidding, reflective, and environmental memories:
259
+
260
+ $$
261
+ F_t^{sum} = \sum_{i = \text{bid}, \text{env}, \text{ref}} \mathrm{LLM}\left(\varphi^{sum}\left(m_t^{i}\right)\right), \tag{7}
262
+ $$
263
+
264
+ where $\sum$ is a concatenation operation of multiple strings, $\varphi^{sum}(\cdot)$ is a prompt template for information gathering and $m_t^i\in \mathcal{M}^i$ is the memory for the type $i$ at time step $t$ .
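+ Read as code, Eq. (7) amounts to summarizing each memory type with the LLM and concatenating the results; in the sketch below, `llm` and `prompt_sum` are placeholders for the model call and the template $\varphi^{sum}$, and `memory` follows the store sketched in Section 3.4.4.
+
+ ```python
+ # Sketch of Eq. (7): one LLM summary per memory type, concatenated into F_t^sum.
+ def gather_information(llm, prompt_sum, memory, t):
+     parts = []
+     for kind in ("bid", "env", "ref"):
+         records = memory.recent(kind)                      # m_t^i for memory type i
+         parts.append(llm(prompt_sum(kind=kind, records=records, step=t)))
+     return "\n".join(parts)                                # concatenation plays the role of the sum
+ ```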
265
+
266
+ Additionally, the RTBAgent leverages a suite of tools, encapsulated within $F_{t}^{tool}$ , to provide foundational insights for two-step decision-making. Central to this are the CTR estimations, represented by $V_{t}$ , and strategic bidding recommendations, denoted as $\lambda_{base}$ . Therefore, the output of the tool can be represented by a pair $(V_{t}, \lambda_{base})$ . Here, $V_{t} = \{v_{t}^{1}, v_{t}^{2}, \dots, v_{t}^{d_{t}}\}$ is derived from a trained predictive model, forecasting the potential user engagement by the impression feature vector $X_{t} = \{x_{t}^{1}, x_{t}^{2}, \dots, x_{t}^{d_{t}}\}$ , while $\lambda_{base}$ is algorithmically determined based on historical data, and $d_{t}$ is the number of impressions at the current time step $t$ , and satisfies the following:
267
+
268
+ $$
269
+ \sum_ {t = 1} ^ {T} d _ {t} = N, \tag {8}
270
+ $$
271
+
272
+ where $N$ is the total number of ad impression opportunities during one day.
273
+
274
+ 3.5.2 Two-Step Decision-Making. The RTBAgent's two-step decision-making process plays a crucial role in its strategic capabilities. The first step involves insight reasoning, represented by $F_{t}^{ins}$ , and is responsible for analyzing potential decision ranges, including their benefits, drawbacks, possible outcomes, and associated risks. This step can be formulated as follows:
275
+
276
+ $$
277
+ F_t^{ins} = \mathrm{LLM}\left(\varphi^{ins}\left(s_t, F_t^{sum}, \lambda_{base}\right)\right), \tag{9}
278
+ $$
279
+
280
+ where $\varphi^{ins}(\cdot)$ is a prompt template for insight reasoning.
281
+
282
+ Following the insight reasoning, the action making step, represented by $F_{t}^{act}$ , is where the actual bidding action $a_{t}$ and its reason $re_{t}$ are determined. The output of this step is a binary tuple $(a_{t}, re_{t})$ , which is generated by the following representation:
283
+
284
+ $$
285
+ F_t^{act} = \mathrm{LLM}\left(\varphi^{act}\left(s_t, F_t^{sum}, F_t^{ins}, \lambda_{base}\right)\right), \tag{10}
286
+ $$
287
+
288
+ where $\varphi^{act}(\cdot)$ is a prompt template for action making. This step is very important as it turns the gathered insights into a specific action. In the context of RTBAgent, this action is deciding on the bidding price for an ad impression. The final bidding price $b_{t}^{i}$ for the current impression is calculated using the following formula:
289
+
290
+ $$
291
+ b_t^{i} = v_t^{i} \cdot \lambda_{base} \cdot (1 + a_t), \tag{11}
292
+ $$
293
+
294
+ where $b_{t}^{i}$ is the bidding price for the impression opportunity $i$ at time step $t$ . This formula reflects the dynamic nature of the bidding process, allowing the agent to adjust its bids in real-time based on current analysis and historical data, thus optimizing its bidding strategy.
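+ The two-step decision of Eqs. (9)-(11) can be sketched as two LLM calls followed by per-impression pricing; the prompt templates and the JSON reply schema below are illustrative assumptions, not the exact released prompts.
+
+ ```python
+ # Sketch of Eqs. (9)-(11): insight reasoning, action making, then per-impression bids.
+ import json
+
+ def two_step_decision(llm, prompt_ins, prompt_act, state, f_sum, lambda_base):
+     insights = llm(prompt_ins(state=state, summary=f_sum, base=lambda_base))      # F_t^ins, Eq. (9)
+     reply = llm(prompt_act(state=state, summary=f_sum,
+                            insights=insights, base=lambda_base))                  # F_t^act, Eq. (10)
+     decision = json.loads(reply)                # assumed reply: {"adjustment": ..., "reason": ...}
+     return float(decision["adjustment"]), decision.get("reason", "")
+
+ def price_impressions(values, lambda_base, a_t):
+     return [v * lambda_base * (1.0 + a_t) for v in values]                        # Eq. (11)
+ ```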
295
+
296
+ 3.5.3 Daily Reflection. After the daily bidding cycle, RTBAgent engages in a process of introspection, encapsulated by $F_{t}^{ref}$ , to consolidate and reflect upon the day's decisions and their outcomes. This reflective process is integral to the continuous improvement of the agent's strategic acumen:
297
+
298
+ $$
299
+ F_t^{ref} = \sum_{i = \text{bid}, \text{env}, \text{ref}} \mathrm{LLM}\left(\varphi^{ref}\left(m_t^{i}\right)\right), \tag{12}
300
+ $$
301
+
302
+ where $\varphi^{ref}(\cdot)$ is a prompt template for daily reflection. This cyclical reflection ensures that the agent learns from its experiences, thereby refining its approach for enhanced performance in subsequent bidding endeavors.
303
+
304
+ The complete RTBAgent full-time bidding framework is provided in Algorithm 1.
305
+
306
+ Algorithm 1: Workflow of RTBAgent
307
+ 1: Set Tools $\mathcal{H}$ and initialize Memory $\mathcal{M}$ ;
308
+ 2: Set prompt template $\varphi^{sum}(\cdot), \varphi^{ins}(\cdot), \varphi^{act}(\cdot), \varphi^{ref}(\cdot)$ ;
309
+ 3: Set $K$ as the number of duration days;
310
+ 4: Set $T$ as the number of time steps for each day;
311
+ 5: Set budget list $B_{list} = \{B_1, B_2, \dots, B_k\}$
312
+ 6: for $k = 1$ to $K$ do
313
+ 7: Obtain allocated budget $B_k$ ;
314
+ 8: for $t = 1$ to $T$ do
315
+ 9: Observe state $s_t$ ;
316
+ 10: Obtain impression feature vector $X_t$ ;
317
+ 11: Obtain gathered information $F_t^{sum}$ via Eq.(7);
318
+ 12: Use $\mathcal{H}$ to obtain $\lambda_{base}$ and value estimation $V_t$ ;
319
+ 13: Obtain insight reasoning result $F_t^{ins}$ via Eq.(9);
320
+ 14: Obtain action making result $F_t^{act}$ via Eq.(10);
321
+ 15: Get action $a_t$ and reason $re_t$ from $F_t^{act}$ ;
322
+ 16: Bid each impression via Eq.(11);
323
+ 17: Store $s_t, F_t^{sum}$ and $F_t^{act}$ to $\mathcal{M}$ ;
324
+ 18: if $t == T$ then
325
+ 19: Obtain daily reflection $F_t^{ref}$ via Eq.(12);
326
+ 20: Store $F_t^{ref}$ to $\mathcal{M}$ ;
327
+ 21: end if
328
+ 22: end for
329
+ 23: end for
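+ For readability, the loop of Algorithm 1 can be restated as illustrative Python pseudocode; `env`, `tools`, and `prompts` are hypothetical interfaces, and the helpers reuse the sketches shown in earlier sections.
+
+ ```python
+ # Illustrative restatement of Algorithm 1 (not the released implementation).
+ def run_rtbagent(days, steps_per_day, budgets, env, memory, llm, prompts, tools):
+     for k in range(days):
+         budget = budgets[k]                                           # B_k
+         for t in range(steps_per_day):
+             state = env.observe(budget)                               # s_t
+             x_t = env.impression_features()                           # X_t
+             f_sum = gather_information(llm, prompts.sum, memory, t)   # Eq. (7)
+             lambda_base = tools.expert_lambda(state)                  # expert strategy reference
+             values = tools.ctr_model(x_t)                             # V_t from the CTR predictor
+             a_t, reason = two_step_decision(llm, prompts.ins, prompts.act,
+                                             state, f_sum, lambda_base)  # Eqs. (9)-(10)
+             bids = price_impressions(values, lambda_base, a_t)        # Eq. (11)
+             budget = env.run_auction(bids, budget)                    # spend budget, observe outcomes
+             memory.add("env", {"t": t, "state": state})
+             memory.add("bid", {"t": t, "action": a_t, "reason": reason})
+         daily_note = llm(prompts.ref(memory.recent("bid")))           # Eq. (12): daily reflection
+         memory.add("ref", {"day": k, "note": daily_note})
+ ```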
330
+
331
+ # 4 EXPERIMENTS
332
+
333
+ # 4.1 Datasets
334
+
335
+ We conduct a detailed study on the performance of RTBAgent using the iPinYou dataset [14]. The iPinYou dataset is provided by iPinYou Corporation, a prominent e-commerce advertising technology company in China. The dataset includes real-time bidding advertising data over a 10-day period in 2013, covering nine different advertising campaigns. Specifically, it contains 19.5 million ad displays, 14,790 clicks, and a total advertising cost of 16,000 RMB. These data not only portray the market environment, but also provide a complete path of user response from the advertisers' perspective. The records in the dataset are organized with each line representing three types of information: auction and ad features, auction winning price, and user click feedback on ad displays. Additionally, all monetary values are in RMB, corresponding to the cost-per-thousand-impressions (CPM) pricing model. The test
336
+
337
+ data is derived from the final three days of each campaign, while the remaining data is used for training, as reported by the data publisher [14].
338
+
339
+ # 4.2 Evaluation Procedure
340
+
341
+ In our study, we specifically focus on the number of clicks as the KPI for evaluation. For each advertising campaign, we allocate the budget on a daily basis and divide each day into 24 time steps, one per hour. The bidding model iterates over the test dataset using the same CTR estimator. For each bidding request, the strategy generates a bid price that does not exceed the current budget. If this bid price is equal to or higher than the market price, the advertiser wins the auction, incurs the market price as a cost, and gains user clicks as a reward. Then, the remaining auction quantity and budget are updated. To prevent a situation where the budget is so high that all bids are won, the budget limit cannot exceed the total historical cost of the test data. To examine the performance of bidding strategies under different budget constraints, we evaluate using three different budgets: $1/2$ , $1/8$ , and $1/32$ of the total budget.
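+ A sketch of this evaluation loop (variable and function names are ours, not the benchmark's code) under a budget set to a fraction of the campaign's historical cost:
+
+ ```python
+ # Sketch of the evaluation protocol: bids are capped by the remaining budget; a bid at or
+ # above the logged market price wins, pays that price, and collects the click feedback.
+ def evaluate(strategy, requests, total_cost, fraction):
+     """requests: iterable of (features, market_price, clicked) bid requests."""
+     budget = total_cost * fraction            # e.g. 1/2, 1/8, or 1/32 of the total cost
+     clicks = 0
+     for features, market_price, clicked in requests:
+         bid = min(strategy(features), budget) # never bid more than the remaining budget
+         if bid >= market_price:
+             budget -= market_price            # pay the second (market) price
+             clicks += int(clicked)
+     return clicks
+ ```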
342
+
343
+ Table 2: Performance comparison of all methods in terms of actual click counts under different budgets.
344
+
345
+ <table><tr><td>Model</td><td>1/2</td><td>1/8</td><td>1/32</td></tr><tr><td>MCPC</td><td>1,779</td><td>989</td><td>298</td></tr><tr><td>LIN</td><td>2,200</td><td>1,202</td><td>744</td></tr><tr><td>LP</td><td>2,211</td><td>1,182</td><td>765</td></tr><tr><td>ORTB</td><td>2,222</td><td>1,192</td><td>746</td></tr><tr><td>RLB</td><td>2,223</td><td>1,227</td><td>741</td></tr><tr><td>DRLB</td><td>2,264</td><td>1,231</td><td>778</td></tr><tr><td>USCB</td><td>2,268</td><td>1,227</td><td>762</td></tr><tr><td>DiffBid</td><td>2,275</td><td>1,235</td><td>793</td></tr><tr><td>RTBAgent(ours)</td><td>2,281</td><td>1,240</td><td>795</td></tr></table>
346
+
347
+ # 4.3 Implementation Details
348
+
349
+ We utilize a single NVIDIA RTX A6000 GPU to train the RL models in our benchmark tests to ensure consistency in the training environment. All comparison schemes employ FM for CTR estimation. The FM model includes a linear layer for mapping features to the output space, a bias term for baseline prediction adjustment, and a feature embedding layer for capturing feature interactions. Finally, the sigmoid function converts the output into a probability to predict the likelihood of user clicks. During the testing phase, the basic factor $\lambda_{base}$ used by RTBAgent, which is based on expert strategies, is derived exclusively from the training set.
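+ The described CTR estimator can be sketched as a standard factorization machine in PyTorch; the field layout and embedding dimension below are illustrative, not the exact configuration used in the experiments.
+
+ ```python
+ # Sketch of an FM-style CTR model: bias + first-order weights + pairwise interactions.
+ import torch
+ import torch.nn as nn
+
+ class FM(nn.Module):
+     def __init__(self, num_features: int, dim: int = 10):
+         super().__init__()
+         self.bias = nn.Parameter(torch.zeros(1))
+         self.linear = nn.Embedding(num_features, 1)      # first-order weights
+         self.embed = nn.Embedding(num_features, dim)     # second-order factor vectors
+
+     def forward(self, feature_ids: torch.Tensor) -> torch.Tensor:
+         # feature_ids: (batch, fields) of categorical feature indices
+         linear = self.linear(feature_ids).sum(dim=1).squeeze(-1)
+         v = self.embed(feature_ids)                      # (batch, fields, dim)
+         pairwise = 0.5 * ((v.sum(1) ** 2) - (v ** 2).sum(1)).sum(-1)
+         return torch.sigmoid(self.bias + linear + pairwise)   # predicted click probability
+ ```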
350
+
351
+ # 4.4 Overall Comparison
352
+
353
+ To verify the effectiveness of our model on real-world dataset, we compare it with the following competitive methods.
354
+
355
+ MCPC determines the maximum effective cost-per-click(CPC) for each advertising campaign by dividing the cost by the number of clicks in the training data, using this value as a parameter for bidding.
356
+
357
+ LIN is a linear bidding method where the bid value is linearly proportional to the estimated CTR $\theta_{e}$ . The bid for a single impression is formalized as $\frac{b_0}{\theta_0} \theta_e$ , where $\theta_0$ is the average CTR in the
358
+
359
+ Table 3: The improvement in click counts brought by different expert strategies, among which the use of LLMs is Llama-3-8B-Instruct. The number in parentheses represents the performance improvement (\%) compared to the corresponding expert strategy.
360
+
361
+ <table><tr><td>Budget</td><td>MCPC+</td><td>LIN+</td><td>LP+</td></tr><tr><td>1/2</td><td>1,796 (+0.96%)</td><td>2,216 (+0.73%)</td><td>2,265 (+2.44%)</td></tr><tr><td>1/8</td><td>1,070 (+8.19%)</td><td>1,223 (+1.75%)</td><td>1,252 (+5.92%)</td></tr><tr><td>1/32</td><td>336 (+12.75%)</td><td>783 (+5.24%)</td><td>796 (+4.05%)</td></tr></table>
362
+
363
+ training set, and $b_0$ is an adjustment parameter. The optimal parameter combination $\frac{b_0}{\theta_0}$ is selected for the bidding process.
364
+
365
+ LP models the problem as a linear programming issue. It directly solves for optimal offline parameters based on historical data to maximize the objective and uses these parameters for bidding.
366
+
367
+ ORTB [31] regards RTB as a functional optimization problem and derives a bidding function.
368
+
369
+ RLB [2] uses a model-based RL method to maximize the total value of winning impressions within budget constraints.
370
+
371
+ DRLB utilizes the Deep Q-Network algorithm to train the optimal action strategy.
372
+
373
+ USCB abstracts the core requirements of constrained bidding. It employs more efficient strategy search methods to achieve accelerated convergence.
374
+
375
+ DiffBid [6] leverages generative diffusion modeling to generate optimal bidding trajectories based on return correlations. Following [6], we use USCB to generate its training data, since DiffBid does not learn interactively through bidding activities.
376
+
377
+ Table 4: The impact of using different LLMs in terms of actual click counts under different budgets, with the expert strategy used being LP. The numbers in parentheses indicate the increase in clicks compared to LP.
378
+
379
+ <table><tr><td>LLMs</td><td>1/2</td><td>1/8</td><td>1/32</td></tr><tr><td>GPT-3.5-Turbo-1106</td><td>2,281 (+70)</td><td>1,240 (+58)</td><td>795 (+30)</td></tr><tr><td>Glm-4-Air</td><td>2,242 (+31)</td><td>1,208 (+26)</td><td>776 (+11)</td></tr><tr><td>GPT-4o mini</td><td>2,255 (+44)</td><td>1,241 (+59)</td><td>788 (+23)</td></tr><tr><td>Llama-3-8B-Instruct</td><td>2,265 (+54)</td><td>1,252 (+70)</td><td>796 (+31)</td></tr><tr><td>Baichuan3-Turbo</td><td>2,258 (+47)</td><td>1,224 (+42)</td><td>782 (+17)</td></tr><tr><td>Yi-large</td><td>2,220 (+9)</td><td>1,245 (+63)</td><td>792 (+27)</td></tr></table>
380
+
381
+ In our comparative analysis of performance on the iPinYou dataset under various budget scenarios, RTBAgent consistently outperformed conventional bidding methods and RL approaches, as shown in Table 2. RTBAgent significantly improved the key metric of click number compared to traditional methods such as MCPC, LIN, LP, and ORTB. These traditional methods underperformed in budget management, primarily due to their lack of flexibility in adapting to market volatility, often resulting in early budget depletion. In contrast, RTBAgent's advanced multi-memory retrieval system and two-step decision-making process allow for dynamic adjustments in bidding strategies, optimizing budget usage and enhancing advertising campaign effectiveness. When compared with RL models like RLB, DRLB and USCB, RTBAgent demonstrated superior adaptability and stability. Although model-based RL methods
382
+
383
+ # 1. Information Gathering
384
+
385
+ The advertiser_id 2997 has shown cautious decision-making behavior with slight negative adjustments over the past few days. Despite this, there is potential for improvement, as indicated by a positive adjustment of 0.15 made on 2013-10-26-09, resulting in an increase in clicks to 5. Recent trends indicate a balance between budget efficiency and click volume, but there is a discrepancy between predicted and actual outcomes.
386
+
387
+ # Two-Step Decision-Making
388
+
389
+ # 2. Insight Reasoning
390
+
391
+ [-0.5,-0.4]: This adjustment range may not be optimal as it suggests a significant reduction in bidding factor, which could lead to ...
392
+
393
+ [-0.4,-0.3): A moderate reduction in bidding factor is suggested, which might help balance budget efficiency and click volume ...
394
+
395
+ [-0.3,-0.2): This adjustment range may not be optimal as it suggests a slight decrease in bidding factor, which could lead to decreased ...
396
+
397
+ [-0.2,-0.1): A small reduction in bidding factor is suggested, which might help balance budget efficiency and click volume. However ...
398
+
399
+ # 3. Action-Making
400
+
401
+ Recommended Adjustment: 0.15
402
+
403
+ Reason: A moderate increase in bidding factor is suggested to improve visibility and click volume, considering the advertiser's cautious decision-making behavior and potential for improvement.
404
+
405
+ # 4. Daily Reflection
406
+
407
+ Evaluation of Bid Results: The bid adjustment factor had a moderate impact on click volume, but there is still room for improvement. The discrepancy between predicted and actual outcomes suggests that the bidding strategy needs to be refined.
408
+
409
+ Data Analysis Accuracy: The data analysis seems accurate, but I would like to re-examine the historical decision-making behavior and current environmental conditions to ensure that no critical factors were overlooked.
410
+
411
+ Policy Adjustment Suggestions: Based on the current market situation and bidding results, it is necessary to adjust the strategy. Targeting high-value users, optimizing ad creative, and implementing more aggressive bidding approaches are essential for improving future bidding strategies.
412
+
413
+ Improvement Measures: To improve future bidding strategy, I plan to implement machine learning-based algorithms for real-time analysis, refine targeting options based on user behavior, and adjust the bidding factor according to actual click outcomes.
414
+
415
+ Figure 2: An example of RTBAgent using LLMs to assist reasoning process.
416
+
417
+ are generally well-suited to dynamic environments, they showed inconsistent performance under varying budget constraints, suggesting challenges in model training and strategic refinement.
418
+
419
+ Overall, the comprehensive evaluation metrics confirm RTBAgent's outstanding performance, validating its effectiveness in advertising bidding and underscoring its robustness across different budgetary conditions.
420
+
421
+ # 4.5 Ablation Study
422
+
423
+ In Table 3, we introduce three models, MCPC+, LIN+, and LP+, to demonstrate the performance enhancement achieved by RTBAgent with three expert strategies. Specifically, MCPC+, LIN+, and LP+ correspond to RTBAgent based on the expert strategies MCPC, LIN, and LP, respectively. Regardless of which method is used, the performance of RTBAgent on the test set consistently improves over that of the underlying expert strategy. We also observe that the stronger the provided expert strategy, the better the final results.
424
+
425
+ As shown in Figure 3, LP consumed too much budget in the early stages and did not anticipate the benefits of spending the budget in the later stages. On the contrary, $\mathrm{LP+}$ can better control the expenditure of the budget, allowing for the purchase of high-quality clicks with a surplus budget in the second half of the process. In addition, $\mathrm{LP+}$ 's CPC has always been lower than the expert strategy throughout the process to ensure that more clicks are obtained within the specified budget. This suggests that RTBAgent effectively
426
+
427
+ utilizes the guidance knowledge of expert strategies, leading to enhanced performance through insight into the environment and interaction.
428
+
429
+ Table 5: Validation of the expert strategy and two-step decision-making effectiveness under different budgets, in which the expert strategy is LP, and the use of LLMs is Llama-3-8B-Instruct.
430
+
431
+ <table><tr><td>Strategy</td><td>Insight</td><td>Action</td><td>1/2</td><td>1/8</td><td>1/32</td></tr><tr><td></td><td></td><td>✓</td><td>1,796</td><td>634</td><td>227</td></tr><tr><td>✓</td><td></td><td>✓</td><td>2,194</td><td>1,186</td><td>779</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>2,265</td><td>1,252</td><td>796</td></tr></table>
432
+
433
+ The performance of RTBAgent, utilizing distinct scale base models for real-time bidding tasks, is examined in Table 4. Comparative analysis reveals that RTBAgent consistently surpasses alternative strategies across all budget allocation ratios. This indicates RTBAgent's superior performance regardless of model scale, from larger, more capable models to smaller, more efficient ones. The agent demonstrates adaptability and scalability, capable of dynamically adapting to the model size in response to business requirements and resource limitations, thus achieving the best bidding outcomes.
434
+
435
+ In Table 5, we analyze the effectiveness of expert strategies and two-step decision-making (including insight reasoning and action
436
+
437
+ ![](images/91d48e6cdbf6a0f86ed59f811ec2a31dc25ab779a8b9d6d30e6f5b0842b52cff.jpg)
438
+ Figure 3: Comparison of the remaining budget and CPC between LP+ and LP at each step for advertiser 1458 on June 13th, 2013. The use of LLMs is Llama-3-8B-Instruct.
439
+
440
+ making). Specifically, in the setting without an expert strategy, the action step directly uses the optimal scaling factor output by the LLMs. By comparing rows one and two of Table 5, we can find that incorporating expert knowledge boosts model performance by utilizing historical market information. Furthermore, row three highlights the superiority of a two-step decision-making model over direct action approaches regarding click count increases, emphasizing the value of detailed market analysis and flexible bidding strategies.
441
+
442
+ # 4.6 Visualization and Analysis of Performance
443
+
444
+ Figure 2 demonstrates how RTBAgent utilizes LLMs to enhance its reasoning process during real-time bidding, specifically for advertiser 2997 on the last step of October 27th, 2013. The figure illustrates the core workflow, starting with information gathering, where RTBAgent analyzes historical bidding data, identifying that a reasonable increase in the bidding factor has effectively improved visibility and click volume. This data analysis is the foundation for the next stage, a two-step decision-making process. In the first step, RTBAgent performs insight reasoning by analyzing current market conditions and historical data to extract strategic insights, such as recommending a 0.15 adjustment to the bid factor based on past performance. In the second step, RTBAgent translates these insights into actionable decisions by adjusting the bid price, aiming to optimize performance while maintaining stability. After each bidding cycle, RTBAgent engages in daily reflection, where it evaluates the outcomes of its decisions, compares expected results with actual performance, and updates its strategy accordingly. This iterative process allows RTBAgent to continuously improve its bidding approach, adapting to market dynamics and making more informed, strategic decisions in future cycles. The visualization in Figure 2, along with the data presented in Table 5, underscores RTBAgent's capability to refine the decision-making process in RTB scenarios. The detailed analysis of a single hour for advertiser 2997 on October 27th, 2013, demonstrates RTBAgent's ability to learn from daily operations and continuously improve its bidding strategies.
445
+
446
+ Furthermore, we invited 10 experts in the advertising field to rate the generated decisions and reasons on three levels: -1, 0, and 1. Finally, RTBAgent received a positive rating of $97\%$ , indicating that the proposed RTBAgent's output is convincing enough.
447
+
448
+ # 4.7 Discussion of Reasoning Costs and Benefits
449
+
450
+ In advertising bidding systems, methods that rely on RL or rules are typically triggered at longer intervals, such as every 15 minutes or more. Despite the inference time for LLMs being slightly longer than these traditional methods, it is still adequate to satisfy the typical latency requirements of advertising services in practice. As shown in Table 4, our framework outperforms other baselines when using LLMs of various scales, ranging from big to small, whether they are open-source LLMs or closed-source LLMs. This performance advantage allows advertisers to select the LLMs that best fit their deployment needs, reducing the concern over inference cost. Additionally, as demonstrated in Figure 2, our framework delivers not only reliable results but also provides data-driven insights and decision-making rationale, which is invaluable for operations teams when assessing the effectiveness of advertising campaigns and refining future strategies. Consequently, our proposed solution not only fulfills the fundamental requirements for inference time and cost but also generates direct economic benefits and indirect operational advantages.
451
+
452
+ # 5 CONCLUSIONS
453
+
454
+ In this paper, we introduce RTBAgent, an effective agent that first utilizes LLMs to enhance advertising auctions in RTB. Specifically, we innovatively propose a two-step decision-making process that integrates CTR estimation model, expert strategy knowledge, multi-memory retrieval system, and self-reflection, providing accurate simulation and real-time decision support for bidding scenarios. The extensive experimental results confirm that RTBAgent exhibits better adaptability and interpretability than traditional rule-based and RL methods in highly dynamic and unstable bidding environments. Our work contributes to developing a novel paradigm that attempts to explore the application of LLM-based integrated intelligence in RTB and spark related discussions. In future research, in order to better fit the competitiveness of the online advertising market, we will focus on studying the application of multi-agent systems based on LLMs in RTB. It can be foreseen that multi-agent systems will become more complex and effective.
455
+
456
+ # 6 LIMITATIONS
457
+
458
+ Our approach does have some limitations. Firstly, while LLM-based methods have achieved improvements in effectiveness, the response time of LLM-based bidding systems is not as swift as desired. Employing models with smaller parameter sizes, such as 1B or 3B, appears to be a promising direction. Secondly, current LLMs have not yet encompassed a richer set of bidding knowledge. Utilizing strategies like RAG might lead to greater enhancements, which is on our agenda for the future. Thirdly, we have deployed this system into a real-world advertising bidding environment, and we plan to disclose the revenue in the future.
459
+
460
+ # 7 ACKNOWLEDGMENTS
461
+
462
+ This work is supported in part by the National Natural Science Foundation of China (62372187), in part by the National Key Research and Development Program of China (2022YFC3601005) and in part by the Guangdong Provincial Key Laboratory of Human Digital Twin (2022B1212010004).
463
+
464
+ # REFERENCES
465
+
466
+ [1] Kareem Amin, Michael Kearns, Peter Key, and Anton Schwaighofer. 2012. Budget optimization for sponsored search: Censored learning in mdps. arXiv preprint arXiv:1210.4847 (2012).
467
+ [2] Han Cai, Kan Ren, Weinan Zhang, Kleanthis Malialis, Jun Wang, Yong Yu, and Defeng Guo. 2017. Real-time bidding by reinforcement learning in display advertising. In Proceedings of the tenth ACM international conference on web search and data mining. 661-670.
468
+ [3] Yirong Chen, Xiaofen Xing, Jingkai Lin, Huimin Zheng, Zhenyu Wang, Qi Liu, and Xiangmin Xu. 2023. SoulChat: Improving LLMs' empathy, listening, and comfort abilities through fine-tuning with multi-turn empathy conversations. In Findings of the Association for Computational Linguistics: EMNLP 2023. 1170-1183.
469
+ [4] George B Dantzig. 2002. Linear programming. Operations research 50, 1 (2002), 42-47.
470
+ [5] Zihan Ding and Hao Dong. 2020. Challenges of reinforcement learning. Deep Reinforcement Learning: Fundamentals, Research and Applications (2020), 249-272.
471
+ [6] Jiayan Guo, Yusen Huo, Zhilin Zhang, Tianyu Wang, Chuan Yu, Jian Xu, Bo Zheng, and Yan Zhang. 2024. Generative Auto-bidding via Conditional Diffusion Modeling. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD '24). Association for Computing Machinery, New York, NY, USA, 5038-5049. https://doi.org/10.1145/3637528.3671526
472
+ [7] Izzeddin Gur, Hiroki Furuta, Austin Huang, Mustafa Safdari, Yutaka Matsuo, Douglas Eck, and Aleksandra Faust. 2023. A real-world webagent with planning, long context understanding, and program synthesis. arXiv preprint arXiv:2307.12856 (2023).
473
+ [8] Yue He, Xiujun Chen, Di Wu, Junwei Pan, Qing Tan, Chuan Yu, Jian Xu, and Xiaoqiang Zhu. 2021. A unified solution to constrained bidding in online display advertising. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 2993-3001.
474
+ [9] Sirui Hong, Xiawu Zheng, Jonathan Chen, Yuheng Cheng, Jinlin Wang, Ceyao Zhang, Zili Wang, Steven Ka Shing Yau, Zijuan Lin, Liyang Zhou, et al. 2023. Metagpt: Meta programming for multi-agent collaborative framework. arXiv preprint arXiv:2308.00352 (2023).
475
+ [10] Dong Huang, Qingwen Bu, Jie M Zhang, Michael Luck, and Heming Cui. 2023. Agentcoder: Multi-agent-based code generation with iterative testing and optimisation. arXiv preprint arXiv:2312.13010 (2023).
476
+ [11] IAB and PwC. 2023. Internet Advertising Revenue Report 2022. In Interactive Advertising Bureau and PwC. IAB and PwC, 1-31. https://www.iab.com/wp-content/uploads/2023/04/IAB_PwC_Internet_Advertising_Revenue_Report_2022.pdf
477
+ [12] Kuang-chih Lee, Burkay Birant Orten, Ali Dasdan, and Wentong Li. 2018. Estimating conversion rate in display advertising from past performance data. US Patent 10,037,543.
478
+ [13] Junkai Li, Siyu Wang, Meng Zhang, Weitao Li, Yunghwei Lai, Xinhui Kang, Weizhi Ma, and Yang Liu. 2024. Agent hospital: A simulacrum of hospital with evolvable medical agents. arXiv preprint arXiv:2405.02957 (2024).
479
+ [14] Hairen Liao, Lingxiao Peng, Zhenchuan Liu, and Xuehua Shen. 2014. iPinYou global rtb bidding algorithm competition dataset. In Proceedings of the Eighth International Workshop on Data Mining for Online Advertising. 1-6.
480
+ [15] Xiao-Yang Liu, Guoxuan Wang, and Daochen Zha. 2023. Fingpt: Democratizing internet-scale data for financial large language models. arXiv preprint arXiv:2307.10485 (2023).
481
+ [16] Keyu Pan and Yawen Zeng. 2023. Do LLMs Possess a Personality? Making the MBTI Test an Amazing Evaluation for Large Language Models. arXiv:2307.16180 [cs.CL]
482
+
483
+ [17] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. 2023. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology. 1-22.
484
+ [18] Claudia Perlich, Brian Dalessandro, Rod Hook, Ori Stitelman, Troy Raeder, and Foster Provost. 2012. Bid optimizing and inventory scoring in targeted online advertising. In Proceedings of the 18th ACM SIGKDD international conference on Knowledge discovery and data mining. 804-812.
485
+ [19] Steffen Rendle. 2010. Factorization machines. In 2010 IEEE International conference on data mining. IEEE, 995-1000.
486
+ [20] Tim Roughgarden. 2010. Algorithmic game theory. Commun. ACM 53, 7 (2010), 78-86.
487
+ [21] Jun Wang and Shuai Yuan. 2015. Real-time bidding: A new frontier of computational advertising research. In Proceedings of the Eighth ACM International Conference on Web Search and Data Mining, 415-416.
488
+ [22] Jun Wang, Weinan Zhang, Shuai Yuan, et al. 2017. Display advertising with real-time bidding (RTB) and behavioural targeting. Foundations and Trends® in Information Retrieval 11, 4-5 (2017), 297-435.
489
+ [23] Di Wu, Xiujun Chen, Xun Yang, Hao Wang, Qing Tan, Xiaoxun Zhang, Jian Xu, and Kun Gai. 2018. Budget constrained bidding by model-free reinforcement learning in display advertising. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management. 1443-1451.
490
+ [24] Songlin Xu, Xinyu Zhang, and Lianhui Qin. 2024. EduAgent: Generative Student Agents in Learning. arXiv preprint arXiv:2404.07963 (2024).
491
+ [25] Hui Yang, Sifu Yue, and Yunzhong He. 2023. Auto-gpt for online decision making: Benchmarks and additional opinions. arXiv preprint arXiv:2306.02224 (2023).
492
+ [26] Hao Yu, Michael Neely, and Xiaohan Wei. 2017. Online convex optimization with stochastic constraints. Advances in Neural Information Processing Systems 30 (2017).
493
+ [27] Hao Yu and Michael J Neely. 2020. A low complexity algorithm with $O(\sqrt{T})$ regret and $O(1)$ constraint violations for online convex optimization with long term constraints. Journal of Machine Learning Research 21, 1 (2020), 1-24.
494
+ [28] Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. 2024. Self-rewarding language models. arXiv preprint arXiv:2401.10020 (2024).
495
+ [29] Hongbo Zhang, Junying Chen, Feng Jiang, Fei Yu, Zhihong Chen, Jianquan Li, Guiming Chen, Xiangbo Wu, Zhiyi Zhang, Qingying Xiao, et al. 2023. HuatuoGPT, towards taming language model to be a doctor. arXiv preprint arXiv:2305.15075 (2023).
496
+ [30] Weinan Zhang, Kan Ren, and Jun Wang. 2016. Optimal real-time bidding frameworks discussion. arXiv preprint arXiv:1602.01007 (2016).
497
+ [31] Weinan Zhang, Shuai Yuan, and Jun Wang. 2014. Optimal real-time bidding for display advertising. In Proceedings of the 20th ACM SIGKDD international conference on Knowledge discovery and data mining. 1077-1086.
498
+ [32] Wentao Zhang, Lingxuan Zhao, Haochong Xia, Shuo Sun, Jiaze Sun, Molei Qin, Xinyi Li, Yuqing Zhao, Yilei Zhao, Xinyu Cai, et al. 2024. FinAgent: A Multimodal Foundation Agent for Financial Trading: Tool-Augmented, Diversified, and Generalist. arXiv preprint arXiv:2402.18485 (2024).
499
+ [33] Jun Zhao, Guang Qiu, Ziyu Guan, Wei Zhao, and Xiaofei He. 2018. Deep reinforcement learning for sponsored search real-time bidding. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining. 1021-1030.
500
+ [34] Zhi Zhou, Jiang-Xin Shi, Peng-Xiao Song, Xiao-Wen Yang, Yi-Xuan Jin, Lan-Zhe Guo, and Yu-Feng Li. 2024. LawGPT: A Chinese Legal Knowledge-Enhanced Large Language Model. arXiv preprint arXiv:2406.04614 (2024).
501
+
502
+ # 8 APPENDIX
503
+
504
+ # 8.1 Partial Display of Prompt Template for RTBAgent
505
+
506
+ Due to space limitations, only a portion of the prompt templates for RTBAgent is displayed. We will organize and release the remaining code and prompt templates soon.
507
+
508
+ Profile Definition Template of Insight Reasoning.
509
+
510
+ 1
511
+ 2 # YOUR ROLE
512
+ 3 - You are a senior data analyst specializing in in-depth research and strategy development in the field of real-time bidding (RTB) advertising placement.
513
+ 4 - You use advanced data analysis tools and algorithms to guide advertisers to gain an advantage in fierce market competition.
514
+ 5 - Your goal is to maximize the number of clicks through data analysis and algorithm adjustments given the budget for the day.
515
+
516
+ 6 - You need to regularly adjust the bidding factor based on historical decision-making, current environmental conditions, and algorithmic bidding recommendations.
517
+ 7
518
+ 8 # CONTEXT
519
+ 9 {history}
520
+ 11 # THE REFERENCE GIVEN BY THE BIDDING ALGORITHM FOR ADVERTISER
521
+ 12 {bidding_reference}
522
+ 13 # ENVIRONMENT_STATUS
523
+ 14 {environment_status}
524
+ 15 # NOW YOUR ACTION IS
525
+ 16 The bidding factor for this period = the bidding factor given by the algorithm * (1+adjustment)
526
+ 17 Now, you need to analyze the advantages and disadvantages of each "adjustment range" based on historical decisions, current environmental conditions, and algorithm suggestions.
527
+ 18 The selection space for "adjustment range" is from $\{\{[-0.5, -0.4), [-0.4, -0.3), [-0.3, -0.2), [-0.2, -0.1), [-0.1, 0.0), [0.0, 0.1), [0.1, 0.2), [0.2, 0.3), [0.3, 0.4), [0.4, 0.5]\}\}$ .
528
+ 19 While making your analysis, consider the following:
529
+ 20 1. **Historical Performance**: Adjustments that have previously optimized budget usage and improved click volume without significantly impacting the win rate are valuable.
530
+ 21 2. **Exploration of New Adjustments**: Exploring new adjustment ranges, especially positive adjustments range like $[0.0,0.1)$ , $[0.1,0.2)$ , and $[0.2,0.3)$ , can potentially uncover more effective strategies. Increasing the bid may improve visibility and click volume, particularly in competitive environments.
531
+ 22 3. **Balancing Stability and Innovation**: Strive to balance between maintaining strategies that have shown consistent performance and exploring new adjustments. A mix of historical strategy and new exploration, with a focus on positive adjustments, could provide a balanced approach to ensure cost efficiency, maximize clicks, and adapt to market changes.
532
+ 23 Please output a JSON in the following format for all analyses:
533
+ 24 ...json
534
+ 25 {
535
+ 26 "adjustment range for [-0.5, -0.4)": str = xx,
536
+ 27 "adjustment range for [-0.4, -0.3)": str = xx,
537
+ 28 "adjustment range for [-0.3, -0.2)": str = xx,
538
+ 29 "adjustment range for [-0.2, -0.1)": str = xx,
539
+ 30 "adjustment range for [-0.1, 0.0)": str = xx,
540
+ 31 "adjustment range for [0.0, 0.1)": str = xx,
541
+ 32 "adjustment range for [0.1, 0.2)": str = xx,
542
+ 33 "adjustment range for [0.2, 0.3)": str = xx,
543
+ 34 "adjustment range for [0.3, 0.4)": str = xx,
544
+ 35 "adjustment range for [0.4, 0.5]": str = xx
545
+ }
546
+ 37 }
547
+
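+ As a rough illustration of how this Insight Reasoning template might be wired into the agent loop, the sketch below fills the placeholders, parses the JSON analyses returned by the LLM, and derives the period's bidding factor. The helper `call_llm`, the chosen adjustment, and all example values are hypothetical stand-ins, not the released RTBAgent code.
+ ```python
+ import json
+
+ def run_insight_step(template, history, bidding_reference, environment_status,
+                      algo_factor, call_llm):
+     """Fill the Insight Reasoning prompt, parse the LLM's JSON analyses,
+     and compute this period's bidding factor."""
+     # Literal braces in the template (e.g. the JSON example) must be doubled
+     # as {{ }} so that str.format only substitutes the three placeholders.
+     prompt = template.format(history=history,
+                              bidding_reference=bidding_reference,
+                              environment_status=environment_status)
+     analyses = json.loads(call_llm(prompt))   # expects the JSON schema above
+     adjustment = 0.05                         # e.g. midpoint of [0.0, 0.1)
+     # Bidding factor for this period = algorithmic factor * (1 + adjustment).
+     return algo_factor * (1 + adjustment), analyses
+ ```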
548
+ # Profile Definition Template of History Bidding Summary.
549
+
550
+ 1
551
+ 2 You are an advanced history summary tool specializing in big data insights. Your task is to analyze recent trends and evaluate the effectiveness of decision-making behaviors over the selected time window.
552
+ 3 Please summarize the following information with a focus on recent changes, highlighting any new patterns, shifts, or significant deviations from past behavior. Pay particular attention to the current environment information provided at the end of this document. Use concise language to ensure the summary is clear and actionable.
553
+ 4 Summarize and return the output strictly in the following JSON format, without any additional text or explanations:
554
+ 5 ...json
555
+ 6 {
556
+ 7 "summary": str = xx
557
+ 8 }
558
+ 9 10
2502.00xxx/2502.00792/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8c82767927af59540e6dabe2d9a36161e097a9e9e6a08ddbfe4360a980c4b69
3
+ size 256838
2502.00xxx/2502.00792/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00806/1d5ae528-ade2-4d1e-852f-6151569ee57d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e96e5b3645da216caed199c5a8890446cd6bb4cedd3588fe062b5375902ab0fe
3
+ size 794593
2502.00xxx/2502.00806/full.md ADDED
@@ -0,0 +1,538 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # UniGraph2: Learning a Unified Embedding Space to Bind Multimodal Graphs
2
+
3
+ Yufei He
4
+
5
+ National University of Singapore
6
+
7
+ Singapore
8
+
9
+ yufei.he@u.nus.edu
10
+
11
+ Yuan Sui
12
+
13
+ National University of Singapore
14
+
15
+ Singapore
16
+
17
+ yuan.sui@u.nus.edu
18
+
19
+ Xiaoxin He
20
+
21
+ National University of Singapore
22
+
23
+ Singapore
24
+
25
+ he.xiaoxin@u.nus.edu
26
+
27
+ Yue Liu
28
+
29
+ National University of Singapore
30
+
31
+ Singapore
32
+
33
+ yliu@u.nus.edu
34
+
35
+ Yifei Sun
36
+
37
+ Zhejiang University, China
38
+
39
+ yifeisun@zju.edu.cn
40
+
41
+ Bryan Hooi
42
+
43
+ National University of Singapore
44
+
45
+ Singapore
46
+
47
+ bhooi@comp.nus.edu.sg
48
+
49
+ # Abstract
50
+
51
+ Existing foundation models, such as CLIP, aim to learn a unified embedding space for multimodal data, enabling a wide range of downstream web-based applications like search, recommendation, and content classification. However, these models often overlook the inherent graph structures in multimodal datasets, where entities and their relationships are crucial. Multimodal graphs (MMGs) represent such graphs where each node is associated with features from different modalities, while the edges capture the relationships between these entities. On the other hand, existing graph foundation models primarily focus on text-attributed graphs (TAGs) and are not designed to handle the complexities of MMGs. To address these limitations, we propose UniGraph2 $^1$ , a novel cross-domain graph foundation model that enables general representation learning on MMGs, providing a unified embedding space. UniGraph2 employs modality-specific encoders alongside a graph neural network (GNN) to learn a unified low-dimensional embedding space that captures both the multimodal information and the underlying graph structure. We propose a new cross-domain multi-graph pre-training algorithm at scale to ensure effective transfer learning across diverse graph domains and modalities. Additionally, we adopt a Mixture of Experts (MoE) component to align features from different domains and modalities, ensuring coherent and robust embeddings that unify the information across modalities. Extensive experiments on a variety of multimodal graph tasks demonstrate that UniGraph2 significantly outperforms state-of-the-art models in tasks such as representation learning, transfer learning, and multimodal generative tasks, offering a scalable and flexible solution for learning on MMGs.
52
+
53
+ # CCS Concepts
54
+
55
+ - Information systems $\rightarrow$ Data mining; Social networks; • Computing methodologies $\rightarrow$ Neural networks.
56
+
57
+ ![](images/17bb2244c5a0ffd94c8d3bcfc4d4dde263227af59251552a15cb679522908906.jpg)
58
+
59
+ This work is licensed under a Creative Commons Attribution 4.0 International License.
60
+
61
+ WWW '25, Sydney, NSW, Australia
62
+
63
+ © 2025 Copyright held by the owner/author(s).
64
+
65
+ ACM ISBN 979-8-4007-1274-6/25/04
66
+
67
+ https://doi.org/10.1145/3696410.3714818
68
+
69
+ # Keywords
70
+
71
+ Pre-Training; Graph Foundation Models; Multimodal Learning
72
+
73
+ # ACM Reference Format:
74
+
75
+ Yufei He, Yuan Sui, Xiaoxin He, Yue Liu, Yifei Sun, and Bryan Hooi. 2025. UniGraph2: Learning a Unified Embedding Space to Bind Multimodal Graphs. In Proceedings of the ACM Web Conference 2025 (WWW '25), April 28-May 2, 2025, Sydney, NSW, Australia. ACM, New York, NY, USA, 12 pages. https://doi.org/10.1145/3696410.3714818
76
+
77
+ # 1 Introduction
78
+
79
+ Real-world web applications increasingly rely on multimodal data, where information is derived from a variety of sources such as text, images, and audio [2, 7, 21, 35, 39, 45]. Recent foundation models have focused on learning a unified embedding space across different modalities that allows for the seamless integration of multimodal data, thereby enabling effective cross-modal interactions and supporting downstream applications [13, 15, 42].
80
+
81
+ Models such as CLIP [42] have demonstrated the power of learning from multimodal data by mapping text and images into a shared embedding space. However, CLIP and similar models are fundamentally limited by their reliance on a 1-to-1 mapping between paired modalities, such as text-to-image alignment, ignoring more complex structures where nodes can be connected through many-to-many relationships and involve multiple modalities. These models fail to account for the graph structure present in numerous web domains, from social networks to e-commerce networks [10, 22, 57, 59, 64], where entities and their interactions are crucial to understanding the underlying relationships. For example, in e-commerce platforms, recommendation systems rely on complex networks of products, users, and their interactions [43]. Each node represents a user or a product, and edges represent interactions like purchases, views, or reviews. Additionally, both users and products are associated with rich multimodal data: product descriptions (text), images (visual), and user reviews (text), and demonstration videos (audio and visual). Integrating these diverse data types within the graph structure is essential for accurate recommendations and personalized user experiences [12]. To address these challenges, Multimodal Graphs (MMGs) have been introduced as a framework that combines graph structures with multimodal data [10, 64]. On MMGs, nodes are enriched with information from multiple modalities, allowing for a more comprehensive representation of entities and their relationships. However, existing MMGs learning methods can only train
82
+
83
+ models individually for a specific graph and task [6, 57, 58], and cannot achieve cross-graph and cross-task transfer like foundation models do without retraining or fine-tuning.
84
+
85
+ Recently, there has been considerable progress in learning foundation models for text-attributed graphs (TAGs) [8, 17, 19, 23], which can be viewed as a special case of MMGs where the node features are exclusively in the text modality. One prominent effort in this direction is UniGraph [23], which introduces a unified embedding space that combines graph structure and node-level textual information for all TAGs. UniGraph employs a masked prediction framework [16, 32, 41], inspired by the success of masked language models (MLMs) [32]. In this framework, UniGraph performs self-supervised pre-training by masking node-level text attributes and learning to predict the missing information based on the graph context. Despite its effectiveness on TAGs, UniGraph faces two significant limitations when extended to more complex settings. First, it is limited in its ability to generalize to MMGs, where nodes may contain features from diverse modalities such as images, in addition to text. Second, UniGraph focuses on pre-training on a single graph from one domain, which restricts its capacity to leverage knowledge across multiple domains. In training a foundation model, it is essential to employ more diverse pre-training data from different domains to enhance the model's generalization [1, 15, 42].
86
+
87
+ Presented Work. In this work, we propose UniGraph2, a graph foundation model for MMGs that provides a unified embedding space across graph domains and modalities, as shown in Figure 2. In UniGraph2, nodes are not restricted to textual attributes; instead, they can incorporate features from any combination of modalities. Similar to UniGraph, UniGraph2 adopts a masked prediction framework, but generalizes the masked prediction task to accommodate multimodal data. In this setup, the model is tasked with predicting missing node attributes, which could be text, image features, or any other modality, based on the graph structure and the available multimodal information. This allows the model to learn rich, unified representations that capture both the multimodal features of each node and the relationships encoded in the graph.
88
+
89
+ Furthermore, while UniGraph focuses on pre-training within a single graph domain, UniGraph2 introduces a more robust multigraph pre-training strategy. In real-world applications, data often comes from multiple sources, each with different graph structures and node modalities. To handle this, UniGraph2 proposes a cross-domain multi-graph pre-training framework, which enables the model to learn compact and transferable knowledge across a diverse set of graph datasets with varying modality and domain distributions. A key component of this framework is the Mixture of Experts (MoE) [25, 44], which is specifically designed to align node features from different domains and modalities. The MoE dynamically selects the most appropriate experts for each input data, ensuring that the diverse multimodal features are coherently integrated into the unified embedding space.
90
+
91
+ In summary, our key contributions in UniGraph2 are:
92
+
93
+ - We generalize the masked prediction framework used in UniGraph to support multimodal graphs, allowing nodes to include a variety of modalities such as text and images.
94
+ - We introduce a cross-domain multi-graph pre-training strategy, enabling UniGraph2 to learn unified and transferable
95
+
96
+ representations across different graph domains and modalities.
97
+
98
+ - We demonstrate through extensive experimentation that UniGraph2 outperforms state-of-the-art models in various multimodal graph learning tasks, including representation learning, transfer learning, and multimodal generative tasks, particularly when data is drawn from multiple graph domains.
99
+
100
+ # 2 Related Work
101
+
102
+ # 2.1 Multimodal Representation Learning
103
+
104
+ Building a general representation learning model for multimodal data has received significant attention in recent years, with various approaches aiming to unify learning across different modalities such as vision, language, and audio. Early approaches like Vision-Language Pre-training (VLP) models predominantly focus on learning from image-text data using contrastive learning and masked language modeling, leading to models such as CLIP [42] and ALIGN [30]. With the development of unified architectures [9, 29, 49] and pretraining tasks [3, 16, 32, 41], more work begins to explore effective alignment of representations for a wider range of different modalities, with the potential to expand to unlimited modalities [15, 54].
105
+
106
+ # 2.2 Multimodal Graph Learning
107
+
108
+ Most existing multimodal graph learning models primarily focus on knowledge graphs [6, 46, 58] and natural sciences, such as molecular graphs [31] or brain graphs [53]. However, these models are specifically designed for particular tasks on individual graphs using domain knowledge and do not aim to learn a unified and general representation. They also cannot be transferred across different graphs, modalities, or tasks. Unlike these works, a recent work, MMGL [57] explores the use of foundation models from different modalities on MMGs, but it focuses solely on generative tasks.
109
+
110
+ # 2.3 Graph Foundation Models
111
+
112
+ Learning graph foundation models that can be transferred across different graphs [20, 23, 25, 40] and tasks [16, 20, 24, 33] has recently received significant attention. Some works explore designing domain-specific graph foundation models, such as those for knowledge graphs [11, 47] and molecular graphs [56]. Most existing research efforts are dedicated to using LLMs with strong generalization capabilities to solve graph learning tasks [19, 33, 47, 51]. However, how to effectively serialize graph data so that LLMs can understand the graph structure and graph learning tasks remains a barrier to further performance improvements [61]. Additionally, these models typically use the generative capabilities of LLMs to directly generate predicted labels, thus addressing representation learning tasks on graphs. Due to the high computational cost, it is challenging to scale them to web-scale large graphs [19, 51].
113
+
114
+ # 3 Preliminaries
115
+
116
+ # 3.1 Multimodal Graphs (MMGs)
117
+
118
+ DEFINITION 1 (MULTIMODAL GRAPHS). A Multimodal Graph (MMG) is defined as a graph $\mathcal{G} = (\mathcal{V},\mathcal{E},\mathcal{M},\Omega)$ , where $\mathcal{V}$ represents the set
119
+
120
+ of nodes and $\mathcal{E}$ represents the set of edges. The function $\mathcal{M}:\mathcal{V}\to 2^{\Omega}$ maps each node $v\in \mathcal{V}$ to a subset of modalities $\Omega_v\subseteq \Omega$ , where $\Omega$ denotes the set of all possible modalities, such as text, images, or other data types. Each node $v$ in $\mathcal{V}$ can possess multiple features from different modalities, but not all nodes are required to have features from every modality.
121
+
122
+ For a Text-Attributed Graph $\mathcal{G}_{\mathrm{TAG}} = (\mathcal{V},\mathcal{E},\mathcal{M},\{\text{text}\})$ , where each node has an associated text $t_v \in \mathcal{T}_{\mathcal{V}}$ , we define the mapping function for MMGs as follows:
123
+
124
+ $$
125
+ \mathcal{M}(v) = \{\text{text}\}, \quad \text{for all } v \in \mathcal{V}. \tag{1}
126
+ $$
127
+
128
+ Here, $\Omega = \{\text{text}\}$ is the set of possible modalities, limited to textual data in this context.
129
+
130
+ # 3.2 General Representation Learning on MMGs
131
+
132
+ General representation learning [15, 38, 42, 54] on MMGs aims to learn a self-supervised pre-trained model that can infer meaningful representations for any new MMG, facilitating downstream tasks without the need for additional training or fine-tuning on new data.
133
+
134
+ PROBLEM 1 (GENERAL REPRESENTATION LEARNING ON MMGs). Consider a collection of Multimodal Graphs (MMGs) in the pretraining set $\mathcal{D}_{\text{pretrain}}$ , where each graph $\mathcal{G}_k = (\mathcal{V}_k, \mathcal{E}_k, \mathcal{M}_k)$ contains nodes $v_{ik} \in \mathcal{V}_k$ each associated with a set of modalities $\Omega_{v_{ik}} \subseteq \Omega$ , encompassing various data types such as text, images, and other feature modalities. The challenge in general representation learning on MMGs involves self-supervised pre-training a function $f: \mathcal{V}_k \to \mathbb{R}^d$ across this diverse dataset. The objective is to develop a model that generalizes well to any new, unseen graph, enabling effective inference across various MMGs. For inference, the pre-trained model $f$ is applied to a new, unseen graph $\mathcal{G}^{inf} = (\mathcal{V}^{inf}, \mathcal{E}^{inf}, \mathcal{M}^{inf})$ to generate embeddings for its nodes, thereby facilitating downstream tasks on $\mathcal{G}^{inf}$ without further training.
135
+
136
+ UniGraph [23]. TAGs are a subset of MMGs where each node is associated with textual features. As a general representation learning model on TAGs, UniGraph unifies the learning process by integrating LM and GNN into a single encoder.
137
+
138
+ In UniGraph's pre-training, the masked prediction process can be mathematically formulated in two key steps:
139
+
140
+ (1) Masked Encoding: For each node $v \in \mathcal{V}$ has its textual feature $t_v$ partially masked and encoded by an LM $f_{\theta_1}^{\mathrm{LM}}$ , producing hidden representations $E_v = f_{\theta_1}^{\mathrm{LM}}(\mathrm{Mask}(t_v))$ . The GNN $f_{\theta_2}^{\mathrm{GNN}}$ propagates node embeddings across the graph, where the final node embedding is:
141
+
142
+ $$
143
+ E_{\mathrm{CLS}}^{\prime} = f_{\theta_{2}}^{\mathrm{GNN}}\left(\mathcal{G}_{\mathrm{TAG}}, E_{\mathrm{CLS}}\right), \tag{2}
144
+ $$
145
+
146
+ with $E_{\mathrm{CLS}}$ representing the embeddings of all nodes' [CLS] tokens from $f_{\theta_1}^{LM}$ .
147
+
148
+ (2) Decoding: The MLP decoder $f_{\theta_3}^{\mathrm{Decoder}}$ combines the masked textual embeddings $E_v$ and the graph embeddings $E_{\mathrm{CLS}}'$ to reconstruct the masked tokens. The predicted probability distribution $P_v$ over the vocabulary is obtained via:
149
+
150
+ $$
151
+ P_{v} = f_{\theta_{3}}^{\mathrm{Decoder}}\left(\operatorname{concat}\left(E_{v}, E_{\mathrm{CLS}}^{\prime}\right)\right), \tag{3}
152
+ $$
153
+
154
+ and the model minimizes the masked language modeling loss $\mathcal{L}_{\mathrm{MLM}}$ , formulated as:
155
+
156
+ $$
157
+ \mathcal{L}_{\mathrm{MLM}} = -\frac{1}{|\mathcal{V}|}\sum_{v\in\mathcal{V}}\sum_{i} I(v,i)\log P_{v}[i, T_{i}], \tag{4}
158
+ $$
159
+
160
+ where $I(v,i)$ indicates masked positions and $T_{i}$ are the true tokens. The optimal parameters are obtained by:
161
+
162
+ $$
163
+ \theta_{1}^{*},\theta_{2}^{*},\theta_{3}^{*} = \arg\min_{\theta_{1},\theta_{2},\theta_{3}} \mathcal{L}_{\mathrm{MLM}}. \tag{5}
164
+ $$
165
+
166
+ In inference, the pre-trained model is used to generate embeddings for any unseen TAG $\mathcal{G}_{\mathrm{TAG}}^{\inf} = (\mathcal{V}^{\inf},\mathcal{E}^{\inf},\mathcal{T}_{\mathcal{V}}^{\inf})$ by processing the graph structure and node texts through the same encoder:
167
+
168
+ $$
169
+ H^{\mathrm{inf}} = f_{\theta_{2}^{*}}^{\mathrm{GNN}}\left(\mathcal{G}_{\mathrm{TAG}}^{\mathrm{inf}}, X^{\mathrm{inf}}\right), \quad \text{where } X^{\mathrm{inf}} = f_{\theta_{1}^{*}}^{\mathrm{LM}}\left(\mathcal{T}_{\mathcal{V}}^{\mathrm{inf}}\right). \tag{6}
170
+ $$
171
+
172
+ This process allows the model to generalize to new data, capturing both structural and textual graph attributes.
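+ A minimal sketch of the masked-token objective in Eq. (4), assuming the decoder has already produced per-position logits over the vocabulary; the loss here is averaged per masked token, a slight simplification of the per-node normalization above.
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def mlm_loss(logits, targets, masked):
+     """logits: (N, L, V) decoder outputs; targets: (N, L) true token ids;
+     masked: (N, L) float tensor with 1.0 at masked positions."""
+     per_token = F.cross_entropy(logits.transpose(1, 2), targets, reduction="none")  # (N, L)
+     return (per_token * masked).sum() / masked.sum().clamp(min=1)
+ ```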
173
+
174
+ # 4 The UniGraph2 Framework
175
+
176
+ The overall framework of UniGraph2 is illustrated in Figure 1. The UniGraph2 framework introduces a unified approach to learning representations of multimodal graphs (MMGs), which consist of nodes with diverse modal features (such as text and images) and edges representing relationships between these entities. The framework comprises three key modules: the multimodal feature encoders, which process multimodal features through modality-specific encoders; the Mixture of Experts (MoE) module, which selects specialized MLP to align features across domains and modalities; and the decoders, which map the unified embeddings back into domain-specific inputs. The GNN operates as the central component that propagates node embeddings based on both their multimodal features and the underlying graph structure.
177
+
178
+ # 4.1 Multimodal Masking Strategies
179
+
180
+ In UniGraph2, masking strategies play a crucial role in the self-supervised learning framework for MMGs. The objective is to mask a portion of the node features and require the model to reconstruct them, thereby encouraging the model to effectively capture both the structural and multimodal information.
181
+
182
+ Modality-Specific Encoding. Before applying the masking process, modality-specific encoders are used to map raw data from different modalities into feature vectors. In the context of a multimodal graph $\mathcal{G} = (\mathcal{V},\mathcal{E},\mathcal{M},\Omega)$, where each node $v\in \mathcal{V}$ can have features from a subset of modalities $\Omega_v\subseteq \Omega$, the raw features are transformed using encoders specific to each modality (e.g., a language model for text, and a Vision Transformer for images).
183
+
184
+ Let $E_{\omega}$ represent the encoder for a modality $\omega \in \Omega$ , and let $\pmb{x}_i^{(\omega)} \in \mathbb{R}^{d_{\mathrm{in}}}$ denote the feature vector for node $v_i$ obtained from modality $\omega$ . The modality-specific encoding can be expressed as:
185
+
186
+ $$
187
+ \boldsymbol {x} _ {i} ^ {(\omega)} = E _ {\omega} \left(v _ {i} ^ {(\omega)}\right). \tag {7}
188
+ $$
189
+
190
+ The features $\boldsymbol{x}_i \in \mathbb{R}^{d_{\mathrm{in}}}$ for node $v_i$ are then obtained by averaging the features from all modalities $\Omega_v$ associated with the node:
191
+
192
+ $$
193
+ x _ {i} = \frac {1}{| \Omega_ {v} |} \sum_ {\omega \in \Omega_ {v}} x _ {i} ^ {(\omega)}. \tag {8}
194
+ $$
195
+
196
+ ![](images/e619022fde528467f2ba2c55422fb856b88d462693b98fd1c90fa09cf3755d35.jpg)
197
+ Figure 1: Overview of the UniGraph2 framework. In pre-training, 1) UniGraph2 uses frozen Modality-Specific Encoders to encode raw multimodal data (e.g., text, images) into vector node features. Then, a portion of these node features is randomly masked. 2) Considering the diversity of node features across different modalities and graph domains, a Mixture of Experts (MoE) network is used to align the different node features, allowing the model to assign each node to one or more experts based on its domain and modality. 3) The aligned node features are fed into a GNN for learning and projected into a unified embedding space. 4) The decoding involves two objectives: a. Each graph domain corresponds to a specific decoder for reconstructing the node features. b. A shared shortest path distance decoder is used to reconstruct the graph structures.
198
+
199
+ Masking Node Features. Once the features of each node are encoded, a masking strategy is applied. A subset of nodes $\tilde{\mathcal{V}} \subseteq \mathcal{V}$ is selected uniformly without replacement, and their features are replaced with a learnable mask token $x_{[M]} \in \mathbb{R}^{d_{\mathrm{in}}}$. This process is applied to approximately $75\%$ of the nodes to encourage robust learning by focusing on the graph context and unmasked nodes. For each node $v_i \in \mathcal{V}$, the masked feature $\tilde{x}_i$ is defined as:
200
+
201
+ $$
202
+ \tilde{\boldsymbol{x}}_{i} = \left\{ \begin{array}{ll} \boldsymbol{x}_{[M]} & \text{if } v_{i} \in \tilde{\mathcal{V}}, \\ \boldsymbol{x}_{i} & \text{if } v_{i} \notin \tilde{\mathcal{V}}. \end{array} \right. \tag{9}
203
+ $$
204
+
205
+ This masked feature $\tilde{\pmb{x}}_i$ serves as the input to the MoE, which aligns the features from different graph domains and modalities.
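+ The sketch below illustrates Eqs. (7)-(9): encoding each node's available modalities with frozen encoders, averaging them, and masking a random 75% of nodes with a learnable token. The encoder callables and dimensions are placeholders, and in the actual model the mask token would be a shared module parameter rather than an argument.
+ ```python
+ import torch
+
+ def encode_and_mask(nodes, encoders, mask_token, mask_ratio=0.75):
+     """nodes: list of dicts {modality name -> raw input}; encoders: dict of
+     frozen callables returning a (d_in,) tensor; mask_token: learnable (d_in,) vector."""
+     feats = []
+     for node in nodes:
+         vecs = [encoders[m](raw) for m, raw in node.items()]   # Eq. (7)
+         feats.append(torch.stack(vecs).mean(dim=0))            # Eq. (8)
+     x = torch.stack(feats)                                     # (N, d_in)
+     n_mask = int(mask_ratio * len(nodes))
+     masked_idx = torch.randperm(len(nodes))[:n_mask]           # uniform, w/o replacement
+     x_tilde = x.clone()
+     x_tilde[masked_idx] = mask_token                           # Eq. (9)
+     return x_tilde, masked_idx
+ ```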
206
+
207
+ # 4.2 Mixture of Experts (MoE) Alignment
208
+
209
+ Inspired by and adopted from GraphAlign [25], the MoE module [44] in UniGraph2 is designed to achieve cross-domain and cross-modality alignment by dynamically selecting specialized experts for different types of data. In MMGs, nodes may come from various domains (e.g., social networks, product networks) and have features from different modalities (e.g., text, images). A single expert network might struggle to learn appropriate representations for such diverse data. However, with the MoE architecture, the model can assign each node to one or more experts based on its domain and modality. This enables the model to adaptively align and fuse heterogeneous node features by leveraging specialized experts for specific data types. The result is a flexible and powerful model that can learn and generalize across diverse graph structures and modalities, even when there are significant differences in feature types and distributions across domains.
210
+
211
+ Each node $v_{i}$ is assigned to one or more experts through a gating mechanism. Each expert $E_{k}$ is an MLP that processes the feature vector $\tilde{\pmb{x}}_i$ . The final node embedding $\pmb{e}_i$ is computed as a weighted combination of the outputs from the selected experts:
212
+
213
+ $$
214
+ \boldsymbol {e} _ {i} = \sum_ {k = 1} ^ {K} \alpha_ {i, k} E _ {k} \left(\tilde {\boldsymbol {x}} _ {i}\right). \tag {10}
215
+ $$
216
+
217
+ Here, $E_{k}(\tilde{\boldsymbol{x}}_{i})$ denotes the output of expert $k$ for the node's feature vector $\tilde{\boldsymbol{x}}_{i}$ , and $\alpha_{i,k}$ represents the weight assigned to the $k$ -th expert for node $v_{i}$ . The weights $\alpha_{i,k}$ are computed using a softmax gating function, which assigns higher weights to the experts that are more relevant for the node based on its transformed features:
218
+
219
+ $$
220
+ \alpha_ {i, k} = \frac {\exp \left(g _ {k} \left(\tilde {x} _ {i}\right)\right)}{\sum_ {k = 1} ^ {K} \exp \left(g _ {k} \left(\tilde {x} _ {i}\right)\right)}, \tag {11}
221
+ $$
222
+
223
+ where $g_{k}(\cdot)$ is the gating function that scores the relevance of expert $E_{k}$ for node $v_{i}$ . The gating function $g_{k}$ is also an MLP that computes a scalar relevance score for each expert based on the input $\tilde{x}_{i}$ :
224
+
225
+ $$
226
+ g_{k}\left(\tilde{x}_{i}\right) = \operatorname{MLP}_{g}\left(\tilde{x}_{i}\right)_{k}. \tag{12}
227
+ $$
228
+
229
+ Here, the subscript $k$ denotes the $k$ -th component of the gating MLP output, corresponding to the relevance score for expert $E_{k}$ .
230
+
231
+ Thus, the MoE module adaptively routes each node's features to the most relevant experts, allowing for effective cross-domain and multimodal alignment. The experts, being specialized MLPs, capture domain-specific or modality-specific knowledge, enabling UniGraph2 to generalize well across diverse data distributions.
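+ A compact sketch of Eqs. (10)-(12) with dense (soft) gating; the expert and gating networks are plain two-layer MLPs with placeholder sizes, and a sparse top-k routing variant could be substituted without changing the interface.
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MoEAlign(nn.Module):
+     """Mixture-of-experts feature alignment (Eqs. 10-12)."""
+     def __init__(self, d_in, d_out, num_experts=8, d_hidden=1024):
+         super().__init__()
+         self.experts = nn.ModuleList([
+             nn.Sequential(nn.Linear(d_in, d_hidden), nn.ReLU(), nn.Linear(d_hidden, d_out))
+             for _ in range(num_experts)])
+         # Gating MLP g in Eq. (12); its k-th output scores expert E_k.
+         self.gate = nn.Sequential(nn.Linear(d_in, d_hidden), nn.ReLU(),
+                                   nn.Linear(d_hidden, num_experts))
+
+     def forward(self, x_tilde):                                       # (N, d_in)
+         alpha = torch.softmax(self.gate(x_tilde), dim=-1)             # (N, K), Eq. (11)
+         out = torch.stack([E(x_tilde) for E in self.experts], dim=1)  # (N, K, d_out)
+         return (alpha.unsqueeze(-1) * out).sum(dim=1)                 # (N, d_out), Eq. (10)
+ ```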
232
+
233
+ GNN Encoding. Once the aligned node embeddings $\pmb{e}_i$ are obtained through the MoE module, they are passed through a GNN, denoted as $f_{\mathrm{GNN}}$ , to further refine the node representations by incorporating the structural information of the graph $\mathcal{G}$ . The GNN takes $\pmb{e}_i$ as
234
+
235
+ input and propagates messages between neighboring nodes to learn the final node embeddings $\pmb{h}_i$ :
236
+
237
+ $$
238
+ \boldsymbol{h}_{i} = f_{\mathrm{GNN}}\left(\boldsymbol{e}_{i}, \mathcal{G}\right). \tag{13}
239
+ $$
240
+
241
+ Here, $f_{\mathrm{GNN}}(\cdot)$ represents the GNN, which updates the embedding of each node by aggregating information from its neighbors.
242
+
243
+ Scaling to Web-Scale Graphs. To ensure the scalability of UniGraph2 on web-scale graphs, we use the Personalized PageRank (PPR) algorithm for subgraph sampling. By using PPR as the sampling strategy, we can generate the most structurally significant local subgraphs [4, 14]. Unlike other sampling methods, such as neighbor sampling or k-hop neighbors, PPR can identify key nodes and structures that hold importance in a wider context, making them more broadly applicable [23, 36].
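+ One way such PPR-based sampling could be realized is sketched below with networkx; the restart probability, subgraph size, and the choice to induce the subgraph on the top-scoring nodes are illustrative assumptions, not the exact released pipeline.
+ ```python
+ import networkx as nx
+
+ def ppr_subgraph(G, seed, k=256, restart=0.15):
+     """Induced subgraph on the k nodes with the highest Personalized PageRank
+     score with respect to `seed`. networkx's alpha is the damping factor,
+     i.e. 1 - restart probability."""
+     scores = nx.pagerank(G, alpha=1.0 - restart, personalization={seed: 1.0})
+     top = sorted(scores, key=scores.get, reverse=True)[:k]
+     if seed not in top:
+         top.append(seed)
+     return G.subgraph(top).copy()
+ ```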
244
+
245
+ # 4.3 Multiple Decoders
246
+
247
+ Graphs from diverse domains exhibit distinct structural and feature characteristics. A single, generic decoder would struggle to capture the specific nuances and patterns of each domain, as different types of graphs often require specialized approaches for feature reconstruction. By incorporating multiple decoders, each tailored to a specific graph domain, UniGraph2 is able to accurately reconstruct features while preserving domain-specific details.
248
+
249
+ Feature Reconstruction. Each decoder is responsible for reconstructing the original node features $x_{i}$ from the embeddings $\boldsymbol{h}_{i}$ generated by the GNN encoder. Formally, for a domain-specific GNN decoder $f_{D}$, the reconstructed feature $z_{i}$ is obtained as:
250
+
251
+ $$
252
+ \boldsymbol {z} _ {i} = f _ {D} \left(\boldsymbol {h} _ {i}, \mathcal {G}\right). \tag {14}
253
+ $$
254
+
255
+ To measure the reconstruction quality, UniGraph2 uses a cosine similarity loss [24, 26], which is defined as follows:
256
+
257
+ $$
258
+ \mathcal{L}_{\text{feat}} = \frac{1}{|\tilde{\mathcal{V}}|}\sum_{v_{i}\in\tilde{\mathcal{V}}}\left(1 - \frac{\boldsymbol{x}_{i}^{T}\boldsymbol{z}_{i}}{\|\boldsymbol{x}_{i}\|\cdot\|\boldsymbol{z}_{i}\|}\right)^{\gamma}, \quad \gamma \geq 1, \tag{15}
259
+ $$
260
+
261
+ where $x_{i}$ represents the original feature for node $v_{i}$ , $z_{i}$ is the reconstructed feature, and $\gamma$ is a hyperparameter that controls the sharpness of the loss. This loss ensures that the reconstructed features $z_{i}$ maintain the same directional similarity as the original features $x_{i}$ , encouraging accurate feature reconstruction.
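+ A minimal sketch of the scaled cosine-similarity loss in Eq. (15), computed only over the masked nodes; tensor shapes and the value of $\gamma$ are placeholders.
+ ```python
+ import torch.nn.functional as F
+
+ def feature_recon_loss(x, z, masked_idx, gamma=2.0):
+     """x, z: (N, d) original and reconstructed features; masked_idx: indices of masked nodes."""
+     cos = F.cosine_similarity(x[masked_idx], z[masked_idx], dim=-1)   # cosine per masked node
+     return ((1.0 - cos) ** gamma).mean()
+ ```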
262
+
263
+ Structural Reconstruction. In addition to reconstructing node features, UniGraph2 incorporates a shared decoder across all domains to capture structural information. Specifically, the model performs an edge-level reconstruction task to predict the shortest path distance (SPD) between node pairs, which encodes global proximity and connectivity within the graph.
264
+
265
+ The shortest path distance $\mathrm{SPD}_{i,j}$ between nodes $v_{i}$ and $v_{j}$ is precomputed using Dijkstra's algorithm. The loss function for shortest path distance regression is defined as:
266
+
267
+ $$
268
+ \mathcal{L}_{\mathrm{SPD}} = \frac{1}{|\mathcal{V}|^{2}}\sum_{(i,j)\in\mathcal{V}\times\mathcal{V}}\left\|f_{\mathrm{SPD}}(\boldsymbol{h}_{i} \,\|\, \boldsymbol{h}_{j}) - \mathrm{SPD}_{i,j}\right\|^{2}, \tag{16}
269
+ $$
270
+
271
+ where $\pmb{h}_i$ and $\pmb{h}_j$ are the final GNN embeddings for nodes $v_i$ and $v_j$, respectively, $\|$ denotes concatenation, and $f_{\mathrm{SPD}}$ is a task-specific head that predicts the shortest path distance between the two nodes.
272
+
273
+ By regressing the SPD, the model learns to reconstruct the underlying structure of the graph, allowing it to capture the global connectivity between nodes, which is essential for tasks that depend on the graph's topology.
274
+
275
+ The overall loss is then obtained by combining the two losses with a mixing coefficient $\lambda$.
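+ A sketch of the SPD regression head and the combined objective, assuming the sum over all node pairs in Eq. (16) is approximated by a sampled batch of pairs with precomputed shortest-path distances; the head architecture and the value of $\lambda$ are placeholders.
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SPDHead(nn.Module):
+     """f_SPD: regress shortest-path distance from concatenated embeddings (Eq. 16)."""
+     def __init__(self, d):
+         super().__init__()
+         self.mlp = nn.Sequential(nn.Linear(2 * d, d), nn.ReLU(), nn.Linear(d, 1))
+
+     def forward(self, h_i, h_j):
+         return self.mlp(torch.cat([h_i, h_j], dim=-1)).squeeze(-1)
+
+ def pretrain_loss(l_feat, h, pairs, spd_target, spd_head, lam=0.1):
+     """Overall objective: feature loss + lambda * SPD loss over sampled pairs (i, j)."""
+     pred = spd_head(h[pairs[:, 0]], h[pairs[:, 1]])
+     l_spd = ((pred - spd_target) ** 2).mean()
+     return l_feat + lam * l_spd
+ ```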
276
+
277
+ # 4.4 Inference
278
+
279
+ In the inference phase, the pre-trained UniGraph2 model is deployed to generate node embeddings for any unseen multimodal graph $\mathcal{G}^{\mathrm{inf}} = (\mathcal{V}^{\mathrm{inf}},\mathcal{E}^{\mathrm{inf}},\mathcal{M}^{\mathrm{inf}})$ . The inference process follows a streamlined version of the training pipeline, leveraging the Modality-Specific Encoders, the MoE module, and the GNN to produce high-quality embeddings for downstream tasks such as classification, transfer learning, or generative tasks.
280
+
281
+ Modality-Specific Encoding. For each node $v_{i} \in \mathcal{V}^{\mathrm{inf}}$, the raw features from various modalities are first processed through the respective modality-specific encoders. Let $\Omega_{v_i}^{\mathrm{inf}} \subseteq \Omega$ represent the set of modalities associated with node $v_{i}$ in the inference graph. The modality-specific features are transformed as follows: $x_{i}^{(\omega)} = E_{\omega}(v_{i}^{(\omega)})$, $\forall \omega \in \Omega_{v_i}^{\mathrm{inf}}$. The node feature vector $x_{i}^{\mathrm{inf}}$ is obtained by averaging the features from all available modalities: $x_{i}^{\mathrm{inf}} = \frac{1}{|\Omega_{v_i}^{\mathrm{inf}}|}\sum_{\omega \in \Omega_{v_i}^{\mathrm{inf}}} x_{i}^{(\omega)}$.
282
+
283
+ Feature Alignment. The modality-specific feature vectors are passed through the MoE module to align and fuse information across modalities and domains. The same gating mechanism used during training is applied to select the relevant experts for each node. For each node $v_{i}$ , the final fused embedding $e_{i}^{\mathrm{inf}}$ is computed as a weighted sum of the selected experts: $e_{i}^{\mathrm{inf}} = \sum_{k=1}^{K} \alpha_{i,k}^{\mathrm{inf}} E_{k}(x_{i}^{\mathrm{inf}})$ , where $x_{i}^{\mathrm{inf}}$ is the transformed feature of node $v_{i}$ , and $\alpha_{i,k}^{\mathrm{inf}}$ represents the weight assigned to expert $E_{k}$ for the given node, computed using the softmax gating function.
284
+
285
+ GNN Encoding. Once the aligned node features are obtained, they are passed through the GNN module to incorporate the structural information of the inference graph $\mathcal{G}^{\mathrm{inf}}$ . The GNN refines node embeddings by propagating messages between neighboring nodes. The output node embeddings $h_i^{\mathrm{inf}}$ are computed as: $h_i^{\mathrm{inf}} = f_{\mathrm{GNN}}(e_i^{\mathrm{inf}},\mathcal{G}^{\mathrm{inf}})$ , where $f_{\mathrm{GNN}}$ is the pre-trained GNN.
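+ Putting the three inference stages together, a frozen-model embedding pass might look like the following sketch; the component names (`encoders`, `moe`, `gnn`) and their call signatures are assumptions for illustration.
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def unigraph2_infer(G_inf, node_inputs, encoders, moe, gnn):
+     """Modality encoding -> averaging -> MoE alignment -> GNN propagation."""
+     feats = []
+     for node in node_inputs:                     # dict: modality -> raw data
+         vecs = [encoders[m](raw) for m, raw in node.items()]
+         feats.append(torch.stack(vecs).mean(dim=0))
+     x = torch.stack(feats)                       # (N, d_in)
+     e = moe(x)                                   # aligned features e_i^inf
+     return gnn(e, G_inf)                         # final embeddings h_i^inf
+ ```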
286
+
287
+ # 5 Experiments
288
+
289
+ In this section, we evaluate our UniGraph2 framework on three distinct research problems: 1) Self-Supervised Representation Learning, 2) Few-Shot Transfer, and 3) Multimodal Generative Tasks. Table 7 lists all 14 datasets used in the experiments.
290
+
291
+ # 5.1 Self-Supervised Representation Learning
292
+
293
+ Setup. We adopt the widely used linear probing protocol to evaluate the representation learning capability of self-supervised pre-trained models on unseen datasets. Specifically, we train a linear classifier on top of the embeddings generated by a frozen pre-trained model. Our model, along with all self-supervised learning baselines, is first jointly pre-trained on ogbn-Product, ogbn-Papers100M, Goodreads-LP, and Amazon-Cloth. We then evaluate the pre-trained models on each individual dataset. Detailed settings and hyperparameters are provided in Appendix B.
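+ For reference, linear probing amounts to fitting a linear classifier on frozen embeddings; a minimal sketch with scikit-learn is shown below (the classifier and its settings are illustrative, not the exact evaluation code).
+ ```python
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.metrics import accuracy_score
+
+ def linear_probe(train_emb, train_y, test_emb, test_y):
+     """Train a linear classifier on frozen embeddings and report test accuracy."""
+     clf = LogisticRegression(max_iter=1000).fit(train_emb, train_y)
+     return accuracy_score(test_y, clf.predict(test_emb))
+ ```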
294
+
295
+ Table 1: Experiment results in self-supervised representation learning. We report accuracy (%) for node/edge classification tasks and MRR (%) for link prediction tasks. UniGraph2 and other self-supervised baselines (rows in white) are jointly pre-trained on Products, Papers100M, Goodreads-LP and Amazon-Cloth, and then evaluated on the individual target dataset. "In-distribution" refers to pre-training on multiple datasets and evaluating on the same datasets. "In-domain Generalization" involves testing on target datasets from the same domain as one of the pre-training datasets. "Out-of-domain Generalization" evaluates on datasets from domains not seen during pre-training. The performance of methods that are directly pre-trained on the individual target dataset, is marked in gray. The methods highlighted in bold are the best-performing ones among the "rows in white" methods, while those marked in red are the best-performing methods among all methods, including those in the gray rows.
296
+
297
+ <table><tr><td rowspan="2"></td><td colspan="4">In-distribution</td><td colspan="4">In-domain Generalization</td><td colspan="3">Out-of-domain Generalization</td></tr><tr><td>Products</td><td>Papers100M</td><td>Goodreads-LP</td><td>Amazon-Cloth</td><td>Arxiv</td><td>Amazon-Sports</td><td>Goodreads-NC</td><td>Ele-fashion</td><td>Wiki-CS</td><td>FB15K237</td><td>WN18RR</td></tr><tr><td colspan="12">Use CLIP to encode raw multimodal data as input features.</td></tr><tr><td>NoPretrain</td><td>68.01±0.15</td><td>54.99±0.04</td><td>9.61±0.21</td><td>19.01±0.04</td><td>62.01±0.14</td><td>26.01±0.14</td><td>68.12±0.13</td><td>75.11±0.12</td><td>68.12±0.06</td><td>89.42±0.20</td><td>74.00±0.02</td></tr><tr><td>BGRL</td><td>70.11±0.14</td><td>57.12±0.05</td><td>20.53±0.02</td><td>19.11±0.01</td><td>65.25±0.05</td><td>27.35±0.05</td><td>72.97±0.08</td><td>76.53±0.02</td><td>70.11±0.14</td><td>88.11±0.12</td><td>73.24±0.11</td></tr><tr><td>BGRL</td><td>75.86±0.11</td><td>60.35±0.11</td><td>26.42±0.15</td><td>20.11±0.45</td><td>70.15±0.14</td><td>30.11±0.12</td><td>80.53±0.35</td><td>81.94±0.10</td><td>73.11±0.09</td><td>92.22±0.14</td><td>76.15±0.16</td></tr><tr><td>GraphMAE2</td><td>72.25±0.16</td><td>60.25±0.01</td><td>24.11±0.14</td><td>19.55±0.22</td><td>69.18±0.02</td><td>28.94±0.02</td><td>76.18±0.05</td><td>77.04±0.05</td><td>72.15±0.14</td><td>90.54±0.04</td><td>74.11±0.13</td></tr><tr><td>GraphMAE2</td><td>77.34±0.15</td><td>61.97±0.10</td><td>26.89±0.14</td><td>19.87±0.21</td><td>70.46±0.07</td><td>30.83±0.11</td><td>80.24±0.14</td><td>82.11±0.01</td><td>76.01±0.24</td><td>92.96±0.14</td><td>76.97±0.14</td></tr><tr><td>GCOPE</td><td>78.01±0.13</td><td>62.34±0.11</td><td>23.11±0.13</td><td>18.72±0.25</td><td>70.24±0.11</td><td>26.18±0.12</td><td>79.11±0.14</td><td>78.97±0.10</td><td>73.57±0.12</td><td>91.25±0.15</td><td>75.68±0.10</td></tr><tr><td colspan="12">Use raw text as input features.</td></tr><tr><td>GIANT-XRT</td><td>72.56±0.10</td><td>64.53±0.11</td><td>8.11±0.05</td><td>16.78±0.25</td><td>70.89±0.11</td><td>22.01±0.04</td><td>58.14±0.10</td><td>67.01±0.05</td><td>74.01±0.03</td><td>90.14±0.14</td><td>75.01±0.13</td></tr><tr><td>UniGraph</td><td>80.11±0.21</td><td>65.23±0.20</td><td>19.19±0.10</td><td>16.38±0.08</td><td>72.15±0.18</td><td>25.89±0.12</td><td>73.26±0.12</td><td>75.11±0.06</td><td>76.35±0.20</td><td>93.11±0.09</td><td>84.06±0.24</td></tr><tr><td>UniGraph</td><td>82.24±0.24</td><td>67.89±0.21</td><td>22.31±0.05</td><td>18.01±0.03</td><td>73.97±0.22</td><td>27.11±0.10</td><td>78.14±0.11</td><td>81.05±0.08</td><td>81.22±0.24</td><td>95.24±0.23</td><td>87.21±0.76</td></tr><tr><td colspan="12">Use raw multimodal data as input 
features.</td></tr><tr><td>CLIP</td><td>65.28±0.12</td><td>50.21±0.09</td><td>9.24±0.01</td><td>18.24±0.21</td><td>61.56±0.02</td><td>25.91±0.08</td><td>66.48±0.11</td><td>82.18±0.03</td><td>67.53±0.05</td><td>88.65±0.13</td><td>72.68±0.14</td></tr><tr><td>ImageBind</td><td>45.11±0.02</td><td>42.53±0.11</td><td>6.89±0.04</td><td>19.10±0.10</td><td>42.11±0.03</td><td>27.11±0.04</td><td>55.71±0.04</td><td>83.14±0.06</td><td>49.28±0.03</td><td>68.20±0.10</td><td>64.38±0.12</td></tr><tr><td>NoPretrain</td><td>68.34±0.14</td><td>55.15±0.10</td><td>9.62±0.02</td><td>19.25±0.04</td><td>63.76±0.11</td><td>25.03±0.15</td><td>68.01±0.15</td><td>83.96±0.10</td><td>68.45±0.10</td><td>89.14±0.19</td><td>74.01±0.15</td></tr><tr><td>UniGraph2</td><td>82.79±0.02</td><td>67.95±0.11</td><td>28.98±0.11</td><td>24.64±0.09</td><td>72.56±0.15</td><td>30.95±0.11</td><td>81.15±0.12</td><td>85.71±0.11</td><td>78.15±0.09</td><td>94.38±0.05</td><td>85.47±0.11</td></tr><tr><td>UniGraph2</td><td>82.36±0.21</td><td>67.67±0.18</td><td>28.76±0.08</td><td>24.06±0.06</td><td>73.46±0.17</td><td>31.61±0.14</td><td>81.97±0.10</td><td>87.91±0.09</td><td>82.86±0.07</td><td>95.29±0.04</td><td>87.86±0.06</td></tr></table>
298
+
299
+ For the baselines, we compare UniGraph2 with state-of-the-art generative graph self-supervised learning methods, GraphMAE2 [24] and contrastive methods, BGRL [48]. As these methods are not inherently designed for cross-domain tasks, we leverage CLIP [42] to unify the input node features across different graphs. We also include a comparison with a multi-graph pre-training method, GCOPE [62]. UniGraph2 and all baseline methods utilize GAT [50] as the backbone GNN. For baselines that use TAGs as input, we select GIANT-XRT [63] and UniGraph [23]. Since these methods cannot process image data, they rely solely on text from MMG as node features, ignoring image inputs. For baseline approaches that accept multimodal data, we choose widely used multimodal models, CLIP [42] and ImageBind [15]. To maintain consistency with the baselines, UniGraph2 also uses CLIP's pre-trained vision and text encoders as Modality-Specific Encoders.
300
+
301
+ Our objective is to develop a general embedding model capable of generating high-quality representations for any MMG. To assess this, we evaluate the performance of UniGraph2 and the baselines in three different settings: (1) In-distribution, where models are pre-trained on multiple datasets and evaluated on each corresponding dataset individually; (2) In-domain Generalization, which tests pre-trained models on target datasets from the same domain as one of the pre-training datasets; and (3) Out-of-domain Generalization, where models are evaluated on datasets from domains unseen during pre-training.
302
+
303
+ Research Questions. In this subsection, we aim to answer the following research questions:
304
+
305
+ - RQ1: Negative Transfer in Multi-Graph Pre-Training. How do existing graph pre-training methods, which are primarily designed for single-graph pre-training, perform when applied
306
+
307
+ to multi-graph pre-training, and how do they compare to our proposed UniGraph2?
308
+
309
+ - RQ2: Comparison to Other Foundation Models. How does UniGraph2, which takes both multimodal data and graph structures as input, perform compared to methods that consider only multimodal data (CLIP, ImageBind) or only TAGs (UniGraph)?
310
+ - RQ3: Generalization Capability. How does UniGraph2, designed as a foundation model, perform in terms of generalizing to unseen graphs, and how does it compare to methods trained directly on the target graphs?
311
+
312
+ Results. Table 1 presents the results. We interpret these results by answering three research questions:
313
+
314
+ - RQ1: Negative Transfer in Multi-Graph Pre-Training. Existing graph pre-training methods exhibit negative transfer when applied to multi-graph pre-training, whereas UniGraph2 shows improvements in this context. The results in the In-distribution setting demonstrate that both BGRL and GraphMAE2 experience a significant performance drop when pre-trained on multi-graphs (rows in white), compared to pre-training on single graph only (rows in gray). This suggests that pre-training on other datasets negatively affects performance on the target dataset. However, UniGraph2 shows improvement under multi-graph pre-training, indicating that it successfully addresses the shortcomings of existing graph pre-training algorithms struggling with multi-graphs.
315
+ - RQ2: Comparison to Other Foundation Models. UniGraph2 outperforms methods that consider only multimodal data (CLIP, ImageBind) or only TAGs (UniGraph). We observe that without considering the graph structure, the performance of the acknowledged powerful multimodal foundation models like CLIP is not comparable to UniGraph2. Meanwhile, UniGraph, which cannot
316
+
317
+ Table 2: Experiment results in few-shot transfer. We report accuracy (%) for node/edge classification tasks. UniGraph2 and other self-supervised baselines (rows in white) are jointly pre-trained on Product, Papers100M, Goodreads-NC and Amazon-Cloth, and then evaluated on the individual target dataset. "In-domain Generalization" tests on target datasets from the same domain as one of the pre-training datasets. "Out-of-domain Generalization" evaluates on datasets from domains not seen during pre-training. The performance of methods that are directly pre-trained on the individual target dataset, is marked in gray.
318
+
319
+ <table><tr><td rowspan="3"></td><td colspan="12">In-domain Generalization</td><td colspan="6">Out-of-domain Generalization</td></tr><tr><td colspan="2">Cora-5-way</td><td colspan="2">PubMed-2-way</td><td colspan="2">Arxiv-5-way</td><td colspan="3">Goodreads-NC-5-way</td><td colspan="3">Ele-fashion-5-way</td><td colspan="2">Wiki-CS-5-way</td><td colspan="2">FB15K237-20-way</td><td colspan="2">WN18RR-5-way</td></tr><tr><td>5-shot</td><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td><td>3-shot</td><td>1-shot</td><td>5-shot</td><td>3-shot</td><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td><td>1-shot</td><td>5-shot</td><td>1-shot</td></tr><tr><td colspan="19">Use CLIP to encode raw multimodal data as input features.</td></tr><tr><td>NoPretrain</td><td>41.09</td><td>27.05</td><td>59.81</td><td>55.28</td><td>63.78</td><td>41.10</td><td>41.64</td><td>40.01</td><td>31.04</td><td>63.96</td><td>58.32</td><td>47.48</td><td>52.29</td><td>32.94</td><td>72.97</td><td>47.01</td><td>50.75</td><td>30.11</td></tr><tr><td>BGRL</td><td>52.01</td><td>35.18</td><td>66.04</td><td>59.04</td><td>60.12</td><td>46.67</td><td>47.01</td><td>44.22</td><td>30.35</td><td>64.72</td><td>60.16</td><td>46.49</td><td>52.10</td><td>32.85</td><td>75.39</td><td>45.15</td><td>47.42</td><td>34.57</td></tr><tr><td>GraphMAE2</td><td>52.89</td><td>36.25</td><td>66.89</td><td>59.95</td><td>60.91</td><td>47.29</td><td>47.84</td><td>44.80</td><td>30.93</td><td>65.52</td><td>60.92</td><td>47.24</td><td>52.83</td><td>33.41</td><td>75.95</td><td>45.81</td><td>48.14</td><td>35.21</td></tr><tr><td>Prodigy</td><td>53.01</td><td>39.59</td><td>69.11</td><td>60.42</td><td>63.53</td><td>51.33</td><td>50.01</td><td>46.39</td><td>34.98</td><td>67.35</td><td>63.87</td><td>50.79</td><td>55.94</td><td>36.35</td><td>78.01</td><td>51.39</td><td>54.94</td><td>38.73</td></tr><tr><td>OFA</td><td>53.11</td><td>40.04</td><td>69.45</td><td>60.38</td><td>63.11</td><td>50.25</td><td>49.61</td><td>46.24</td><td>35.14</td><td>67.94</td><td>64.18</td><td>51.35</td><td>56.01</td><td>37.02</td><td>78.33</td><td>52.02</td><td>55.05</td><td>39.11</td></tr><tr><td>GCOPE</td><td>51.98</td><td>36.14</td><td>66.25</td><td>59.16</td><td>60.29</td><td>47.19</td><td>48.52</td><td>44.89</td><td>31.20</td><td>65.10</td><td>61.33</td><td>48.51</td><td>53.74</td><td>34.19</td><td>76.10</td><td>48.93</td><td>50.19</td><td>35.05</td></tr><tr><td colspan="19">Use raw text as input features.</td></tr><tr><td>GIANT-XRT</td><td>50.11</td><td>37.85</td><td>68.19</td><td>58.78</td><td>62.01</td><td>49.01</td><td>46.01</td><td>43.86</td><td>30.01</td><td>62.97</td><td>61.21</td><td>47.76</td><td>54.01</td><td>35.04</td><td>76.09</td><td>50.25</td><td>53.01</td><td>35.19</td></tr><tr><td>UniGraph</td><td>54.23</td><td>40.45</td><td>70.21</td><td>60.19</td><td>64.76</td><td>50.63</td><td>46.19</td><td>44.01</td><td>33.53</td><td>66.21</td><td>62.04</td><td>50.17</td><td>56.16</td><td>37.19</td><td>78.21</td><td>52.19</td><td>55.18</td><td>39.18</td></tr><tr><td colspan="19">Use raw multimodal data as input 
features.</td></tr><tr><td>CLIP</td><td>41.23</td><td>28.41</td><td>61.67</td><td>55.71</td><td>63.46</td><td>40.14</td><td>41.24</td><td>40.11</td><td>30.97</td><td>62.51</td><td>58.23</td><td>46.15</td><td>51.69</td><td>31.61</td><td>72.31</td><td>47.14</td><td>50.83</td><td>31.35</td></tr><tr><td>ImageBind</td><td>32.19</td><td>23.90</td><td>58.20</td><td>54.24</td><td>62.48</td><td>38.17</td><td>29.10</td><td>28.14</td><td>21.42</td><td>51.25</td><td>48.05</td><td>44.93</td><td>48.14</td><td>30.28</td><td>69.12</td><td>41.80</td><td>41.24</td><td>26.91</td></tr><tr><td>NoPretrain</td><td>42.41</td><td>28.39</td><td>60.78</td><td>55.90</td><td>64.29</td><td>41.98</td><td>42.21</td><td>41.20</td><td>31.14</td><td>64.15</td><td>58.91</td><td>47.90</td><td>52.90</td><td>33.14</td><td>74.10</td><td>48.11</td><td>51.92</td><td>31.84</td></tr><tr><td>UniGraph2</td><td>56.01</td><td>42.98</td><td>72.19</td><td>61.24</td><td>66.24</td><td>51.98</td><td>51.73</td><td>47.42</td><td>37.01</td><td>69.29</td><td>65.29</td><td>53.85</td><td>57.28</td><td>38.47</td><td>79.34</td><td>52.19</td><td>55.59</td><td>39.93</td></tr></table>
320
+
321
+ process image data, also shows less ideal results due to the lack of information. This further highlights the necessity of designing foundation models specifically for multimodal graphs.
322
+
323
+ - RQ3: Generalization Capability. Compared to baseline methods, UniGraph2 demonstrates strong generalization capabilities. The results in the In-domain Generalization and Out-of-domain Generalization settings show that UniGraph2 effectively transfers knowledge from pre-training to unseen graphs. Compared to the NoPretrain method, UniGraph2 shows significant improvements. The consistent performance gains indicate that UniGraph2 can extract meaningful patterns during pre-training, which are beneficial for tackling graph learning tasks. Furthermore, UniGraph2 is comparable to methods trained directly on the target datasets, achieving similar accuracy while benefiting from greater efficiency without requiring exhaustive task-specific training.
324
+
325
+ # 5.2 Few-Shot Transfer
326
+
327
+ Setup. In this part, we evaluate the ability of the pre-trained models to perform few-shot in-context transfer without updating the model parameters. For baseline methods, in addition to the pre-trained models mentioned in Section 5.1, we also compare two recent graph in-context learning methods: the self-supervised pre-training method Prodigy [28] and the supervised pre-training method OFA [34]
328
+
329
+ For evaluation, we strictly follow the setting of Prodigy [28]. For an N-way K-shot task, we adopt the original train/validation/test splits in each downstream classification dataset, and construct a $K$ -shot prompt for test nodes (or edges) from the test split by randomly selecting $K$ examples per way from the train split. By default in all experiments, we sample 500 test tasks.
330
+
331
+ We adopt the few-shot classification strategy in UniGraph [23] for UniGraph2. The model computes average embeddings for each class and assigns a query sample to the class with the highest similarity to its embedding.
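+ This prototype-style strategy can be sketched as follows, assuming frozen support and query embeddings and cosine similarity as the matching score; the similarity choice is an assumption for illustration.
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def few_shot_predict(support_emb, support_y, query_emb):
+     """Assign each query to the class whose averaged (prototype) support
+     embedding is most similar to the query embedding."""
+     classes = sorted(set(support_y.tolist()))
+     protos = torch.stack([support_emb[support_y == c].mean(dim=0) for c in classes])  # (C, d)
+     sims = F.cosine_similarity(query_emb.unsqueeze(1), protos.unsqueeze(0), dim=-1)   # (Q, C)
+     return torch.tensor(classes)[sims.argmax(dim=1)]
+ ```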
332
+
333
+ Results. In Table 2, our UniGraph2 model consistently outperforms all the baselines. This further demonstrates the powerful
334
+
335
+ generalization capabilities of UniGraph2 as a foundation model. In particular, compared to other graph few-shot learning methods such as Prodigy, OFA, and GCOPE, UniGraph2 does not rely on complex prompt graph designs, and its simple few-shot strategy is both efficient and effective.
336
+
337
+ # 5.3 Multimodal Generative Tasks
338
+
339
+ Setup. UniGraph2 is designed as a general representation learning model. The embeddings it generates can be utilized by various generative foundation models, such as LLMs, to empower downstream generative tasks. To further demonstrate this, we select the section summarization task on the WikiWeb2M dataset for our experiments. The WikiWeb2M dataset [5] is designed for multimodal content understanding, using many-to-many text and image relationships from Wikipedia. It includes page titles, section titles, section text, images, and indices for each section. In this work, we focus on section summarization, where the task is to generate a summary sentence from section content using both text and images.
340
+
341
+ For the experiments, we follow the MMGL [57] setup, using four types of information: section text, section images, context text, and page-level text/images. Consistent with MMGL, we fine-tune Open Pre-trained Transformer (OPT-125m) [60] to read the input section text/images and generate a summary. Multimodal neighbors are first encoded using frozen vision/text encoders and then aligned to the text-only LM space using 1-layer MLP mapper. In MMGL, CLIP [42] encoders are used for text and image encoding, remaining frozen during fine-tuning. In our experiments, we replace CLIP embeddings with our UniGraph2 embeddings.
342
+
343
+ Results. Table 3 shows that under different input types and different neighbor encoding strategies, the embeddings generated by UniGraph2 bring significant improvements compared to MMGL's default CLIP embeddings. We also observe that UniGraph2's embeddings are more robust to different neighbor encoding strategies compared to CLIP and do not rely on a specific strategy.
344
+
345
+ Table 3: Experiment results in multimodal generative tasks. We strictly follow the setting in MMGL [57]. The task is to generate a single sentence that summarizes the content of a particular section. The summary is generated based on all images and (non-summary) text present in the target and context sections. We provide different MMG information to the base LM: (1) section all (text + image), (2) page text, and (3) page all (all texts and images). We encode the multimodal neighbor information using three different neighbor encoding methods: Self-Attention with Text+Embeddings (SA-TE), Self-Attention with Embeddings (SA-E), and Cross-Attention with Embeddings (CA-E).
346
+
347
+ <table><tr><td rowspan="2">Input Type</td><td rowspan="2">Method</td><td colspan="4">BLEU-4</td><td colspan="4">ROUGE-L</td><td colspan="4">CIDEr</td></tr><tr><td>SA-TE</td><td>SA-E</td><td>CA-E</td><td>Avg. gain</td><td>SA-TE</td><td>SA-E</td><td>CA-E</td><td>Avg. gain</td><td>SA-TE</td><td>SA-E</td><td>CA-E</td><td>Avg. gain</td></tr><tr><td rowspan="2">Section all</td><td>MMGL</td><td>8.03</td><td>7.56</td><td>8.35</td><td>-</td><td>40.41</td><td>39.89</td><td>39.98</td><td>-</td><td>77.45</td><td>74.33</td><td>75.12</td><td>-</td></tr><tr><td>+UniGraph2</td><td>9.24</td><td>9.01</td><td>9.39</td><td>15.57%</td><td>43.01</td><td>43.24</td><td>42.98</td><td>7.44%</td><td>81.15</td><td>80.39</td><td>81.91</td><td>7.32%</td></tr><tr><td rowspan="2">Page text</td><td>MMGL</td><td>9.81</td><td>8.37</td><td>8.47</td><td>-</td><td>42.94</td><td>40.92</td><td>41.00</td><td></td><td>92.71</td><td>80.14</td><td>80.72</td><td>-</td></tr><tr><td>+UniGraph2</td><td>10.31</td><td>10.10</td><td>9.98</td><td>14.53%</td><td>43.19</td><td>43.08</td><td>42.75</td><td>3.38%</td><td>93.19</td><td>90.41</td><td>93.11</td><td>9.56%</td></tr><tr><td rowspan="2">Page all</td><td>MMGL</td><td>9.96</td><td>8.58</td><td>8.51</td><td>-</td><td>43.32</td><td>41.01</td><td>41.55</td><td>-</td><td>96.01</td><td>82.28</td><td>80.31</td><td>-</td></tr><tr><td>+UniGraph2</td><td>10.12</td><td>10.05</td><td>10.33</td><td>13.38%</td><td>44.10</td><td>42.08</td><td>42.44</td><td>2.18%</td><td>96.32</td><td>91.24</td><td>94.15</td><td>9.49%</td></tr></table>
348
+
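+ The "Avg. gain" columns are consistent with the mean relative improvement over MMGL across the three neighbor encodings; this reading is ours, but it reproduces the reported numbers, e.g. for BLEU-4 under "Section all":
+
+ ```python
+ mmgl      = [8.03, 7.56, 8.35]   # MMGL BLEU-4 (Section all): SA-TE, SA-E, CA-E
+ unigraph2 = [9.24, 9.01, 9.39]   # +UniGraph2 BLEU-4 (Section all)
+ gains = [(u / m - 1) * 100 for u, m in zip(unigraph2, mmgl)]
+ print(round(sum(gains) / len(gains), 2))   # 15.57, matching the reported Avg. gain
+ ```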
349
+ Table 4: Ablation studies on UniGraph2 key components.
350
+
351
+ <table><tr><td></td><td>Products</td><td>Amazon-Cloth</td><td>Goodreads-NC</td><td>WN18RR</td></tr><tr><td>UniGraph2</td><td>82.79±0.02</td><td>24.64±0.09</td><td>81.15±0.12</td><td>85.47±0.11</td></tr><tr><td>w/o MoE</td><td>81.01±0.10</td><td>21.33±0.04</td><td>80.10±0.04</td><td>83.99±0.21</td></tr><tr><td>w/o feat loss</td><td>69.12±0.09</td><td>18.43±0.24</td><td>68.12±0.01</td><td>74.11±0.03</td></tr><tr><td>w/o SPD loss</td><td>82.42±0.11</td><td>23.39±0.05</td><td>80.24±0.02</td><td>85.24±0.11</td></tr></table>
352
+
353
+ Table 5: Ablation studies on Modality-Specific Encoders.
354
+
355
+ <table><tr><td></td><td>Products</td><td>Amazon-Cloth</td><td>Goodreads-NC</td><td>WN18RR</td></tr><tr><td>CLIP</td><td>82.79±0.02</td><td>24.64±0.09</td><td>81.15±0.12</td><td>85.47±0.11</td></tr><tr><td>ImageBind</td><td>82.32±0.05</td><td>25.01±0.11</td><td>80.33±0.22</td><td>84.29±0.07</td></tr><tr><td>T5+ViT</td><td>82.99±0.04</td><td>24.38±0.28</td><td>81.28±0.11</td><td>84.16±0.04</td></tr></table>
356
+
357
+ # 5.4 Model Analysis
358
+
359
+ We select four datasets from different domains to conduct more in-depth studies. We adopt self-supervised representation learning for evaluation.
360
+
361
+ Ablation on Key Components. Table 4 shows the performance of the UniGraph2 framework after removing some key designs. "w/o MoE" means that we use a simple MLP instead of the MoE module to align node features. "w/o feat loss" means that we use only the SPD loss for pre-training, while "w/o SPD loss" refers to the opposite. The overall results confirm that all key designs contribute positively to the performance of UniGraph2.
362
+
363
+ Ablation on Modality-Specific Encoders. In Table 5, we study the influence of different Modality-Specific Encoders on the performance of encoding raw multimodal data. CLIP and ImageBind are feature encoders that map features from various modalities to a shared embedding space, whereas T5+ViT employs SOTA embedding methods for each modality independently, without specific alignment. The results show that all methods achieve comparable performance, indicating that UniGraph2 effectively aligns features regardless of whether they have been pre-aligned or not.
364
+
365
+ Efficiency Analysis. UniGraph2, designed as a foundation model, incurs significant computational costs primarily during the pretraining phase. However, it offers the advantage of applicability
366
+
367
+ Table 6: Comparison of GPU hours and performance on ogbn-Arxiv and ogbn-Papers100M.
368
+
369
+ <table><tr><td>Method</td><td>Pre-training</td><td>Downstream Training</td><td>Downstream Inference</td><td>Test Accuracy</td></tr><tr><td colspan="5">ogbn-Arxiv (169,343 nodes)</td></tr><tr><td>GAT</td><td>-</td><td>0.39 h</td><td>5.5 mins</td><td>70.89 ± 0.43</td></tr><tr><td>GraphMAE2</td><td>-</td><td>5.1 h</td><td>5.4 mins</td><td>70.46 ± 0.07</td></tr><tr><td>UniGraph</td><td>28.1 h</td><td>-</td><td>9.8 mins</td><td>72.15 ± 0.18</td></tr><tr><td>UniGraph2</td><td>5.2 h</td><td>-</td><td>5.7 mins</td><td>72.56 ± 0.15</td></tr><tr><td colspan="5">ogbn-Papers100M (111,059,956 nodes)</td></tr><tr><td>GAT</td><td>-</td><td>6.8 h</td><td>23.1 mins</td><td>65.98 ± 0.23</td></tr><tr><td>GraphMAE2</td><td>-</td><td>23.2 h</td><td>23.0 mins</td><td>61.97 ± 0.24</td></tr><tr><td>UniGraph</td><td>28.1 h</td><td>-</td><td>40.1 mins</td><td>67.89 ± 0.21</td></tr><tr><td>UniGraph2</td><td>5.2 h</td><td>-</td><td>24.8 mins</td><td>67.95 ± 0.11</td></tr></table>
370
+
371
+ to new datasets in the inference phase without requiring retraining. We compare the training and inference costs of our model with those of other models. GAT [50] is a supervised GNN. GraphMAE2 [24] is a self-supervised learning method with GAT as the backbone network. UniGraph [23] is a graph foundation model for TAGs. We select ogbn-Arxiv and ogbn-Papers100M, two datasets of different scales, for the experiments. From the results in Table 6, we observe that although UniGraph2 has a long pre-training time, its inference time on downstream datasets is comparable to or shorter than the combined training and inference time of GNN-based methods. This advantage further increases with the size and potential quantity of downstream datasets.
372
+
373
+ # 6 Conclusion
374
+
375
+ UniGraph2 addresses the limitations of existing foundation models for multimodal graphs by introducing a novel unified embedding space that effectively integrates both multimodal information and graph structures. By employing modality-specific encoders, a graph neural network, and a Mixture of Experts module, UniGraph2 outperforms state-of-the-art models in tasks such as classification, transfer learning, and multimodal generation. Extensive experiments demonstrate the model's generalization capabilities across diverse graph domains and modalities, confirming its potential as a scalable and flexible solution for learning on multimodal graphs.
376
+
377
+ # Acknowledgments
378
+
379
+ This research is supported by the Ministry of Education, Singapore, under the Academic Research Fund Tier 2 (FY2025) (Award MOET2EP20124-0009).
380
+
381
+ # References
382
+
383
+ [1] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ige Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023).
384
+ [2] Tadas Baltrusaitis, Chaitanya Ahuja, and Louis-Philippe Morency. 2018. Multimodal machine learning: A survey and taxonomy. IEEE transactions on pattern analysis and machine intelligence 41, 2 (2018), 423-443.
385
+ [3] Hangbo Bao, Li Dong, Songhao Piao, and Furu Wei. 2021. BEiT: BERT Pre-Training of Image Transformers. In International Conference on Learning Representations.
386
+ [4] Monica Bianchini, Marco Gori, and Franco Scarselli. 2005. Inside pagerank. ACM Transactions on Internet Technology (TOIT) 5, 1 (2005), 92-128.
387
+ [5] Andrea Burns, Krishna Srinivasan, Joshua Ainslie, Geoff Brown, Bryan A Plummer, Kate Saenko, Jianmo Ni, and Mandy Guo. 2023. A suite of generative tasks for multi-level multimodal webpage understanding. arXiv preprint arXiv:2305.03668 (2023).
388
+ [6] Xiang Chen, Ningyu Zhang, Lei Li, Shumin Deng, Chuanqi Tan, Changliang Xu, Fei Huang, Luo Si, and Huajun Chen. 2022. Hybrid transformer with multi-level fusion for multimodal knowledge graph completion. In Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval. 904-915.
389
+ [7] Yulin Chen, Haoran Li, Yuan Sui, Yufei He, Yue Liu, Yangqiu Song, and Bryan Hooi. 2025. Can Indirect Prompt Injection Attacks Be Detected and Removed? arXiv preprint arXiv:2502.16580 (2025).
390
+ [8] Eli Chien, Wei-Cheng Chang, Cho-Jui Hsieh, Hsiang-Fu Yu, Jiong Zhang, Olgica Milenkovic, and Inderjit S Dhillon. 2022. Node Feature Extraction by Self-Supervised Multi-scale Neighborhood Prediction. In International Conference on Learning Representations.
391
+ [9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In International Conference on Learning Representations.
392
+ [10] Yasha Ektefaie, George Dasoulas, Ayush Noori, Maha Farhat, and Marinka Zitnik. 2023. Multimodal learning with graphs. Nature Machine Intelligence 5, 4 (2023), 340-350.
393
+ [11] Mikhail Galkin, Xinyu Yuan, Hesham Mostafa, Jian Tang, and Zhaocheng Zhu. 2024. Towards foundation models for knowledge graph reasoning. ICLR (2024).
394
+ [12] Chen Gao, Xiang Wang, Xiangnan He, and Yong Li. 2022. Graph neural networks for recommender system. In Proceedings of the fifteenth ACM international conference on web search and data mining. 1623-1625.
395
+ [13] Hongcheng Gao, Yue Liu, Yufei He, Longxu Dou, Chao Du, Zhijie Deng, Bryan Hooi, Min Lin, and Tianyu Pang. 2025. FlowReasoner: Reinforcing Query-Level Meta-Agents. arXiv preprint arXiv:2504.15257 (2025).
396
+ [14] Johannes Gasteiger, Aleksandar Bojchevski, and Stephan Gunnemann. 2018. Predict then propagate: Graph neural networks meet personalized pagerank. International Conference on Learning Representations (2018).
397
+ [15] Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, and Ishan Misra. 2023. Imagebind: One embedding space to bind them all. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 15180-15190.
398
+ [16] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. 2022. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 16000-16009.
399
+ [17] Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. 2024. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. In *The Twelfth International Conference on Learning Representations*.
400
+ [18] Xiaoxin He, Xavier Bresson, Thomas Laurent, Adam Perold, Yann LeCun, and Bryan Hooi. 2024. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning. International Conference on Learning Representations (2024).
401
+ [19] Xiaoxin He, Yijun Tian, Yifei Sun, Nitesh V Chawla, Thomas Laurent, Yann LeCun, Xavier Bresson, and Bryan Hooi. 2024. G-retriever: Retrieval-augmented generation for textual graph understanding and question answering. arXiv preprint arXiv:2402.07630 (2024).
402
+ [20] Yufei He, Zhenyu Hou, Yukuo Cen, Feng He, Xu Cheng, and Bryan Hooi. 2024. Generalizing Graph Transformers Across Diverse Graphs and Tasks via Pre-Training on Industrial-Scale Data. arXiv preprint arXiv:2407.03953 (2024).
403
+ [21] Yufei He, Yuexin Li, Jiaying Wu, Yuan Sui, Yulin Chen, and Bryan Hooi. 2025. Evaluating the Paperclip Maximizer: Are RL-Based Language Models More Likely to Pursue Instrumental Goals? arXiv preprint arXiv:2502.12206 (2025).
404
+ [22] Yufei He and Yao Ma. 2022. Sgkd: A scalable and effective knowledge distillation framework for graph representation learning. In 2022 IEEE International Conference on Data Mining Workshops (ICDMW). IEEE, 666-673.
405
+ [23] Yufei He, Yuan Sui, Xiaoxin He, and Bryan Hooi. 2024. UniGraph: Learning a Unified Cross-Domain Foundation Model for Text-Attributed Graphs.
406
+
407
+ arXiv:2402.13630 [cs.LG] https://arxiv.org/abs/2402.13630
408
+ [24] Zhenyu Hou, Yufei He, Yukuo Cen, Xiao Liu, Yuxiao Dong, Evgeny Kharlamov, and Jie Tang. 2023. Graphmae2: A decoding-enhanced masked self-supervised graph learner. In Proceedings of the ACM web conference 2023. 737-746.
409
+ [25] Zhenyu Hou, Haozhan Li, Yukuo Cen, Jie Tang, and Yuxiao Dong. 2024. GraphAlign: Pretraining One Graph Neural Network on Multiple Graphs via Feature Alignment. arXiv preprint arXiv:2406.02953 (2024).
410
+ [26] Zhenyu Hou, Xiao Liu, Yukuo Cen, Yuxiao Dong, Hongxia Yang, Chunjie Wang, and Jie Tang. 2022. Graphmae: Self-supervised masked graph autoencoders. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 594-604.
411
+ [27] Weihua Hu, Matthias Fey, Marinka Zitnik, Yuxiao Dong, Hongyu Ren, Bowen Liu, Michele Catasta, and Jure Leskovec. 2020. Open graph benchmark: Datasets for machine learning on graphs. Advances in neural information processing systems 33 (2020), 22118-22133.
412
+ [28] Qian Huang, Hongyu Ren, Peng Chen, Gregor Krzmanc, Daniel Zeng, Percy S Liang, and Jure Leskovec. 2024. Prodigy: Enabling in-context learning over graphs. Advances in Neural Information Processing Systems 36 (2024).
413
+ [29] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. [n.d.]. Perceiver IO: A General Architecture for Structured Inputs & Outputs. In International Conference on Learning Representations.
414
+ [30] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning. PMLR, 4904-4916.
415
+ [31] Wengong Jin, Kevin Yang, Regina Barzilay, and Tommi Jaakkola. 2019. Learning Multimodal Graph-to-Graph Translation for Molecule Optimization. In International Conference on Learning Representations.
416
+ [32] Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of NAACL-HLT. 4171-4186.
417
+ [33] Hao Liu, Jiarui Feng, Lecheng Kong, Ningyue Liang, Dacheng Tao, Yixin Chen, and Muhan Zhang. 2024. One for all: Towards training one graph model for all classification tasks. ICLR (2024).
418
+ [34] Hao Liu, Jiarui Feng, Lecheng Kong, Ningyue Liang, Dacheng Tao, Yixin Chen, and Muhan Zhang. 2024. One For All: Towards Training One Graph Model For All Classification Tasks. In The Twelfth International Conference on Learning Representations.
419
+ [35] Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. 2025. Efficient Inference for Large Reasoning Models: A Survey. arXiv preprint arXiv:2503.23077 (2025).
420
+ [36] Peter Lofgren, Siddhartha Banerjee, and Ashish Goel. 2016. Personalized pagerank estimation and search: A bidirectional approach. In Proceedings of the Ninth ACM International Conference on Web Search and Data Mining, 163-172.
421
+ [37] Péter Mernyei and Catalina Cangea. 2020. Wiki-cs: A wikipedia-based benchmark for graph neural networks. arXiv preprint arXiv:2007.02901 (2020).
422
+ [38] Niklas Muennighoff, Nouamane Tazi, Loic Magne, and Nils Reimers. 2022. MTEB: Massive text embedding benchmark. arXiv preprint arXiv:2210.07316 (2022).
423
+ [39] Jiquan Ngiam, Aditya Khosla, Mingyu Kim, Juhan Nam, Honglak Lee, and Andrew Y Ng. 2011. Multimodal deep learning. In Proceedings of the 28th international conference on machine learning (ICML-11). 689-696.
424
+ [40] Jiezhong Qiu, Qibin Chen, Yuxiao Dong, Jing Zhang, Hongxia Yang, Ming Ding, Kuansan Wang, and Jie Tang. 2020. GCC: Graph contrastive coding for graph neural network pre-training. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining. 1150-1160.
425
+ [41] Alec Radford. 2018. Improving language understanding by generative pretraining. (2018).
426
+ [42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning. PMLR, 8748-8763.
427
+ [43] J Ben Schafer, Joseph A Konstan, and John Riedl. 2001. E-commerce recommendation applications. Data mining and knowledge discovery 5 (2001), 115-153.
428
+ [44] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. 2017. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538 (2017).
429
+ [45] Yuan Sui, Yufei He, Tri Cao, Simeng Han, and Bryan Hooi. 2025. Meta-reasoner: Dynamic guidance for optimized inference-time reasoning in large language models. arXiv preprint arXiv:2502.19918 (2025).
430
+ [46] Yuan Sui, Yufei He, Zifeng Ding, and Bryan Hooi. 2024. Can knowledge graphs make large language models more trustworthy? an empirical study over open-ended question answering. arXiv preprint arXiv:2410.08085 (2024).
431
+ [47] Yuan Sui, Yufei He, Nian Liu, Xiaoxin He, Kun Wang, and Bryan Hooi. 2024. FiDeLiS: Faithful Reasoning in Large Language Model for Knowledge Graph Question Answering. arXiv preprint arXiv:2405.13873 (2024).
432
+ [48] Shantanu Thakoor, Corentin Tallec, Mohammad Gheshlaghi Azar, Rémi Munos, Petar Velicković, and Michal Valko. 2021. Bootstrapped representation learning on graphs. In ICLR 2021 Workshop on Geometrical and Topological Representation Learning.
433
+
434
+ ![](images/86c5885c346cc710d9fd363f20dbebbe993d2222d0c95f1b0db7bcdd31d2762d.jpg)
435
+ Figure 2: UniGraph2 binds multimodal graphs from different graph domains to a unified embedding space, enabling diverse downstream tasks.
436
+
438
+ [49] A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017).
439
+ [50] Petar Velickovic, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. 2018. Graph Attention Networks. In International Conference on Learning Representations.
440
+ [51] Heng Wang, Shangbin Feng, Tianxing He, Zhaoxuan Tan, Xiaochuang Han, and Yulia Tsvetkov. 2024. Can language models solve graph problems in natural language? Advances in Neural Information Processing Systems 36 (2024).
441
+ [52] Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Kompella, Zhangyang Wang, et al. 2024. Graph mixture of experts: Learning on large-scale graphs with explicit diversity modeling. Advances in Neural Information Processing Systems 36 (2024).
442
+ [53] Meiling Wang, Wei Shao, Shuo Huang, and Daoqiang Zhang. 2023. Hypergraph-regularized multimodal learning by graph diffusion for imaging genetics based alzheimer's disease diagnosis. Medical Image Analysis 89 (2023), 102883.
443
+ [54] Peng Wang, Shijie Wang, Junyang Lin, Shuai Bai, Xiaohuan Zhou, Jingren Zhou, Xinggang Wang, and Chang Zhou. 2023. One-peace: Exploring one general representation model toward unlimited modalities. arXiv preprint arXiv:2305.11172 (2023).
444
+ [55] Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, and Jure Leskovec. 2023. Graphmetro: Mitigating complex distribution shifts in gnns via mixture of aligned experts. arXiv preprint arXiv:2312.04693 (2023).
445
+ [56] Jun Xia, Chengshuai Zhao, Bozhen Hu, Zhangyang Gao, Cheng Tan, Yue Liu, Siyuan Li, and Stan Z Li. 2023. Mole-BERT: Rethinking Pre-training Graph Neural Networks for Molecules. In The Eleventh International Conference on Learning Representations.
446
+ [57] Minji Yoon, Jing Yu Koh, Bryan Hooi, and Ruslan Salakhutdinov. 2023. Multimodal graph learning for generative tasks. arXiv preprint arXiv:2310.07478 (2023).
447
+ [58] Yawen Zeng, Qin Jin, Tengfei Bao, and Wenfeng Li. 2023. Multi-modal knowledge hypergraph for diverse image retrieval. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 3376-3383.
448
+ [59] Chenhui Zhang, Yufei He, Yukuo Cen, Zhenyu Hou, Wenzheng Feng, Yuxiao Dong, Xu Cheng, Hongyun Cai, Feng He, and Jie Tang. 2021. SCR: Training graph neural networks with consistency regularization. arXiv preprint arXiv:2112.04319 (2021).
449
+ [60] Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022. Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022).
450
+ [61] Yizhuo Zhang, Heng Wang, Shangbin Feng, Zhaoxuan Tan, Xiaochuang Han, Tianxing He, and Yulia Tsvetkov. 2024. Can LLM Graph Reasoning Generalize beyond Pattern Memorization? arXiv preprint arXiv:2406.15992 (2024).
451
+ [62] Haihong Zhao, Aochuan Chen, Xiangguo Sun, Hong Cheng, and Jia Li. 2024. All in one and one for all: A simple yet effective method towards cross-domain graph pretraining. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4443-4454.
452
+ [63] Jianan Zhao, Meng Qu, Chaozhuo Li, Hao Yan, Qian Liu, Rui Li, Xing Xie, and Jian Tang. 2022. Learning on Large-scale Text-attributed Graphs via Variational Inference. In The Eleventh International Conference on Learning Representations.
453
+ [64] Jing Zhu, Yuhang Zhou, Shengyi Qian, Zhongmou He, Tong Zhao, Neil Shah, and Danai Koutra. 2024. Multimodal Graph Benchmark. arXiv preprint arXiv:2406.16321 (2024).
454
+
455
+ # A Datasets
456
+
457
+ Cora [18]. The Cora dataset consists of 2708 scientific publications classified into one of seven classes - case based, genetic algorithms, neural networks, probabilistic methods, reinforcement learning, rule learning, and theory. The citation network consists of 5429 links. We collect raw text from [18].
458
+
459
+ PubMed [18]. The PubMed dataset consists of 19,717 scientific publications from the PubMed database pertaining to diabetes, classified into one of three classes - experimentally induced diabetes, Type 1 diabetes, and Type 2 diabetes. As in [33], we ask ChatGPT to generate a detailed description of each category. The citation network consists of 44,338 links. We collect raw text from [18].
460
+
461
+ ogbn-Arxiv [27]. The ogbn-arxiv dataset is a directed graph, representing the citation network between all Computer Science (CS) arXiv papers. Each node is an arXiv paper and each directed edge indicates that one paper cites another one. The task is to predict the 40 subject areas of arXiv CS papers, e.g., cs.AI, cs.LG, and cs.OS. We collect raw text from [27].
462
+
463
+ ogbn-Papers100M [27]. The ogbn-papers100M dataset is a directed citation graph of 111 million papers. We collect raw text from [27].
464
+
465
+ ogbn-Products [27]. The ogbn-products dataset is an undirected and unweighted graph, representing an Amazon product co-purchasing network. Nodes represent products sold in Amazon, and edges between two products indicate that the products are purchased together. The task is to predict the category of a product in a multiclass classification setup, where the 47 top-level categories are used for target labels. We collect raw text from [27].
466
+
467
+ Wiki-CS [33]. Wiki-CS is an Internet link network in which each node represents a Wikipedia page and each edge represents a reference link. Each node's label corresponds to the category of the entry. We collect raw text from [33].
468
+
469
+ FB15K237 [33]. FB15K237 is a knowledge graph that contains knowledge base relation triples and textual mentions of Freebase entity pairs. We collect raw text from [33]. Given that we propose a self-supervised learning framework, and the edge text features are the labels to be predicted, we solely utilized node text features and did not employ edge text features.
470
+
471
+ WN18RR [33]. WN18RR is a knowledge graph, which is a subset of WordNet that consists of 11 relations and 40943 entities. We collect raw text from [33]. Given that we propose a self-supervised learning framework, and the edge text features are the labels to be predicted, we solely utilized node text features and did not employ edge text features.
472
+
473
+ Amazon-Sports [64]. Amazon-Sports is a link prediction dataset derived from the Amazon-Review dataset. In this dataset, each node represents a product within the sports category on Amazon, and the links signify whether two products are often purchased together. The textual features consist of product titles, while the visual features are raw high-resolution images of the products. We collect raw text and images from [64].
474
+
475
+ Amazon-Cloth [64]. Amazon-Cloth follows a similar structure to Amazon-Sports, but focuses on clothing products. The dataset uses co-purchase information from the clothes category on Amazon. The text features include product titles, such as "Nike Men's Revolution
476
+
477
+ Table 7: Statistics of all 14 multimodal graph datasets.
478
+
479
+ <table><tr><td>Dataset</td><td>Domain</td><td>Task</td><td>#Nodes</td><td>#Edges</td><td>Raw Features</td></tr><tr><td>Cora</td><td>Citation</td><td>Node</td><td>2,708</td><td>5,429</td><td>Paper Titles and Abstracts</td></tr><tr><td>PubMed</td><td>Citation</td><td>Node</td><td>19,717</td><td>44,338</td><td>Paper Titles and Abstracts</td></tr><tr><td>ogbn-Arxiv</td><td>Citation</td><td>Node</td><td>169,343</td><td>1,166,243</td><td>Paper Titles and Abstracts</td></tr><tr><td>ogbn-Papers100M</td><td>Citation</td><td>Node</td><td>111,059,956</td><td>1,615,685,872</td><td>Paper Titles and Abstracts</td></tr><tr><td>ogbn-Products</td><td>Product</td><td>Node</td><td>2,449,029</td><td>61,859,140</td><td>Product Descriptions</td></tr><tr><td>Wiki-CS</td><td>Wikipedia</td><td>Node</td><td>11,701</td><td>216,123</td><td>Wikipedia Entry Names and Contents</td></tr><tr><td>Ele-fashion</td><td>Product</td><td>Node</td><td>97,766</td><td>199,602</td><td>Fashion Titles and Fashion Images</td></tr><tr><td>Goodreads-NC</td><td>Book</td><td>Node</td><td>685,294</td><td>7,235,084</td><td>Book Descriptions and Book Images</td></tr><tr><td>FB15K237</td><td>Knowledge</td><td>Edge</td><td>14,541</td><td>310,116</td><td>Entity Names and Descriptions</td></tr><tr><td>WN18RR</td><td>Knowledge</td><td>Edge</td><td>40,943</td><td>93,003</td><td>Entity Names and Descriptions</td></tr><tr><td>Amazon-Sports</td><td>Product</td><td>Edge</td><td>50,250</td><td>356,202</td><td>Product Titles and Product Images</td></tr><tr><td>Amazon-Cloth</td><td>Product</td><td>Edge</td><td>125,839</td><td>951,271</td><td>Product Titles and Product Images</td></tr><tr><td>Goodreads-LP</td><td>Book</td><td>Edge</td><td>636,502</td><td>3,437,017</td><td>Book Descriptions and Book Images</td></tr><tr><td>WikiWeb2M</td><td>Wikipedia</td><td>Generative</td><td>600,000</td><td>-</td><td>Page Title, Section Titles, Section Text, Images</td></tr></table>
480
+
481
+ Table 8: Notation Table
482
+
483
+ <table><tr><td>Symbol</td><td>Description</td></tr><tr><td>G=(V,E,M,Ω)</td><td>A Multimodal Graph (MMG).</td></tr><tr><td>V</td><td>Set of nodes in the graph.</td></tr><tr><td>E</td><td>Set of edges in the graph.</td></tr><tr><td>Ω</td><td>Set of possible modalities (e.g., text, images).</td></tr><tr><td>M(v)</td><td>Function that maps each node v∈V to a subset of modalities Ωv⊆Ω.</td></tr><tr><td>GTAG=(V,E,M,{text})</td><td>Text-attributed graph where each node has an associated text feature.</td></tr><tr><td>f:Vk→Rd</td><td>Pre-trained model for representation learning, mapping nodes to a d-dimensional embedding space.</td></tr><tr><td>Hinf</td><td>Inference embeddings generated by applying the pre-trained model to a new graph.</td></tr><tr><td>xi(ω)</td><td>Feature vector for node vi from modality ω.</td></tr><tr><td>Ginf=(Vinf,Einf,Minf)</td><td>Inference graph where the pre-trained model generates embeddings for nodes.</td></tr><tr><td>Lfeature</td><td>Feature reconstruction loss for reconstructing masked node features.</td></tr><tr><td>LSPD</td><td>Shortest path distance reconstruction loss used for structural reconstruction.</td></tr><tr><td>λ</td><td>Mixing coefficient for combining feature and structure reconstruction losses.</td></tr></table>
484
+
485
+ 6 Road Running," and the visual features are the associated product images. We collect raw text and images from [64].
486
+
487
+ Goodreads-LP [64]. Goodreads-LP is based on the Goodreads Book Graph dataset. In this dataset, nodes correspond to books, and the links represent whether users who like one book are likely to enjoy another. Text features describe the books, and the visual features are book cover images. Books without images are excluded from the dataset. We collect raw text and images from [64].
488
+
489
+ Goodreads-NC [64]. Goodreads-NC is a node classification dataset also based on the Goodreads dataset. Here, each node represents a book, and the links signify whether users who liked one book will like another. The textual features describe the books, and the visual features are book cover images. Books lacking images are removed. We collect raw text and images from [64].
490
+
491
+ Ele-Fashion [64]. Ele-Fashion is a node classification dataset derived from the Amazon-Fashion dataset. In this dataset, each node represents a fashion product, and links indicate that users who buy one product are likely to purchase another. The textual features
492
+
493
+ are product titles, and the visual features consist of product images. We collect raw text and images from [64].
494
+
495
+ WikiWeb2M [5]. The WikiWeb2M dataset is designed for multimodal content understanding, using many-to-many text and image relationships from Wikipedia. It includes page titles, section titles, section text, images, and indices for each section.
496
+
497
+ # B Implementation Notes
498
+
499
+ Running environment. All experiments are conducted on a Linux machine with 945GB of RAM and 8 NVIDIA A100 GPUs with 40GB of memory each. For software versions, we use Python 3.11, PyTorch 2.0.1, DGL 1.1.2, transformers 4.32.1, and CUDA 11.8. Our code and datasets will be made available.
500
+
501
+ Hyper-parameters. The detailed pre-training hyper-parameters are listed in Table 9. For linear probing, we train the linear classifier using the Adam optimizer with lr=0.01 for 5000 epochs, and report the early-stopping results.
502
+
503
+ Table 9: Pre-training hyper-parameters for our framework.
504
+
505
+ <table><tr><td>mask rate</td><td>hidden_size</td><td>lr</td><td>weight Decay</td><td>dropout</td><td>optimizer</td><td>num_epochs</td><td>num_gnn_layers</td><td>ppr topk</td><td>num_experts</td><td>coefficient λ</td></tr><tr><td>0.8</td><td>1024</td><td>1e-3</td><td>0.01</td><td>0.4</td><td>adamw</td><td>5</td><td>4</td><td>256</td><td>8</td><td>0.1</td></tr></table>
506
+
507
+ Baselines. To have a fair comparison, we download the publicly available source code. For methods that cannot scale, we adapt their code to integrate sampling algorithms so that they run on large-scale graphs. The sources of the code used are as follows:
508
+
509
+ - BGRL: https://github.com/Namkyeong/BGRL_Pytorch
510
+ - GraphMAE2: https://github.com/THUDM/GraphMAE2
511
+ - GIANT-XRT: https://github.com/amzn/pecos/tree/mainline/examples/giant-xrt
512
+ - Prodigy: https://github.com/snap-stanford/prodigy
513
+ - OFA: https://github.com/LechengKong/OneForAll
514
+ - UniGraph: https://github.com/yf-he/UniGraph
515
+ - CLIP: https://github.com/openai/CLIP
516
+ - ImageBind: https://github.com/facebookresearch/ImageBind
517
+ - GCOPE: https://github.com/cshhzhao/gcope
518
+ - MMGL: https://github.com/minjiyoon/MMGL
519
+
520
+ Dataset splits. For Cora and PubMed, we follow commonly used data splits, using 20 labeled nodes per class as the training set, 30 nodes per class as the validation set, and the rest as the test set. We report the average accuracy on the test set over 20 random initializations.
521
+
522
+ For Arxiv and Products, we follow the official splits [27]. Following the experimental procedure suggested by OGB, we repeat each experiment 10 times with random seeds and report the average accuracy.
523
+
524
+ For Wiki-CS, we follow the official splits [37] with 20 different training splits, and we report the average accuracy over the 20 training splits with 20 random initializations. In each split, $5\%$ of the nodes in each class are used for training.
525
+
526
+ For FB15K237 and WN18RR, we follow the splits in OFA [33]. For FB15K237, the training set has 272,115 edges, the validation set has 17,535 edges, and the test set has 20,466 edges. For WN18RR, the training set has 86,835 edges, the validation set has 3,034 edges, and the test set has 3,134 edges. We repeat each experiment 10 times with random seeds and report the average accuracy.
527
+
528
+ For Amazon-Sports, Amazon-Cloth, Goodreads-LP, Goodreads-NC, and Ele-Fashion, we follow the official splits [64]. We repeat each experiment 10 times with random seeds and report the average accuracy.
529
+
530
+ For WikiWeb2M, we follow the split and setting in MMGL [57].
531
+
532
+ Linear probing. The dataset $\mathcal{D}$ obtained after generating embeddings, comprising embedding-label pairs $(h,y)$, is divided into training, validation, and test sets. A linear classifier with weight matrix $W \in \mathbb{R}^{d \times |\mathcal{Y}|}$ is trained on top of the embeddings from the frozen model, aiming to minimize the loss function $\mathcal{L}$, typically cross-entropy, over the training set: $\min_{W} \sum_{(h,y) \in \mathcal{D}_{\text{train}}} \mathcal{L}(W \cdot h,y)$. The performance of the model is evaluated based on a performance metric $\mathcal{M}$, which can be defined generically as $\mathcal{M}(\mathcal{D}_{\text{eval}}, f_{\theta}, W)$, where $\mathcal{D}_{\text{eval}}$ refers to either the validation or test set.
533
+
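+ A minimal sketch of the probe described above, reusing the optimizer settings stated in the hyper-parameters paragraph (Adam, lr=0.01, 5000 epochs); early stopping on the validation set is omitted and everything else is illustrative:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def linear_probe(train_h, train_y, num_classes, epochs=5000, lr=0.01):
+     """Train only the linear classifier W on frozen embeddings; the encoder is never updated."""
+     clf = nn.Linear(train_h.shape[1], num_classes)
+     opt = torch.optim.Adam(clf.parameters(), lr=lr)
+     for _ in range(epochs):
+         opt.zero_grad()
+         nn.functional.cross_entropy(clf(train_h), train_y).backward()
+         opt.step()
+     return clf
+
+ # train_h: (N, d) embeddings from the frozen pre-trained model, train_y: (N,) integer labels.
+ ```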
534
+ Few-shot transfer. Our method follows the in-context learning approach in UniGraph [23], and for baselines we either follow the same approach or use their already proposed in-context learning methods (Prodigy, OFA). We repeat each experiment 10 times with random seeds and report the average accuracy. All other experimental details (pre-training) follow those of the previous experiment (i.e., linear probing).
535
+
536
+ # C Mixture of Experts (MoE) in Graph Learning
537
+
538
+ Mixture of Experts (MoE) is a machine learning architecture that distributes the learning task across several specialized expert models. In various implementations of MoE in graph neural networks (GNNs), each expert model is typically responsible for learning specific components of the data or task, and a gating model selects which expert(s) to activate for each input, effectively combining their outputs. As with MoE in NLP, most MoE designs in graph learning aim to improve inference efficiency [52]. Other works also use MoE to handle challenges such as distribution shifts. In GraphMETRO [55], MoE addresses complex graph distribution shifts by assigning each expert to deal with a specific shift type, while a gating model selects the relevant experts to produce shift-invariant representations. GraphAlign [25] uses a feature normalization step and employs MoE at the input layer to assign nodes to experts, ensuring a unified distribution across graphs before GNN training. In this work, UniGraph2 employs MoE to align multimodal features (e.g., text, images) from various graph domains, ensuring coherent embeddings across modalities and domains.
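+
+ A toy sketch of this kind of MoE feature-alignment layer, with softmax gating over expert MLPs that map modality-specific node features into a shared space; the sizes, dense gating, and names are illustrative assumptions rather than the exact UniGraph2 module:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MoEAligner(nn.Module):
+     def __init__(self, in_dim=1024, out_dim=1024, num_experts=8):
+         super().__init__()
+         self.experts = nn.ModuleList([
+             nn.Sequential(nn.Linear(in_dim, out_dim), nn.GELU(), nn.Linear(out_dim, out_dim))
+             for _ in range(num_experts)
+         ])
+         self.gate = nn.Linear(in_dim, num_experts)
+
+     def forward(self, x):                          # x: (num_nodes, in_dim) from any modality/domain
+         weights = torch.softmax(self.gate(x), -1)  # per-node soft assignment over experts
+         outs = torch.stack([e(x) for e in self.experts], dim=1)   # (N, num_experts, out_dim)
+         return (weights.unsqueeze(-1) * outs).sum(dim=1)          # gated combination per node
+
+ aligned = MoEAligner()(torch.randn(16, 1024))
+ ```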
2502.00xxx/2502.00806/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:764dea5de03277692a64d07eb26630bcf6ba903ed3c88b4893f87f4043d2ad5d
3
+ size 860283
2502.00xxx/2502.00806/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00816/48acc41e-0a24-436d-bbd7-69385dea3615_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27be04617f19e1a66c548afcf9488507887d8aad68ed6ce33cf5003c600ee0b0
3
+ size 4669519
2502.00xxx/2502.00816/full.md ADDED
@@ -0,0 +1,627 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ![](images/562cdbfb57956c60d6903a2e0a74cb8763fae5aff9776a9a4db06cf6d73a9e66.jpg)
2
+
3
+ # Sundial: A Family of Highly Capable Time Series Foundation Models
4
+
5
+ Yong Liu $^{*1}$ Guo Qin $^{*1}$ Zhiyuan Shi $^{1}$ Zhi Chen $^{1}$ Caiyin Yang $^{1}$ Xiangdong Huang $^{1}$ Jianmin Wang $^{1}$ Mingsheng Long $^{1}$
6
+
7
+ # Abstract
8
+
9
+ We introduce Sundial, a family of native, flexible, and scalable time series foundation models. To predict the next-patch's distribution, we propose a TimeFlow Loss based on flow-matching, which facilitates native pre-training of Transformers on continuous-valued time series without discrete tokenization. Conditioned on arbitrary-length time series, our models are pre-trained without specifying any prior distribution and can generate multiple probable predictions, achieving more flexibility in representation learning than using parametric densities. Towards time series foundation models, we leverage minimal but crucial adaptations of Transformers and curate TimeBench with one trillion time points, comprising mostly real-world datasets and synthetic data. By mitigating mode collapse via TimeFlow Loss, we pre-train a family of Sundial models on TimeBench, which achieve unprecedented model capacity and generalization performance. In addition to excellent scalability, Sundial achieves state-of-the-art results on both point and probabilistic forecasting benchmarks with a just-in-time inference speed, i.e., making zero-shot predictions within a few milliseconds. We believe that Sundial's pioneering generative forecasting capability can improve model reliability in real-world decision-making. Code is available at: https://github.com/thuml/Sundial.
10
+
11
+ # 1. Introduction
12
+
13
+ Time series forecasting has fascinated people for thousands of years. Although people have been able to determine the time using instruments like sundials in 3000 BC, time series forecasting is intrinsically non-deterministic (Box et al.,
14
+
15
+ *Equal contribution. ${}^{1}$ School of Software, BNRist, Tsinghua University. Yong Liu <liuyong21@mails.tsinghua.edu.cn>. Guo Qin <qinguo24@mails.tsinghua.edu.cn>. Correspondence to: Mingsheng Long <mingsheng@tsinghua.edu.cn>.
16
+
17
+ Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
18
+
19
+ ![](images/10e9f5fb4f3b440cd276ce305e881e28fbfaf6ce6f599a8d1314ba4420ca12de.jpg)
20
+ Figure 1. A native time series model operates on the original series of continuous values. A flexible foundation model is pre-trained without specifying prior distributions. Sundial is the first family of native and flexible time series foundation models.
21
+
22
+ 2015). Therefore, generating a variety of probable predictions is crucial for decision-making. The growing demand has facilitated numerous statistical approaches over the past decades (Hyndman, 2018; Box, 2013), which provide high-profile theories and probabilistic tools for making reliable schedules. Recent advancements bring the boom of deftly designed models that automatically learn intricate dynamics and correlations from raw data (Oreshkin et al., 2019; Nie et al., 2022; Zhang & Yan, 2023; Liu et al., 2023a). Despite the impressive performance, deep models necessitate task-specific training on sufficient in-distribution data. Motivated by advances in large models (Bommasani et al., 2021), pretrained time series foundation models have shown promising capabilities in out-of-distribution tasks (Das et al., 2023b; Liu et al., 2024b; Woo et al., 2024; Ansari et al., 2024).
23
+
24
+ Current research on time series foundation models has converged on building unified, scalable, and out-of-the-box forecasters, exhibiting zero-shot performance close to or sometimes surpassing supervised methods (Aksu et al., 2024). Notably, Transformers (Radford et al., 2018) are currently the de facto architecture of these models. While pre-trained Transformers with an inherent generative ability have facilitated great success in language, image, and video generation (Ramesh et al., 2021; OpenAI, 2023; Liu et al., 2024c), most time series foundation models are not "generative" or, more specifically, probabilistic forecasters, thereby limiting
25
+
26
+ reliability in decision-making. Although parametric densities specified with prior distributions (Wen et al., 2017; Woo et al., 2024) can be adopted to address uncertainty in time series forecasting, they can reduce the capacity of distributions learned by pre-trained models, especially on time series modality characterized by high heterogeneity. To learn arbitrarily intricate distributions without mode collapse, language modeling (Bengio et al., 2000) that learns the categorical distribution via cross-entropy loss inspires subsequent works (Gruver et al., 2023; Ansari et al., 2024), which treat time series as a foreign language using discrete tokenization. Still, discrepancies between continuous-valued time series and discrete language tokens can lead to out-of-vocabulary issues and coarse-grained prediction intervals.
27
+
28
+ As shown in Figure 1, Sundial is presented as the first family of generative models among time series foundation models. As foundation models intend to learn complicated distributions from extensive datasets and facilitate transferability across agnostic downstream datasets, we do not specify any prior parametric densities, such as unimodal and multimodal Gaussian mixtures. Instead, we delve into generative modeling to tame Transformers as native, flexible, and scalable time series foundation models. By comparing to denoising diffusion models (Li et al., 2024), we opt for a simple yet effective flow-matching framework (Lipman et al., 2022), which provides notable efficiency and sample quality (Tong et al., 2023). We propose TimeFlow Loss, a parameterized training objective (Zhang et al., 2018) for autoregressive models to learn and sample from each token's predictive distribution. Optimizing models in the original continuous-valued domain, TimeFlow Loss facilitates patch-level generation and enables fast inference, which is naturally compatible with the time series modality.
29
+
30
+ In addition to TimeFlow, we enhance the Transformer with minimal but critical adaptations. We develop feasible patch tokenization for arbitrary-length input time series. We adopt RoPE (Su et al., 2024), Pre-LN (Xiong et al., 2020), FlashAttention (Dao et al., 2022), and KV Cache (Pope et al., 2023), which are crucial but generally neglected in the development of time series foundation models. Besides, we pre-train our models by multi-patch prediction to reduce autoregression steps. We realize a rapid generation of multiple samples by reusing a shared lookback representation. Beyond facilitating scalable pre-training, these adaptations help real-time long-context inference and long-term generation.
31
+
32
+ To validate the scaling law of time series foundation models, we collect and curate TimeBench with an unprecedented volume of a trillion time points. We present Sundial as a family of highly capable foundation models, which achieve state-of-the-art on three large-scale and best-recognized benchmarks, including Time-Series-Library (TSLib) (Wu et al., 2022), GIFT-Eval (Aksu et al., 2024), and FEV (Ansari et al., 2024).
33
+
34
+ Our contributions lie in these aspects:
35
+
36
+ - We propose TimeFlow Loss to predict next-patch's distribution, allowing Transformers to be trained without discrete tokenization and make probable predictions.
37
+ - We present Sundial, a family of scalable and efficient time series foundation models built upon our enhanced Transformer and pre-trained on a trillion time points.
38
+ - Experimentally, Sundial achieves state-of-the-art zero-shot performance on point and probabilistic forecasting benchmarks, including TSLib, GIFT-Eval, and FEV, indicating a promising generative approach for the future improvement of time series foundation models.
39
+
40
+ # 2. Related Work
41
+
42
+ # 2.1. Time Series Forecasting
43
+
44
+ Forecasting is essential for decision-making. Advancements in deep learning for time series include theory-inspired deep modules (Wu et al., 2021; Liu et al., 2023b; Wu et al., 2022), architecture-oriented adaptations (Bai et al., 2018; Salinas et al., 2020; Lim et al., 2021), and time series preprocessing (Kim et al., 2021; Nie et al., 2022). Deep models learn the dataset-level distribution and benefit from strong generalization and model capacity. Statistical methods conduct case-by-case fitting on input series, achieving notable performance on small data (Ke et al., 2017; Hyndman, 2018).
45
+
46
+ One of the efforts towards more capable forecasters focuses on the foundation models (Bommasani et al., 2021), which address data-scarce scenarios by pre-training. More capable models support zero-shot forecasting, making inferences as fast as statistical methods and possessing large model capacity as deep models. Another aspect is to address uncertainty in time series forecasting. There is a growing research emphasis on probabilistic forecasting (Woo et al., 2024; Ansari et al., 2024). While parametric densities can be adopted as training objectives of probabilistic forecasting, they can be too specific to meet the heterogeneity of large-scale datasets, resulting in mode collapse in representation learning and over-smooth predictions (Figure 14-15). In this work, we introduce generative time series foundation models, which naturally address the uncertainty in forecasting.
47
+
48
+ # 2.2. Time Series Foundation Models
49
+
50
+ Recent research has concentrated on building versatile large time series models (Liang et al., 2024). With the advances made in large language models, Transformer has become the dominant architecture. Several works adapt Transformers to address the unique 2D-dimensionality and heterogeneity of time series (Woo et al., 2024; Liu et al., 2024a). Specifically, our work delves into tokenization and optimization.
51
+
52
+ Models such as TimesFM (Das et al., 2023b), Timer (Liu et al., 2024a;b), and Time-MoE (Shi et al., 2024b) embed continuous values and fit unimodal distributions via MSE or quantile loss (Wen et al., 2017). However, prior loss may result in mode collapse because predictive distributions are highly divergent across different domains. Besides, these models cannot provide the confidence level of predictions, limiting reliability for decision-making. Based on continuous tokenization, Moirai (Woo et al., 2024) presents a probabilistic model learning a mixture of distributions, but this prior can still fail to accommodate complex distributions. Inspired by language modeling, Chronos (Ansari et al., 2024) discretizes series via bucket quantization, learning more flexible categorical distributions by cross-entropy. Still, discrete tokenization is applied at each time point, which can lead to long contexts. Also, the final performance can be sensitive to quantization techniques. Unlike before, we tame Transformers as native time series foundation models, learning flexible distributions without discrete tokenization.
53
+
54
+ # 2.3. Generative Modeling for Time Series
55
+
56
+ By addressing complicated distributions during pre-training, generative modeling has become a focal point in the development of various foundation models (Zhao et al., 2023; Liu et al., 2024c). While this direction for time series mostly focused on time series generation (Tashiro et al., 2021) and task-specific forecasters (Rasul et al., 2021; Shen & Kwok, 2023; Kollovieh et al., 2024), generative modeling for time series foundation models is hardly explored. With the comparable flexibility in distribution learning as language modeling, diffusion denoising (Sohl-Dickstein et al., 2015) and flow-matching (Lipman et al., 2022) have gained increasing prevalence in continuous-valued modalities (Lipman et al., 2024). Compared with diffusion denoising models, flow-matching provides a simple yet efficient framework. With fewer steps involved in the forward and reverse processes, large models based on flow-matching have shown superior performance in image generation (Esser et al., 2024).
57
+
58
+ Despite the connection in value continuity, generating images and future time series are fundamentally different tasks due to the autoregressive property of forecasting. Our proposed TimeFlow Loss is designed for autoregressive models to conduct conditional generation, which is a parameterized loss function (Zhang et al., 2018) for arbitrary distributions and enhances representation learning of foundation models.
59
+
60
+ # 3. Preliminaries
61
+
62
+ # 3.1. Flow-Matching
63
+
64
+ The goal of generative modeling is to learn the underlying probability distribution that generates the data. The framework of flow-matching transforms a sample $\mathbf{x}_0\sim p_0$ drawn
65
+
66
+ from a source distribution into a sample $\mathbf{x}_1\sim p_1$ drawn from a target distribution. The transformation is continuous in time. For $d$ -dimensional distributions, it is defined by a time-dependent velocity field $u_{t}:[0,1]\times \mathbb{R}^{d}\to \mathbb{R}^{d}$ , which is the solution of the ordinary differential equation (ODE):
67
+
68
+ $$
69
+ \frac{\mathrm{d}}{\mathrm{d}t} \psi_t(\mathbf{x}) = u_t\left(\psi_t(\mathbf{x})\right) \quad \text{and} \quad \psi_0(\mathbf{x}) = \mathbf{x}.
70
+ $$
71
+
72
+ The velocity field $u_{t}$ determines a flow $\psi_t$ . For all $t \in [0,1]$ , $\psi_t$ generates the probability path $p_t$ that interpolates $p_0$ and $p_1$ , i.e., $\mathbf{x}_t = \psi_t(\mathbf{x}_0) \sim p_t$ for $\mathbf{x}_0 \sim p_0$ . The implementation of flow-matching is to train a network $u_t^\theta$ parametrized by $\theta$ to fit the velocity field $u_t$ , which is a regression-based task formulated as the Flow-Matching objective:
73
+
74
+ $$
75
+ \mathcal{L}_{\mathrm{FM}}(\theta) = \mathbb{E}_{t, \mathbf{x}_t} \left\| u_t^{\theta}(\mathbf{x}_t) - u_t(\mathbf{x}_t) \right\|^2.
76
+ $$
77
+
78
+ Furthermore, Lipman et al. (2022) proved the equivalence of optimizing the Conditional Flow-Matching objective:
79
+
80
+ $$
81
+ \mathcal{L}_{\mathrm{CFM}}(\theta) = \mathbb{E}_{t, \mathbf{x}_t, \mathbf{x}_1} \left\| u_t^{\theta}(\mathbf{x}_t) - u_t(\mathbf{x}_t \mid \mathbf{x}_1) \right\|^2.
82
+ $$
83
+
84
+ Leveraging the conditional optimal-transport (linear) path and a source Gaussian, the objective can be formulated as:
85
+
86
+ $$
87
+ \mathcal{L}_{\mathrm{CFM}}^{\text{Gauss}}(\theta) = \mathbb{E}_{t, \epsilon, \mathbf{x}_1} \left\| u_t^{\theta}(\mathbf{x}_t) - (\mathbf{x}_1 - \mathbf{x}_0) \right\|^2. \tag{1}
88
+ $$
89
+
90
+ where $t \sim \mathcal{U}[0,1]$, $\mathbf{x}_0 = \epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$, and $\mathbf{x}_t = t\mathbf{x}_1 + (1 - t)\epsilon$.
91
+
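+ A minimal sketch of optimizing Eq. (1): sample the linear path and regress the network output toward $\mathbf{x}_1 - \mathbf{x}_0$. The stand-in velocity network below simply takes $(\mathbf{x}_t, t)$; in Sundial it would additionally be conditioned on the lookback representation:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def cfm_loss(velocity_net, x1):
+     """Eq. (1): x_t = t*x1 + (1-t)*x0 with x0 = eps ~ N(0, I); regress toward (x1 - x0)."""
+     x0 = torch.randn_like(x1)                    # source Gaussian sample
+     t = torch.rand(x1.shape[0], 1)               # t ~ U[0, 1], one per sample
+     xt = t * x1 + (1 - t) * x0
+     pred = velocity_net(torch.cat([xt, t], dim=-1))
+     return ((pred - (x1 - x0)) ** 2).mean()
+
+ patch_len = 16                                   # illustrative target dimension (one future patch)
+ net = nn.Sequential(nn.Linear(patch_len + 1, 128), nn.SiLU(), nn.Linear(128, patch_len))
+ cfm_loss(net, torch.randn(32, patch_len)).backward()
+ ```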
92
+ Consequently, we can train a generative network on given samples from the target distribution, and generate new samples by applying a push-forward process on samples drawn from a simple source Gaussian distribution:
93
+
94
+ $$
95
+ \mathbf{x}_{t+\Delta t} - \mathbf{x}_t = u_t^{\theta}(\mathbf{x}_t)\, \Delta t, \quad \mathbf{x}_0 \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), \; t \in [0, 1]. \tag{2}
96
+ $$
97
+
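+ A matching sketch of the push-forward process in Eq. (2), using a plain Euler discretization; the number of steps and the $(\mathbf{x}_t, t)$ interface of the velocity network are illustrative choices:
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def push_forward(velocity_net, shape, num_steps=20):
+     """Integrate dx/dt = u_t(x) from t=0 to t=1, starting from Gaussian noise (Eq. (2))."""
+     x = torch.randn(shape)                        # x_0 ~ N(0, I)
+     dt = 1.0 / num_steps
+     for i in range(num_steps):
+         t = torch.full((shape[0], 1), i * dt)
+         x = x + velocity_net(torch.cat([x, t], dim=-1)) * dt
+     return x                                      # approximate samples from the target distribution
+
+ # e.g. push_forward(net, (100, patch_len)) draws 100 probable outputs with the network above.
+ ```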
98
+ # 3.2. Generative Models for Probabilistic Forecasting
99
+
100
+ Given a historical observation $x_{1:t} = \{x_1, \dots, x_t\}$ , the target of time series forecasting is to predict future time series $x_{t+1:t+f} = \{x_{t+1}, \dots, x_{t+f}\}$ . The task can be generally formulated as $p(x_{t+1:t+f}|\mathbf{h}_t)$ , where $\mathbf{h}_t = f_\phi(x_{1:t})$ is the learned representation from a deep model $f_\phi$ . In probabilistic forecasting, explicit optimization objectives are utilized to predict the statistics of future series, e.g., MSE or quantile loss, which have specified $p$ as a prior distribution. While using one parametric density generally fits well on a small amount of data, it can be the major bottleneck for scaling time series foundation models. Inspired by the success of large generative models (Rombach et al., 2022; OpenAI, 2023; Esser et al., 2024), we introduce generative modeling to realize probabilistic forecasting:
101
+
102
+ $$
103
+ p_{\theta}\left(x_{t+1:t+f} \mid \mathbf{h}_t\right) = g_{\theta}\left(f_{\phi}(x_{1:t})\right). \tag{3}
104
+ $$
105
+
106
+ $g_{\theta}$ is a small trainable generative network conditioned on the learned representations of $f_{\phi}$ , which is jointly optimized
107
+
108
+ ![](images/301a3a59b6839a1965d7c0afa6fdd0441923a4521268494bd8ac586cbc451bfd.jpg)
109
+ Figure 2. Overall architecture of Sundial. The input time series is divided into patch tokens, which are embedded from original continuous values. The patch embeddings are fed into a decoder-only Transformer, a stable and speedup version that learns token representations via causal self-attention. The model is optimized using our TimeFlow Loss, a parameterized loss function that models per-token probability distribution conditioned on the learned representations, and generates multiple plausible predictions under the flow-matching framework.
110
+
111
+ with $f_{\phi}$. While the generative model automatically fits the target distribution, it can sample raw predictions and calculate their statistics for probabilistic forecasting. The aim is conceptually related to conformal prediction (Vovk et al., 2005) but models uncertainty beyond prediction intervals.
112
+
113
+ # 4. Approach
114
+
115
+ In this work, we conduct a univariate pre-training paradigm, which adopts the S3 format proposed by Liu et al. (2024b) to address multivariate data. To mitigate value range discrepancy, we conduct normalization on time series individually per variable. Afterwards, we sample varying-length training samples with the maximum context length of 2880. As a foundation model, Sundial is required to predict on out-of-distribution series with varied lengths during inference.
116
+
117
+ # 4.1. Sundial
118
+
119
+ As shown in Figure 2, the Sundial models consist of three parts: (1) time series tokenization, including context-level re-normalization and a patch embedding that addresses any-length time series, (2) a Transformer backbone that learns per-token representations of time series, and (3) TimeFlow Loss, a parameterized loss function to model the per-token distribution and generate raw series during inference. Intuitively, Sundial can be regarded as an ARMA (Auto-Regression and Moving-Average) deep model: the Transformer learns token representations autoregressively, and, conditioned on the lookback representations, TimeFlow transforms random noises into non-deterministic predictions.
120
+
121
+ # 4.1.1. TIME SERIES TOKENIZATION
122
+
123
+ Re-Normalization We adopt stationarization (Liu et al., 2022), a non-parametric two-stage instance normalization conducted within each sample, which was initially proposed to mitigate non-stationarity of time series. Here, it helps to address temporal distribution shift and outlier ranges in input series, improving generalizability for zero-shot forecasting.
126
+
127
+ Patch Embedding Given a univariate time series $\mathbf{X} = \{x_{1},\ldots ,x_{T}\}$ , it is divided into patches $\mathbf{x}_i = x_{1 + (i - 1)P:iP}$ of length $P$ . To address non-divisible lengths, we pad the input at the beginning and use a binary mask $\mathbf{m}_i\in \mathbb{R}^P$ for each patch to indicate the padded positions, which leads to $N = \lceil T / P\rceil$ input tokens. Subsequently, we use a shared MLP $\mathbb{R}^{2P}\mapsto \mathbb{R}^D$ to embed all patch tokens:
128
+
129
+ $$
130
+ \mathbf{h}_i = \operatorname{PatchEmbed}\left(\operatorname{Concat}\left(\mathbf{x}_i, \mathbf{m}_i\right)\right), \tag{4}
131
+ $$
132
+
133
+ where $\mathbf{h}_i\in \mathbb{R}^D$ and $D$ is the dimension of the token embedding. Unlike point-level quantization (Ansari et al., 2024), we preserve the original values without discrete quantization, which also reduces the context length (in tokens) of the Transformer.
134
+
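+ A minimal sketch of this tokenization is given below: left-pad the series to a multiple of $P$, build a binary mask marking padded positions, and embed the concatenated patch and mask with a shared MLP from $\mathbb{R}^{2P}$ to $\mathbb{R}^D$, as in Equation 4. The padding/mask convention (zeros at padded positions) and the layer sizes are assumptions for illustration.
+
+ ```python
+ # Sketch of Eq. (4): patchify with left padding, binary mask, shared MLP embedding.
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def patchify(x: torch.Tensor, P: int):
+     """x: (batch, T) -> patches (batch, N, P) and mask (batch, N, P)."""
+     T = x.shape[-1]
+     pad = (-T) % P                              # amount of left padding
+     mask = torch.ones_like(x)
+     x = F.pad(x, (pad, 0))                      # pad at the beginning
+     mask = F.pad(mask, (pad, 0))                # assumed convention: 0 marks padded positions
+     N = x.shape[-1] // P
+     return x.view(-1, N, P), mask.view(-1, N, P)
+
+ class PatchEmbed(nn.Module):
+     def __init__(self, P: int, D: int):
+         super().__init__()
+         self.mlp = nn.Sequential(nn.Linear(2 * P, D), nn.GELU(), nn.Linear(D, D))
+
+     def forward(self, patches: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
+         return self.mlp(torch.cat([patches, mask], dim=-1))   # (batch, N, D)
+
+ # Usage: a length-100 series with P=16 is padded to 112, i.e. N = ceil(100/16) = 7 tokens.
+ x = torch.randn(2, 100)
+ patches, mask = patchify(x, P=16)
+ tokens = PatchEmbed(P=16, D=512)(patches, mask)               # (2, 7, 512)
+ ```
+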
135
+ # 4.1.2. TRANSFORMER BACKBONE
136
+
137
+ Given $N$ token embeddings $\{\mathbf{h}_i\}$ , we adopt several crucial adaptations on a decoder-only Transformer to obtain per-token representations aggregated from all previous tokens. First, we adopt Pre-LN (Xiong et al., 2020) to improve pre-training stability. Second, we leverage a causal self-attention mechanism with RoPE (Su et al., 2024), which introduces the position information of patch tokens. It can be formulated as follows (the layer index is omitted for simplicity):
138
+
139
+ $$
140
+ \mathcal{A}_{ij} = \mathbf{h}_i^{\top} \mathbf{W}_{\mathbf{q}} \mathbf{R}_{\Theta, i-j} \mathbf{W}_{\mathbf{k}}^{\top} \mathbf{h}_j,
141
+ $$
142
+
143
+ $$
144
+ \operatorname{Attention}(\mathbf{H}) = \operatorname{Softmax}\left(\frac{\operatorname{Mask}(\mathcal{A})}{\sqrt{d}}\right) \mathbf{H} \mathbf{W}_{\mathbf{v}}, \tag{5}
145
+ $$
146
+
147
+ where $\mathbf{W}_{\mathbf{q}}, \mathbf{W}_{\mathbf{k}}, \mathbf{W}_{\mathbf{v}} \in \mathbb{R}^{D \times d}$ project token embeddings $\mathbf{H} = \{\mathbf{h}_i\}$ into $d$ -dimensional queries, keys, and values. $\mathbf{R}_{\Theta, t} \in \mathbb{R}^{d \times d}$ is the rotary matrix with rotation degree $(t \cdot \Theta)$ . Lastly, we implement FlashAttention (Dao et al., 2022) and KV Cache (Pope et al., 2023), since these enhancements for deployment are increasingly emphasized in large foundation models (Shoeybi et al., 2019; Rasley et al., 2020).
148
+
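+ For intuition, the sketch below implements causal self-attention with rotary position embeddings in the spirit of Equation 5. The head splitting, rotation convention, and use of `scaled_dot_product_attention` (which dispatches to fused kernels such as FlashAttention when available) are illustrative assumptions rather than the released implementation.
+
+ ```python
+ # Sketch of Eq. (5): causal self-attention with RoPE (illustrative assumptions).
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def apply_rope(x: torch.Tensor, theta: float = 10000.0) -> torch.Tensor:
+     """Rotate channel pairs by position-dependent angles. x: (B, H, N, d)."""
+     B, H, N, d = x.shape
+     freqs = theta ** (-torch.arange(0, d, 2, device=x.device).float() / d)  # (d/2,)
+     angles = torch.arange(N, device=x.device)[:, None] * freqs              # (N, d/2)
+     cos, sin = angles.cos(), angles.sin()
+     x1, x2 = x[..., 0::2], x[..., 1::2]
+     rot_even = x1 * cos - x2 * sin
+     rot_odd = x1 * sin + x2 * cos
+     return torch.stack([rot_even, rot_odd], dim=-1).flatten(-2)
+
+ class CausalSelfAttention(nn.Module):
+     def __init__(self, D: int, heads: int):
+         super().__init__()
+         self.heads, self.d = heads, D // heads
+         self.qkv = nn.Linear(D, 3 * D, bias=False)
+         self.out = nn.Linear(D, D, bias=False)
+
+     def forward(self, h: torch.Tensor) -> torch.Tensor:                # h: (B, N, D)
+         B, N, D = h.shape
+         q, k, v = self.qkv(h).chunk(3, dim=-1)
+         shape = (B, N, self.heads, self.d)
+         q, k, v = (t.view(shape).transpose(1, 2) for t in (q, k, v))   # (B, H, N, d)
+         q, k = apply_rope(q), apply_rope(k)                            # relative positions via rotation
+         o = F.scaled_dot_product_attention(q, k, v, is_causal=True)    # fused/causal attention
+         return self.out(o.transpose(1, 2).reshape(B, N, D))
+ ```
+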
149
+ # 4.1.3. TIMEFLOW LOSS
150
+
151
+ Given representations $\{\mathbf{h}_i\}$ extracted by the last layer of the Transformer, we aim to generate length- $F$ predictions $\widehat{\mathbf{y}}_i = \widehat{x}_{1 + iP:F + iP}$ at each position $i$ via our autoregressive model. Motivated by the empirical observation that a larger patch size improves performance in decoder-only Transformers (Das et al., 2023b), while a small patch size is more flexible to accommodate data of different frequencies, we adopt multi-patch predictions $(F > P)$ for pre-training, which also reduces the number of autoregressive inference steps.
152
+
153
+ Based on Equations 1 and 3, we formulate a new generative forecasting conditioned on a sequential representation $\mathbf{h}_i$ :
154
+
155
+ $$
156
+ \mathcal{L}(\theta, \mathbf{h}_i) = \mathbb{E}_{t, \epsilon, \mathbf{y}_i} \left\| u_t^{\theta}\left(\mathbf{y}_i^{(t)} \mid \mathbf{h}_i\right) - \left(\mathbf{y}_i - \mathbf{y}_i^{(0)}\right) \right\|^2. \tag{6}
157
+ $$
158
+
159
+ where $\mathbf{y}_i\in \mathbb{R}^F$ is the ground-truth value, $\mathbf{y}_i^{(0)}$ is an $F$ -dimensional Gaussian noise, $t$ is sampled from $\mathcal{U}[0,1]$ , and $\mathbf{y}_i^{(t)} = t\mathbf{y}_i + (1 - t)\mathbf{y}_i^{(0)}$ is constructed by the conditional optimal-transport path. It is important to note that the conditional representation $\mathbf{h}_i$ differs from the conditional path and the conditional source distribution: $\mathbf{h}_i$ is a condition attached to position $i$ and remains time-invariant throughout the whole flow-matching process $t\in [0,1]$ . Technically, we implement the flow-matching network as a small MLP:
160
+
161
+ $$
162
+ u_t^{\theta}\left(\mathbf{y}_i^{(t)} \mid \mathbf{h}_i\right) = \operatorname{FM-Net}\left(\mathbf{y}_i^{(t)}, t, \mathbf{h}_i\right). \tag{7}
163
+ $$
164
+
165
+ The training process involves sampling the noised $\mathbf{y}_i^{(t)}$ and feeding it into the network jointly with $t$ . The condition $\mathbf{h}_i$ is integrated into the flow-matching network via AdaLN (Peebles & Xie, 2023). The TimeFlow Loss for autoregressive models is formulated as:
166
+
167
+ $$
168
+ \mathcal{L}_{\text{TimeFlow}} = \sum_{i = 1}^{N} \left\| \operatorname{FM-Net}\left(\mathbf{y}_i^{(t)}, t, \mathbf{h}_i\right) - \left(\mathbf{y}_i - \mathbf{y}_i^{(0)}\right) \right\|^2. \tag{8}
169
+ $$
170
+
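+ The sketch below illustrates one TimeFlow training step in the sense of Equations 6 to 8. In the actual model the condition $\mathbf{h}_i$ enters FM-Net through AdaLN; here the conditioning is simplified to concatenation, and the network `FMNet`, its hidden size, and the averaging over tokens are assumptions for illustration.
+
+ ```python
+ # Sketch of Eq. (8): per-token conditional flow-matching loss (simplified conditioning).
+ import torch
+ import torch.nn as nn
+
+ class FMNet(nn.Module):
+     """Predicts the velocity for y_i^(t), conditioned on t and h_i."""
+     def __init__(self, F_len: int, D: int, hidden: int = 768):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(F_len + 1 + D, hidden), nn.SiLU(),
+             nn.Linear(hidden, hidden), nn.SiLU(),
+             nn.Linear(hidden, F_len),
+         )
+
+     def forward(self, y_t, t, h):
+         return self.net(torch.cat([y_t, t[..., None], h], dim=-1))
+
+ def timeflow_loss(fm_net: FMNet, y: torch.Tensor, h: torch.Tensor) -> torch.Tensor:
+     """y: (B, N, F) ground-truth windows, h: (B, N, D) per-token representations."""
+     y0 = torch.randn_like(y)                        # initial noise y_i^(0)
+     t = torch.rand(y.shape[:-1], device=y.device)   # one t per token
+     y_t = t[..., None] * y + (1 - t[..., None]) * y0
+     v_pred = fm_net(y_t, t, h)
+     return ((v_pred - (y - y0)) ** 2).mean()        # averaged over tokens and batch
+ ```
+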
171
+ Inference Based on Equation 2, the push-forward process conditioned on a learned representation $\mathbf{h}_i$ is formulated as
172
+
173
+ $$
174
+ \mathbf{y}_i^{(t + \Delta t)} = \mathbf{y}_i^{(t)} + u_t^{\theta}\left(\mathbf{y}_i^{(t)} \mid \mathbf{h}_i\right) \Delta t. \tag{9}
175
+ $$
176
+
177
+ Technically, we adopt a $K$ -step uniform trajectory and set $\Delta t = 1 / K$ . Sampling starts from an initial Gaussian noise and iteratively advances along the velocity generated by the trained FM-Net, as shown in Algorithm 1.
178
+
179
+ This procedure generates a predicted sample $\widehat{\mathbf{y}}_i$ at position $i$ . To calibrate probabilistic forecasting results during inference, we repeat this procedure with different initial noises and estimate statistics such as the median and quantiles from the set of generated predictions. We implement efficient repeated sampling in the TimeFlow module: the condition (representation) of the lookback series is shared and reused across different initial noises, thereby reducing the overhead of repeated forwarding in the Transformer backbone.
180
+
181
+ # Algorithm 1 TimeFlow Loss: Sampling
182
+
183
+ Require: condition $\mathbf{h}_i\in \mathbb{R}^D$ , path steps $K$
184
+
185
+ 1: Sample initial noise $\widehat{\mathbf{y}}_i\sim \mathcal{N}(\mathbf{0},\mathbf{I})$
186
+ 2: $\Delta t = 1 / K$
187
+ 3: for $k$ in $\{0, 1, \dots, K - 1\}$ do
188
+ 4: $\widehat{\mathbf{y}}_i\gets \widehat{\mathbf{y}}_i + \operatorname{FM-Net}\left(\widehat{\mathbf{y}}_i, k\Delta t, \mathbf{h}_i\right)\Delta t$
189
+ 5: end for
190
+ 6: Return: $\widehat{\mathbf{y}}_i$
191
+
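+ A Python rendering of Algorithm 1 with repeated sampling is sketched below: the condition $\mathbf{h}_i$ is computed once by the Transformer and reused for every initial noise, so only the small FM-Net is run $K$ times per generated trajectory. The function name and the way `fm_net` is passed in are assumptions; `fm_net` is any callable following the signature of Equation 7.
+
+ ```python
+ # Sketch of Algorithm 1: K-step Euler sampling with a shared, reused condition.
+ import torch
+
+ @torch.no_grad()
+ def sample_predictions(fm_net, h_i: torch.Tensor, F_len: int,
+                        num_samples: int = 20, K: int = 50) -> torch.Tensor:
+     """h_i: (D,) condition for one position; returns (num_samples, F_len)."""
+     h = h_i.expand(num_samples, -1)                   # shared, reused condition
+     y = torch.randn(num_samples, F_len)               # different initial noises
+     dt = 1.0 / K
+     for k in range(K):
+         t = torch.full((num_samples,), k * dt)
+         y = y + fm_net(y, t, h) * dt                  # Euler push-forward step
+     return y
+ ```
+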
192
+ # 4.2. TimeBench
193
+
194
+ We collected and curated TimeBench, which comprises over a trillion time points from various sources, as shown in Figure 3. Several datasets originate from research teams (Woo et al., 2024; Ansari et al., 2024; Liu et al., 2024a;b). While most datasets are collected from real-world records, a small portion $(0.05\%)$ is generated synthetically to enhance pattern diversity, following KernelSynth proposed by Ansari et al. (2024). We also leverage substantial meteorological data (Hersbach et al., 2020) because of the predictability of weather systems. Data of different frequencies encompasses common and comprehensive temporal dynamics.
195
+
196
+ ![](images/b3605a42b8de75b5b1634f59d3711da7ed720aaaaf215e821f17ec27a1838a70.jpg)
197
+ Figure 3. Ratios of data sources in TimeBench, the pre-training corpus of Sundial. Detailed statistics are provided in Table 4.
198
+
199
+ # 5. Experiments
200
+
201
+ We evaluate Sundial on well-recognized zero-shot forecasting benchmarks (Section 5.1) and investigate the scaling behavior of Sundial (Section 5.2). We compare TimeFlow with other training objectives (Section 5.3). We delve into test-time calibration of generative forecasters (Section 5.4). We conduct model adaptation of Sundial, i.e., instruction tuning (Section 5.5), and provide in-depth ablation studies to evaluate our modular enhancements (Section 5.6).
202
+
203
+ # 5.1. Time Series Forecasting
204
+
205
+ In this section, we focus on zero-shot forecasting and compare Sundial with advanced time series foundation models on various benchmarks, including (1) point forecasting: we
206
+
207
+ Table 1. Zero-shot forecasting results of time series foundation models on long-term forecasting datasets (Time-Series-Library) (Wu et al., 2022). Corresponding prediction lengths include {96, 192, 336, 720}. A lower MSE or MAE indicates a better prediction. Averaged results of four prediction lengths are reported here. $1^{\text{st}}$ Count represents the number of wins achieved by a model under all prediction lengths and datasets. Results of baseline models are officially reported by Shi et al. (2024b). Datasets in pre-training are not evaluated on corresponding models, which are denoted by the dash $(-)$ . Full results under all prediction lengths are provided in Table 9.
208
+
209
+ <table><tr><td>Models</td><td colspan="2">SundialSmall(Ours)</td><td colspan="2">SundialBase(Ours)</td><td colspan="2">SundialLarge(Ours)</td><td colspan="2">Time-MoEBase(2024b)</td><td colspan="2">Time-MoELarge(2024b)</td><td colspan="2">Time-MoEUltra(2024b)</td><td colspan="2">Timer-XL(2024a)</td><td colspan="2">MoiraiBase(2024)</td><td colspan="2">MoiraiLarge(2024)</td><td colspan="2">ChronosBase(2024)</td><td colspan="2">ChronosLarge(2024)</td><td colspan="2">TimesFM(2023b)</td></tr><tr><td>Metric</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td></tr><tr><td>ETTm1</td><td>0.354</td><td>0.388</td><td>0.336</td><td>0.377</td><td>0.331</td><td>0.369</td><td>0.394</td><td>0.415</td><td>0.376</td><td>0.405</td><td>0.356</td><td>0.391</td><td>0.373</td><td>0.392</td><td>0.406</td><td>0.385</td><td>0.422</td><td>0.391</td><td>0.645</td><td>0.500</td><td>0.555</td><td>0.465</td><td>0.433</td><td>0.418</td></tr><tr><td>ETTm2</td><td>0.265</td><td>0.324</td><td>0.258</td><td>0.320</td><td>0.254</td><td>0.315</td><td>0.317</td><td>0.365</td><td>0.316</td><td>0.361</td><td>0.288</td><td>0.344</td><td>0.273</td><td>0.336</td><td>0.311</td><td>0.337</td><td>0.329</td><td>0.343</td><td>0.310</td><td>0.350</td><td>0.295</td><td>0.338</td><td>0.328</td><td>0.346</td></tr><tr><td>ETTh1</td><td>0.390</td><td>0.418</td><td>0.411</td><td>0.434</td><td>0.395</td><td>0.420</td><td>0.400</td><td>0.424</td><td>0.394</td><td>0.419</td><td>0.412</td><td>0.426</td><td>0.404</td><td>0.417</td><td>0.417</td><td>0.419</td><td>0.480</td><td>0.439</td><td>0.591</td><td>0.468</td><td>0.588</td><td>0.466</td><td>0.473</td><td>0.443</td></tr><tr><td>ETTh2</td><td>0.340</td><td>0.387</td><td>0.333</td><td>0.387</td><td>0.334</td><td>0.387</td><td>0.366</td><td>0.404</td><td>0.405</td><td>0.415</td><td>0.371</td><td>0.399</td><td>0.347</td><td>0.388</td><td>0.362</td><td>0.382</td><td>0.367</td><td>0.377</td><td>0.405</td><td>0.410</td><td>0.455</td><td>0.427</td><td>0.392</td><td>0.406</td></tr><tr><td>ECL</td><td>0.169</td><td>0.265</td><td>0.169</td><td>0.265</td><td>0.166</td><td>0.262</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.174</td><td>0.278</td><td>0.187</td><td>0.274</td><td>0.186</td><td>0.270</td><td>0.214</td><td>0.278</td><td>0.204</td><td>0.273</td><td>-</td><td>-</td></tr><tr><td>Weather</td><td>0.233</td><td>0.271</td><td>0.234</td><td>0.270</td><td>0.238</td><td>0.275</td><td>0.265</td><td>0.297</td><td>0.270</td><td>0.300</td><td>0.256</td><td>0.288</td><td>0.256</td><td>0.294</td><td>0.287</td><td>0.281</td><td>0.264</td><td>0.273</td><td>0.292</td><td>0.315</td><td>0.279</td><td>0.306</td><td>-</td><td>-</td></tr><tr><td>\( 1^{st} \) Count</td><td>7</td><td>2</td><td>8</td><td>5</td><td>16</td><td>16</td><td>0</td><td>1</td><td>0</td><td>0</td><td>2</td><td>1</td><td>1</td><td>3</td><td>0</td><td>2</td><td>0</td><td>6</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td></td></tr></table>
210
+
211
+ Table 2. GIFT-Eval comprises 23 datasets characterized by a variety of frequencies, variate numbers, and prediction lengths. We evaluate zero-shot performance using 100 generated series, being consistent with Woo et al. (2024). A lower MASE or CRPS indicates a better performance. Rank assigns a numerical ranking of all 97 configurations. Baseline results are officially reported by Aksu et al. (2024).
212
+
213
+ <table><tr><td>Type</td><td colspan="4">Statistical Methods</td><td colspan="5">Task-Specific Models (Supervised)</td><td colspan="5">Time Series Foundation Models (Zero-Shot)</td></tr><tr><td>Model</td><td>Naïve</td><td>Seasonal Naïve</td><td>Auto ARIMA</td><td>Auto Theta</td><td>DeepAR (2020)</td><td>TiDE (2023a)</td><td>N-BEATS (2019)</td><td>PTST. (2022)</td><td>iTTrans. (2023a)</td><td>TimesFM (2023b)</td><td>TabPFN (2025)</td><td>Chronos (2024)</td><td>Moirai (2024)</td><td>Sundial (Ours)</td></tr><tr><td>MASE</td><td>1.260</td><td>1.000</td><td>0.964</td><td>0.978</td><td>1.206</td><td>0.980</td><td>0.842</td><td>0.762</td><td>0.802</td><td>0.680</td><td>0.748</td><td>0.786</td><td>0.809</td><td>0.673</td></tr><tr><td>CRPS</td><td>1.383</td><td>1.000</td><td>0.770</td><td>1.051</td><td>0.721</td><td>0.652</td><td>0.689</td><td>0.496</td><td>0.524</td><td>0.465</td><td>0.480</td><td>0.551</td><td>0.515</td><td>0.472</td></tr><tr><td>Rank</td><td>28.072</td><td>26.175</td><td>21.515</td><td>24.031</td><td>18.938</td><td>18.557</td><td>21.381</td><td>10.052</td><td>11.320</td><td>8.237</td><td>8.268</td><td>14.309</td><td>10.175</td><td>9.062</td></tr></table>
214
+
215
+ adopt the long-term forecasting benchmark (Wu et al., 2022), which assesses performance under different forecasting horizons using MSE and MAE; (2) probabilistic forecasting: we experiment on GIFT-Eval (Aksu et al., 2024) and the FEV leaderboard (Ansari et al., 2024), following their official evaluation suites and assessing point (MASE) and probabilistic (CRPS and WQL) metrics. All evaluated datasets are excluded from the pre-training dataset. The model is available on HuggingFace $^1$ and configurations are detailed in Table 5.
216
+
217
+ # 5.1.1. POINT FORECASTING
218
+
219
+ As shown in Table 1, Sundial consistently outperforms other advanced time series foundation models. Compared with the previous state-of-the-art model Time-MoE (Shi et al., 2024b), the Sundial family achieves an average MSE reduction of $7.57\%$ and an average MAE reduction of $4.71\%$ while using fewer parameters. Notably, continuous tokenization allows our model to conduct patch-level forecasting with fewer autoregression steps, whereas Chronos, which uses point-wise discrete tokenization, may be less suited to long-term forecasting.
220
+
221
+ # 5.1.2. PROBABILISTIC FORECASTING
222
+
223
+ Beyond point forecasting, Sundial possesses a unique generative capability for making probabilistic predictions. Following Ansari et al. (2024), we calculate the median and quantiles using a set of raw predictions of Sundial. While several baseline models have been pre-trained with an objective function consistent with the probabilistic evaluation metric, e.g., quantile loss for WQL, Sundial calculates these statistics for evaluation without any such prior knowledge.
226
+
227
+ GIFT-Eval Aggregated results are presented in Table 2. The benchmark evaluates performance across 23 datasets and 13 baseline models, encompassing statistical methods, task-specific models, and time series foundation models. Among supervised models and advanced foundation models, Sundial attains first place in MASE and second place in CRPS on all unseen datasets. While the top-performing PatchTST (Nie et al., 2022) is exhaustively trained and tuned on each dataset, the zero-shot performance of Sundial highlights its simplicity and robustness on this comprehensive benchmark.
228
+
229
+ FEV Leaderboard We evaluate Sundial on the open leaderboard established by AutoGluon (Ansari et al., 2024), which includes 27 datasets for probabilistic forecasting. As shown in Figure 4, the zero-shot forecasting performance of Sundial exceeds that of $70\%$ of statistical methods and deep models trained in-distribution with supervision. While Sundial is ranked as the second zero-shot pre-trained model after Chronos, Sundial realizes a $35\times$ inference speedup, as shown in Figure 5.
230
+
231
+ ![](images/d52c29d62d7ea5b58098faa6d0ee5832444f138c28d3ceb639b6a535c335fb62.jpg)
232
+ Figure 4. Model evaluation on the FEV leaderboard, which includes 27 datasets not seen by Sundial. Baseline models can be categorized into statistical methods fitting on each time series, task-specific deep models trained on each dataset, and pre-trained foundation models. Pre-trained Models that have seen several datasets during pre-training are denoted as Pre-trained Models (Other). A lower MASE/WQL indicates a better result. Sundial makes probabilistic predictions using 20 generated series, being consistent with Ansari et al. (2024).
233
+
234
+ ![](images/7cf7895d5ff881971d5a74c45c2c28ae3238914f0d26b50602cfdfd16e753399.jpg)
235
+
236
+ Based on patch-wise tokenization and multi-patch prediction, our inference speed is close to that of N-BEATS.
237
+
238
+ Besides, we provide qualitative showcases in Appendix D. TimeFlow can generate temporal patterns that are rich in variation yet coherent with the input series. Beyond the mean or quantiles, our model enables the estimation of arbitrary statistics by sampling directly from the predictive distribution.
239
+
240
+ ![](images/aaf3e08bc86398b1b49876024543bfaad3554e26728a0ab627c5e04e516200f6.jpg)
241
+ Figure 5. Inference time evaluation following Ansari et al. (2024), averaged over the FEV leaderboard. Computing resources of different models are marked. The x-axis is logarithmic.
242
+
243
+ # 5.2. Scalability
244
+
245
+ From Table 1, the larger Sundial model consistently achieves better performance as parameters scale. Beyond downstream performance, we delve into the utilization of model capacity. Figure 6 shows training curves of different model sizes. Compared to Sundial (Small), the large version achieves a $15.38\%$ reduction in the converged training loss, exhibiting the promising model capacity of generative forecasters.
248
+
249
+ ![](images/5aec01cecc64524c6e204b69bb814756cf376660100ac9e956d141f14e772fbd.jpg)
250
+ Figure 6. Training curves on TimeBench of different model sizes.
251
+
252
+ # 5.3. TimeFlow Loss
253
+
254
+ Based on the flow-matching framework, TimeFlow Loss allows autoregressive models to learn and generate flexible distributions while enhancing representation learning. To validate the effectiveness of this design, we implement two alternatives: (1) an MLP network trained with MSE loss, and (2) a parameterized training objective based on the denoising diffusion procedure (Li et al., 2024). We adopt the same parameterized network and Transformer backbone and pre-train them on TimeBench. Since the converged training loss is not comparable across different objective functions, we compare zero-shot performance in Table 3. Despite allowing predictions to be sampled, the diffusion-based objective performs notably worse than TimeFlow Loss.
255
+
256
+ In addition to zero-shot performance, we provide showcases
257
+
258
+ Table 3. Zero-shot performance using different training objectives. We use the same model configuration and pre-training scale. Averaged MSE over the four prediction lengths is reported here.
259
+
260
+ <table><tr><td>Objective</td><td>ETTm1</td><td>ETTm2</td><td>ETTh1</td><td>ETTh2</td><td>ECL</td><td>Weather</td><td>Avg.</td></tr><tr><td>TimeFlow</td><td>0.336</td><td>0.258</td><td>0.411</td><td>0.333</td><td>0.169</td><td>0.234</td><td>0.290</td></tr><tr><td>Diffusion</td><td>0.362</td><td>0.265</td><td>0.444</td><td>0.360</td><td>0.202</td><td>0.252</td><td>0.314</td></tr><tr><td>MSE</td><td>0.360</td><td>0.264</td><td>0.404</td><td>0.341</td><td>0.175</td><td>0.231</td><td>0.296</td></tr></table>
261
+
262
+ for quality evaluation in Appendix D.2. Pre-trained models optimized by the specific MSE loss can only output a single prediction, and the prediction is sometimes over-smoothed due to mode collapse (refer to Appendix C.1). Instead, generative modeling can accommodate significantly different future variations even when the lookback series are similar. We also report the probabilistic metric CRPS to compare different objectives in Table 7, which validates that the predictive distribution modeled by TimeFlow is more coherent and diverse than those of the counterpart training objectives. It benefits downstream tasks by generating multiple plausible predictions, conveying various future possibilities, and enhancing the reliability of decision-making.
263
+
264
+ # 5.4. Test-Time Calibration
265
+
266
+ Generative modeling facilitates the flexibility to calibrate the final prediction during inference. Based on the median-based forecasting strategy, i.e., starting from multiple standard Gaussian noises and calculating the median of the raw predictions, there are two configurations to calibrate final predictions: (1) the number of samples used to calculate statistics and (2) the number of sampling steps $K$ used for flow-matching. Figure 7 shows the results using different configurations.
267
+
268
+ The top two panels conform to the central limit theorem: generating more samples leads to a more calibrated estimate of the prediction and its confidence interval. The bottom two panels indicate that using finer-grained steps during the push-forward process leads to more precise predictions.
269
+
270
+ The trade-off between inference time and performance reveals the potential of test-time calibration, which does not require retraining models. The generative capability of Sundial provides flexibility for various use cases requiring different levels of uncertainty. In our experiments, sampling 20 predictions, each generated with 50 steps, takes nearly one second on a CPU, which is notably more efficient than tuning deep models or statistical methods. Advanced strategies for sampling and post-processing raw predictions are interesting directions for future exploration.
271
+
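+ For concreteness, a minimal sketch of this test-time calibration is given below: given a set of raw forecasts produced by repeated sampling, we estimate the median forecast and a prediction interval. The helper name, the interval level, and the placeholder tensor are illustrative; more samples or more flow-matching steps when generating the raw forecasts trade inference time for better-calibrated estimates.
+
+ ```python
+ # Sketch of test-time calibration from repeatedly sampled raw forecasts.
+ import torch
+
+ def calibrate(preds: torch.Tensor, interval: float = 0.8):
+     """preds: (num_samples, F) raw generated forecasts."""
+     lo, hi = (1 - interval) / 2, 1 - (1 - interval) / 2
+     median = preds.quantile(0.5, dim=0)       # point forecast
+     lower = preds.quantile(lo, dim=0)         # e.g. 10% quantile
+     upper = preds.quantile(hi, dim=0)         # e.g. 90% quantile
+     return median, lower, upper
+
+ preds = torch.randn(20, 720)                  # placeholder for generated forecasts
+ median, lower, upper = calibrate(preds)
+ ```
+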
272
+ # 5.5. Model Adaptation
273
+
274
+ Our model adaptation is inspired by the prevalence of instruction tuning (Wei et al., 2021), which adapts foundation models on a collection of tasks.
275
+
276
+ ![](images/ec7d5ed2be31b7dfa1ddc7fe1979aa895d921481724de74088dc7977e2752f62.jpg)
277
+
278
+ ![](images/cd1d212fa6b295c5206ed40d475cabfb72a5877c402f7e34cc9d57fc06342157.jpg)
279
+
280
+ ![](images/3c399b3acc475537f95ec046f3d6589170aa1debd8f013316a3b4e1af399aec1.jpg)
281
+ Figure 7. We show the MASE (left) and WQL (right) on FEV w.r.t. the number of generated raw predictions (top) and the steps to sample a prediction (bottom). More predictions or more sampling steps generally achieve better probabilistic metrics.
282
+
283
+ ![](images/1b14de3f8e3aeb83fa9a62bc94d4df37d72179d3da9d8782e4b19b2f5ae43a41.jpg)
284
+
285
+ We fine-tune pre-trained Sundial (Base) on the FEV leaderboard, including short-term tasks with different prediction lengths. Our model is tuned once on all aggregated datasets. We evaluate the performance on unseen test splits (Figure 8). We observe that the performance can be further improved compared to zero-shot forecasting. Furthermore, training from scratch on aggregated datasets results in inferior performance, implying knowledge transfer in pre-trained models.
286
+
287
+ ![](images/c3bc81e893210c097481d24124111093f470fef01e68dd50b02428289faaa72b.jpg)
288
+ Figure 8. Performance on the FEV leaderboard, including (1) training Sundial from scratch on all datasets from the FEV leaderboard, (2) zero-shot forecasting using pre-trained Sundial, and (3) fine-tuning once on all datasets from the FEV leaderboard.
289
+
290
+ # 5.6. Ablation Study
291
+
292
+ We conduct several ablation studies that provide insights into the enhancements made to Sundial's architecture. We evaluate the overall zero-shot performance on TSLib, which covers six different datasets and four prediction lengths.
293
+
294
+ ![](images/03ae2574de35198df42a142ba9522c98d8f0a2da98402cc5c67b960af8b6f688.jpg)
295
+ (a) RoPE
296
+
297
+ ![](images/531c0feda15566b22f201fd0bde120fcc868f0d6b6b9820f0100dfbea34068e6.jpg)
298
+ (b) LayerNorm
299
+
300
+ ![](images/095a0c5c59d2612f11d78b06d5533d417277a2736607e2710f0c19f90956e9e6.jpg)
301
+ (c) FlashAttention
302
+ Figure 9. Ablation studies with respect to architectural enhancements. We report the averaged results of TSLib datasets (Wu et al., 2022) from four prediction lengths {96, 192, 336, 720} and all six datasets. The context length is set to 2880 and the patch length is 16.
303
+
304
+ ![](images/3e34134be3ffd4970222378717feef1f77185cec105cba6ae335e6054a402e7c.jpg)
305
+ (d) KV Cache
306
+
307
+ RoPE Prior research (Liu et al., 2024a) observed that the introduction of RoPE (Su et al., 2024) yields better results in supervised forecasting tasks. As shown in Figure 9 (a), RoPE can also improve zero-shot forecasting, presenting a general enhancement for time series foundation models.
308
+
309
+ Layer Normalization Pre-LN (Baevski & Auli, 2018) is widely adopted in large language models (Touvron et al., 2023) due to its training stability. As depicted in Figure 9 (b), training with Pre-LN for more iterations yields better performance. In contrast, training with Post-LN, which is the predominant choice in supervised models, may adversely affect downstream results.
310
+
311
+ FlashAttention and KV Cache We leverage FlashAttention (Dao et al., 2022) and KV Cache to reduce computational costs. As shown in Figure 9 (c) and (d), they reduce the memory footprint by $14.8\%$ and the inference time by $43.6\%$ without affecting performance.
312
+
313
+ # 6. Conclusion
314
+
315
+ In this work, we collect and curate TimeBench, a trillion-scale time series dataset for building time series foundation models, which can benefit the research community. Towards time series foundation models, we delve into tokenization and optimization, presenting contributions in two aspects. First, we demonstrate that continuous tokenization, such as patch tokens, can be more effective and efficient for the time series modality, and that generative modeling presents a native approach for learning on continuous-valued time series. Second, we propose a novel training objective to accommodate heterogeneous time series distributions, endowing autoregressive models with an inherent capability to sample from non-categorical distributions. Our pre-trained Sundial models make substantial advances on well-recognized forecasting leaderboards. We hope this work can inspire future paradigms for pre-training time series foundation models and enhance their applicability to real-world applications.
318
+
319
+ # Acknowledgements
320
+
321
+ This work was supported by the National Natural Science Foundation of China (U2342217 and 62021002), State Grid Ningxia Electric Power Co. Science and Technology Project (SGNXYX00SCJS2400058), and the BNRist Innovation Fund (BNR2024RC01010), the National Engineering Research Center for Big Data Software.
322
+
323
+ We extend our gratitude to Xingzhuo Guo for his expertise in flow-matching and meticulous proofreading of the method section. We further thank Jialong Wu, Yuezhou Ma and Yu Zhang for insightful discussions about generative models. Their collective support significantly enhanced this work.
324
+
325
+ # Impact Statement
326
+
327
+ This paper aims to advance the development of time series foundation models. We curated a pre-training dataset from publicly available resources. Our models employ an efficient tokenization and incorporate a generative training objective. The proposed TimeFlow Loss provides insights for training generative foundation models for time series forecasting. We released our pre-trained models that demonstrate notable zero-shot forecasting performance. The generative forecasting paradigm enhances the model reliability for decision-making. Our paper mainly focuses on scientific research and has no obvious negative social impact.
328
+
329
+ # References
330
+
331
+ Aksu, T., Woo, G., Liu, J., Liu, X., Liu, C., Savarese, S., Xiong, C., and Sahoo, D. Gift-eval: A benchmark for general time series forecasting model evaluation. In NeurIPS Workshop on Time Series in the Age of Large Models, 2024.
332
+ Ansari, A. F., Stella, L., Turkmen, C., Zhang, X., Mercado, P., Shen, H., Shchur, O., Rangapuram, S. S., Arango, S. P., Kapoor, S., et al. Chronos: Learning the language of time series. arXiv preprint arXiv:2403.07815, 2024.
333
+ Baevski, A. and Auli, M. Adaptive input representations for neural language modeling. arXiv preprint arXiv:1809.10853, 2018.
334
+ Bai, S., Kolter, J. Z., and Koltun, V. An empirical evaluation of generic convolutional and recurrent networks for sequence modeling. arXiv preprint arXiv:1803.01271, 2018.
335
+ Bengio, Y., Ducharme, R., and Vincent, P. A neural probabilistic language model. Advances in neural information processing systems, 13, 2000.
336
+ Bommasani, R., Hudson, D. A., Adeli, E., Altman, R., Arora, S., von Arx, S., Bernstein, M. S., Bohg, J., Bosse-lut, A., Brunskill, E., et al. On the opportunities and risks of foundation models. arXiv preprint arXiv:2108.07258, 2021.
337
+ Box, G. Box and jenkins: time series analysis, forecasting and control. In A Very British Affair: Six Britons and the Development of Time Series Analysis During the 20th Century, pp. 161-215. Springer, 2013.
338
+ Box, G. E., Jenkins, G. M., Reinsel, G. C., and Ljung, G. M. Time series analysis: forecasting and control. John Wiley & Sons, 2015.
339
+ Dao, T., Fu, D., Ermon, S., Rudra, A., and Ré, C. Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems, 35:16344-16359, 2022.
340
+ Das, A., Kong, W., Leach, A., Sen, R., and Yu, R. Long-term forecasting with tide: Time-series dense encoder. arXiv preprint arXiv:2304.08424, 2023a.
341
+ Das, A., Kong, W., Sen, R., and Zhou, Y. A decoder-only foundation model for time-series forecasting. arXiv preprint arXiv:2310.10688, 2023b.
342
+ Esser, P., Kulal, S., Blattmann, A., Entezari, R., Müller, J., Saini, H., Levi, Y., Lorenz, D., Sauer, A., Boesel, F., et al. Scaling rectified flow transformers for high-resolution image synthesis. In *Forty-first International Conference on Machine Learning*, 2024.
343
+
344
+ Goldberger, A. L., Amaral, L. A., Glass, L., Hausdorff, J. M., Ivanov, P. C., Mark, R. G., Mietus, J. E., Moody, G. B., Peng, C.-K., and Stanley, H. E. Physiobank, physiotoolkit, and physionet: components of a new research resource for complex physiologic signals. Circulation, 101(23): e215-e220, 2000.
345
+ Goswami, M., Szafer, K., Choudhry, A., Cai, Y., Li, S., and Dubrawski, A. Moment: A family of open time-series foundation models. arXiv preprint arXiv:2402.03885, 2024.
346
+ Gruver, N., Finzi, M., Qiu, S., and Wilson, A. G. Large language models are zero-shot time series forecasters. arXiv preprint arXiv:2310.07820, 2023.
347
+ Gruver, N., Finzi, M., Qiu, S., and Wilson, A. G. Large language models are zero-shot time series forecasters. Advances in Neural Information Processing Systems, 36, 2024.
348
+ Hersbach, H., Bell, B., Berrisford, P., Hirahara, S., Horányi, A., Muñoz-Sabater, J., Nicolas, J., Peubey, C., Radu, R., Schepers, D., et al. The era5 global reanalysis. Quarterly Journal of the Royal Meteorological Society, 146(730): 1999-2049, 2020.
349
+ Hoo, S. B., Müller, S., Salinas, D., and Hutter, F. The tabular foundation model tabpfn outperforms specialized time series forecasting models based on simple features. arXiv preprint arXiv:2501.02945, 2025.
350
+ Hyndman, R. Forecasting: principles and practice. OTexts, 2018.
351
+ Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., Ye, Q., and Liu, T.-Y. Lightgbm: A highly efficient gradient boosting decision tree. Advances in neural information processing systems, 30, 2017.
352
+ Kim, T., Kim, J., Tae, Y., Park, C., Choi, J.-H., and Choo, J. Reversible instance normalization for accurate time-series forecasting against distribution shift. In International Conference on Learning Representations, 2021.
353
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
354
+ Kollovieh, M., Lienen, M., Ludke, D., Schwinn, L., and Gunnemann, S. Flow matching with gaussian process priors for probabilistic time series forecasting. arXiv preprint arXiv:2410.03024, 2024.
355
+ Li, T., Tian, Y., Li, H., Deng, M., and He, K. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024.
356
+
357
+ Liang, Y., Wen, H., Nie, Y., Jiang, Y., Jin, M., Song, D., Pan, S., and Wen, Q. Foundation models for time series analysis: A tutorial and survey. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining, pp. 6555-6565, 2024.
358
+ Lim, B., Arik, S. Ö., Loeff, N., and Pfister, T. Temporal fusion transformers for interpretable multi-horizon time series forecasting. International Journal of Forecasting, 37(4):1748-1764, 2021.
359
+ Lipman, Y., Chen, R. T., Ben-Hamu, H., Nickel, M., and Le, M. Flow matching for generative modeling. arXiv preprint arXiv:2210.02747, 2022.
360
+ Lipman, Y., Havasi, M., Holderrieth, P., Shaul, N., Le, M., Karrer, B., Chen, R. T., Lopez-Paz, D., Ben-Hamu, H., and Gat, I. Flow matching guide and code. arXiv preprint arXiv:2412.06264, 2024.
361
+ Liu, Y., Wu, H., Wang, J., and Long, M. Non-stationary transformers: Exploring the stationarity in time series forecasting. Advances in Neural Information Processing Systems, 35:9881-9893, 2022.
362
+ Liu, Y., Hu, T., Zhang, H., Wu, H., Wang, S., Ma, L., and Long, M. itransformer: Inverted transformers are effective for time series forecasting. arXiv preprint arXiv:2310.06625, 2023a.
363
+ Liu, Y., Li, C., Wang, J., and Long, M. Koopa: Learning nonstationary time series dynamics with koopman predictors. arXiv preprint arXiv:2305.18803, 2023b.
364
+ Liu, Y., Qin, G., Huang, X., Wang, J., and Long, M. Timer-xl: Long-context transformers for unified time series forecasting. arXiv preprint arXiv:2410.04803, 2024a.
365
+ Liu, Y., Zhang, H., Li, C., Huang, X., Wang, J., and Long, M. Timer: Generative pre-trained transformers are large time series models. In *Forty-first International Conference on Machine Learning*, 2024b.
366
+ Liu, Y., Zhang, K., Li, Y., Yan, Z., Gao, C., Chen, R., Yuan, Z., Huang, Y., Sun, H., Gao, J., et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177, 2024c.
367
+ Muñoz-Sabater, J., Dutra, E., Agustí-Panareda, A., Albergel, C., Arduino, G., Balsamo, G., Bouissetta, S., Choulga, M., Harrigan, S., Hersbach, H., et al. Era5-land: A state-of-the-art global reanalysis dataset for land applications. Earth system science data, 13(9):4349-4383, 2021.
368
+ Nie, Y., Nguyen, N. H., Sinthong, P., and Kalagnanam, J. A time series is worth 64 words: Long-term forecasting with transformers. arXiv preprint arXiv:2211.14730, 2022.
369
+
370
+ OpenAI. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
371
+ Oreshkin, B. N., Carpov, D., Chapados, N., and Bengio, Y. N-beats: Neural basis expansion analysis for interpretable time series forecasting. arXiv preprint arXiv:1905.10437, 2019.
372
+ Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019.
373
+ Peebles, W. and Xie, S. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4195-4205, 2023.
374
+ PEMS. Traffic Dataset. http://pems.dot.ca.gov/.
375
+ Pope, R., Douglas, S., Chowdhery, A., Devlin, J., Bradbury, J., Heek, J., Xiao, K., Agrawal, S., and Dean, J. Efficiently scaling transformer inference. Proceedings of Machine Learning and Systems, 5:606-624, 2023.
376
+ Radford, A., Narasimhan, K., Salimans, T., Sutskever, I., et al. Improving language understanding by generative pre-training. OpenAI, 2018.
377
+ Ramesh, A., Pavlov, M., Goh, G., Gray, S., Voss, C., Radford, A., Chen, M., and Sutskever, I. Zero-shot text-to-image generation. In International conference on machine learning, pp. 8821-8831. Pmlr, 2021.
378
+ Rasley, J., Rajbhandari, S., Ruwase, O., and He, Y. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 3505-3506, 2020.
379
+ Rasul, K., Seward, C., Schuster, I., and Vollgraf, R. Autoregressive denoising diffusion models for multivariate probabilistic time series forecasting. In International Conference on Machine Learning, pp. 8857-8868. PMLR, 2021.
380
+ Rasul, K., Ashok, A., Williams, A. R., Khorasani, A., Adamopoulos, G., Bhagwatkar, R., Biloš, M., Ghonia, H., Hassen, N. V., Schneider, A., et al. Lag-llama: Towards foundation models for time series forecasting. arXiv preprint arXiv:2310.08278, 2023.
381
+ Rombach, R., Blattmann, A., Lorenz, D., Esser, P., and Ommer, B. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10684-10695, 2022.
382
+
383
+ Salinas, D., Flunkert, V., Gasthaus, J., and Januschowski, T. Deepar: Probabilistic forecasting with autoregressive recurrent networks. International journal of forecasting, 36(3):1181-1191, 2020.
384
+ Shen, L. and Kwok, J. Non-autoregressive conditional diffusion models for time series prediction. In International Conference on Machine Learning, pp. 31016-31029. PMLR, 2023.
385
+ Shi, J., Ma, Q., Ma, H., and Li, L. Scaling law for time series forecasting. arXiv preprint arXiv:2405.15124, 2024a.
386
+ Shi, X., Wang, S., Nie, Y., Li, D., Ye, Z., Wen, Q., and Jin, M. Time-moe: Billion-scale time series foundation models with mixture of experts. arXiv preprint arXiv:2409.16040, 2024b.
387
+ Shoeybi, M., Patwary, M., Puri, R., LeGresley, P., Casper, J., and Catanzaro, B. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053, 2019.
388
+ Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., and Ganguli, S. Deep unsupervised learning using nonequilibrium thermodynamics. In International conference on machine learning, pp. 2256-2265. PMLR, 2015.
389
+ Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., and Liu, Y. Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063, 2024.
390
+ Tashiro, Y., Song, J., Song, Y., and Ermon, S. Csdi: Conditional score-based diffusion models for probabilistic time series imputation. Advances in Neural Information Processing Systems, 34:24804-24816, 2021.
391
+ Tong, A., Fatras, K., Malkin, N., Huguet, G., Zhang, Y., Rector-Brooks, J., Wolf, G., and Bengio, Y. Improving and generalizing flow-based generative models with minibatch optimal transport. arXiv preprint arXiv:2302.00482, 2023.
392
+ Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.-A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
393
+ Vovk, V., Gammerman, A., and Shafer, G. Algorithmic learning in a random world, volume 29. Springer, 2005.
394
+ Wei, J., Bosma, M., Zhao, V. Y., Guu, K., Yu, A. W., Lester, B., Du, N., Dai, A. M., and Le, Q. V. Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652, 2021.
395
+ Wen, R., Torkkola, K., Narayanaswamy, B., and Madeka, D. A multi-horizon quantile recurrent forecaster. arXiv preprint arXiv:1711.11053, 2017.
396
+
397
+ Woo, G., Liu, C., Kumar, A., Xiong, C., Savarese, S., and Sahoo, D. Unified training of universal time series forecasting transformers. arXiv preprint arXiv:2402.02592, 2024.
398
+ Wu, H., Xu, J., Wang, J., and Long, M. Autoformer: Decomposition transformers with auto-correlation for long-term series forecasting. Advances in Neural Information Processing Systems, 34:22419-22430, 2021.
399
+ Wu, H., Hu, T., Liu, Y., Zhou, H., Wang, J., and Long, M. Timesnet: Temporal 2d-variation modeling for general time series analysis. arXiv preprint arXiv:2210.02186, 2022.
400
+ Xiong, R., Yang, Y., He, D., Zheng, K., Zheng, S., Xing, C., Zhang, H., Lan, Y., Wang, L., and Liu, T. On layer normalization in the transformer architecture. In International Conference on Machine Learning, pp. 10524-10533. PMLR, 2020.
401
+ Zhang, R., Isola, P., Efros, A. A., Shechtman, E., and Wang, O. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 586-595, 2018.
402
+ Zhang, Y. and Yan, J. Crossformer: Transformer utilizing cross-dimension dependency for multivariate time series forecasting. In The eleventh international conference on learning representations, 2023.
403
+ Zhao, W. X., Zhou, K., Li, J., Tang, T., Wang, X., Hou, Y., Min, Y., Zhang, B., Zhang, J., Dong, Z., et al. A survey of large language models. arXiv preprint arXiv:2303.18223, 2023.
404
+
405
+ # A. Dataset Statistics
406
+
407
+ Large-scale datasets are of paramount importance for pre-training foundation models. Recent research has contributed significant time series datasets (Das et al., 2023b; Liu et al., 2024b; Shi et al., 2024b). While the scaling law of time series foundation models has been explored in recent work (Shi et al., 2024a), the pre-training scale remains relatively limited. Given the heterogeneity of time series compared to other modalities, this raises the question of whether it is feasible to learn from enormous amounts of series. To address this question, we curated TimeBench with a trillion time points from various domains.
408
+
409
+ Unlike other modalities, most time series are unavailable on open websites or repositories. There are also limited domains that encompass typical and predictable time series, leading to slow progress in dataset construction. Therefore, we conducted extensive preprocessing, including missing-value imputation, anomaly exclusion, and normalization. We also conducted statistical analysis, examining time series through the lenses of intrinsic properties, e.g., non-stationarity, forecastability, and seasonality. This approach allows us to characterize the data quality inherent to time series, which affects the training stability of next-token prediction. We further adopt synthetic techniques to improve pattern diversity. In addition, we adopt ERA5 (Muñoz-Sabater et al., 2021), which provides systematic real-world temporal observations.
410
+
411
+ The statistical details of TimeBench are summarized in Table 4. In addition to open-source datasets from research teams on time series foundation models (Woo et al., 2024; Ansari et al., 2024; Liu et al., 2024b;a), we collected substantial real-world time series from various domains such as finance, IoT, meteorology, and healthcare (Goldberger et al., 2000). These resources enable us to construct large-scale time-series corpora exceeding a trillion time points. The corpora include highly credible and predictable data with a wide range of frequencies, lengths, and numbers of variates, providing comprehensive temporal dynamics and variation patterns to facilitate downstream applications. To prevent data leakage, we exclude all datasets evaluated in Section 5.1 to make sure that Sundial conducts zero-shot forecasting.
412
+
413
+ Table 4. Key statistics of TimeBench, the pre-training dataset of Sundial.
414
+
415
+ <table><tr><td>Source</td><td>Chronos (2024)</td><td>ECG (2000)</td><td>Finance (Ours)</td><td>IoT (Ours)</td><td>LOTSA (2024)</td><td>Synthetic (2024)</td><td>ERA5 3h (2021)</td><td>ERA5 12h (2021)</td><td>ERA5 Daily (2021)</td><td>ERA5 Weekly (2021)</td><td>ERA5 Monthly (2021)</td><td>ERA5 Quarterly (2021)</td><td>Total</td></tr><tr><td># Pts.</td><td>94B</td><td>48B</td><td>10.5B</td><td>5.8B</td><td>230B</td><td>0.5B</td><td>129B</td><td>32B</td><td>406B</td><td>58B</td><td>13.5B</td><td>4.5B</td><td>1032B</td></tr><tr><td>%</td><td>9.11 %</td><td>4.65 %</td><td>1.02 %</td><td>0.56 %</td><td>22.29 %</td><td>0.05 %</td><td>12.50 %</td><td>3.10 %</td><td>39.35 %</td><td>5.62 %</td><td>1.31 %</td><td>0.44 %</td><td>100%</td></tr></table>
416
+
417
+ # B. Implementation Details
418
+
419
+ All experiments are implemented using PyTorch (Paszke et al., 2019) and executed with 32 NVIDIA A100 GPUs. We employ the AdamW optimizer (Kingma & Ba, 2014) for model optimization. We adopt S3 format (Liu et al., 2024b) for univariate pre-training. During training, data from different domains is sampled according to a predefined ratio to balance the domain weightings and ensure diversity in the training data. We implement a global shuffle strategy by loading time series into a standard parquet format. We use variable-wise normalization to unify the scope of values.
420
+
421
+ On the FEV leaderboard (Ansari et al., 2024), which consists of short-term forecasting datasets, we train Sundial models with TimeFlow Loss and a prediction length of $F = 16$ . For point forecasting (Wu et al., 2022) and GIFT-Eval (Aksu et al., 2024), which consist of forecasting datasets with prediction lengths ranging from 6 to 900, we train Sundial models with TimeFlow Loss and a prediction length of $F = 720$ . When the required prediction length is shorter than the model's prediction length, we truncate the output generated by Sundial; when it is longer, we conduct rolling forecasting. Following Chronos (Ansari et al., 2024), we sample 20 raw predictions to calculate MASE and WQL on FEV. Consistent with Moirai (Woo et al., 2024), we sample 100 raw predictions to calculate MASE and CRPS for GIFT-Eval. The sampling step is fixed at $K = 50$ . Configurations of Sundial in different sizes are provided in Table 5. We provide a model summary in Table 6, which summarizes several aspects of current time series foundation models.
422
+
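+ The truncation and rolling-forecast logic above can be summarized by the following sketch: a model with a fixed horizon $F$ serves an arbitrary required length by truncating when the requirement is shorter and by repeatedly feeding its own predictions back into the context when it is longer. The function and the `model.predict` call are hypothetical stand-ins for the actual inference interface.
+
+ ```python
+ # Sketch of truncation and rolling forecasting with a fixed model horizon F.
+ import torch
+
+ def forecast(model, context: torch.Tensor, required_len: int, F: int = 720,
+              max_context: int = 2880) -> torch.Tensor:
+     """context: (T,) lookback series; returns (required_len,) predictions."""
+     outputs = []
+     produced = 0
+     while produced < required_len:
+         window = context[-max_context:]            # keep at most the supported context
+         pred = model.predict(window)               # hypothetical call returning (F,)
+         outputs.append(pred)
+         context = torch.cat([context, pred])       # rolling: feed predictions back
+         produced += F
+     return torch.cat(outputs)[:required_len]       # truncate to the required length
+ ```
+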
423
+ # C. Supplementary Results
424
+
425
+ # C.1. Discussion of Mode Collapse
426
+
427
+ Mode collapse is a failure of representation learning, where a model generates a limited variety of outputs, ignoring the diversity in the training data. For time series foundation models, mode collapse stems from the heterogeneity of the time series distribution, e.g., similar lookback series can evolve into divergent trends. In other words, the semantics of time
428
+
429
+ Table 5. Model configurations of the Sundial family.
430
+
431
+ <table><tr><td>Model</td><td>Patch Size (P)</td><td>Context Length (T)</td><td>Prediction Length (F)</td><td>Layers (L)</td><td>Dimension (D, Dff)</td><td>MHA Heads H</td><td>TimeFlow (Dtf, Ltf)</td><td>Total Parameters #Count</td></tr><tr><td>SundialSmall</td><td>16</td><td>2880</td><td>{16, 720}</td><td>6</td><td>(512, 2048)</td><td>8</td><td>(512, 3)</td><td>32M</td></tr><tr><td>SundialBase</td><td>16</td><td>2880</td><td>{16, 720}</td><td>12</td><td>(768, 3072)</td><td>12</td><td>(768, 3)</td><td>128M</td></tr><tr><td>SundialLarge</td><td>16</td><td>2880</td><td>{16, 720}</td><td>24</td><td>(1024, 4096)</td><td>16</td><td>(1024, 6)</td><td>444M</td></tr></table>
432
+
433
+ * $D$ is the embedding dimension of Transformer. $D_{\mathrm{ff}}$ is the hidden dimension of FFN. $D_{\mathrm{tf}}$ is the hidden dimension of the flow-matching network. $L$ is the layer number of Transformer. $L_{\mathrm{tf}}$ is the layer number of the flow-matching network.
434
+
435
+ Table 6. Comparison of time series foundation models. Architecture denotes the Transformer category. Model Size presents parameter counts of different model sizes. Pre-training Scale measures pre-training datasets in time points. Token Level presents the graininess of time series tokens. Tokenization denotes what kind of values are embedded from time series. Context Length means the input length supported by the model. Probabilistic means generating multiple probable predictions, which is the opposite of deterministic forecasters.
436
+
437
+ <table><tr><td>Method</td><td>Sundial (Ours)</td><td>Time-MoE (2024b)</td><td>Timer-XL (2024a)</td><td>Moirai (2024)</td><td>MOMENT (2024)</td><td>LLMTime (2024)</td><td>Chronos (2024)</td><td>Lag-Llama (2023)</td><td>TimesFM (2023b)</td></tr><tr><td>Architecture</td><td>Decoder</td><td>Decoder</td><td>Decoder</td><td>Encoder</td><td>Encoder</td><td>Decoder</td><td>EncDec</td><td>Decoder</td><td>Decoder</td></tr><tr><td rowspan="3">Model Size</td><td>32M</td><td>113M</td><td>84M</td><td>14M</td><td>40M</td><td>-</td><td>46M</td><td>200M</td><td>17M</td></tr><tr><td>128M</td><td>453M</td><td></td><td>91M</td><td>125M</td><td></td><td>200M</td><td></td><td>70M</td></tr><tr><td>444M</td><td>2.4B</td><td></td><td>311M</td><td>385M</td><td></td><td>710M</td><td></td><td>200M</td></tr><tr><td>Pre-training Scale</td><td>1032B</td><td>300B</td><td>260B</td><td>231B</td><td>1.13B</td><td>-</td><td>84B</td><td>0.36B</td><td>100B</td></tr><tr><td>Token Level</td><td>Patch</td><td>Point</td><td>Patch</td><td>Patch</td><td>Patch</td><td>Point</td><td>Point</td><td>Point</td><td>Patch</td></tr><tr><td>Tokenization</td><td>Continuous</td><td>Continuous</td><td>Continuous</td><td>Continuous</td><td>Continuous</td><td>Discrete</td><td>Discrete</td><td>Continuous</td><td>Continuous</td></tr><tr><td>Context Length</td><td>≤2880</td><td>≤4096</td><td>≤2880</td><td>≤5000</td><td>=512</td><td>-</td><td>≤512</td><td>≤1024</td><td>≤512</td></tr><tr><td>Probabilistic</td><td>True</td><td>False</td><td>False</td><td>True</td><td>False</td><td>True</td><td>True</td><td>True</td><td>False</td></tr></table>
438
+
439
+ series patterns are highly unstable. This sometimes leads to over-smoothed predictions from models optimized by MSE, because such results are globally optimal for this loss (see showcases on the right of Figures 14-15). Such a training objective pre-defines a unimodal predictive distribution of the data, which struggles to accommodate large-scale datasets like TimeBench.
440
+
441
+ Our work addresses this phenomenon through generative modeling. Generative forecasters learn flexible distributions without relying on probabilistic priors. We evaluate the distributional metric Continuous Ranked Probability Score (CRPS) to assess the quality of generated predictions across different training objectives. The results indicate that the predictive distribution modeled by TimeFlow is more coherent and diverse compared to alternative training objectives, particularly on the highly diverse GIFT-Eval (Aksu et al., 2024). It validates the effectiveness of TimeFlow in mitigating mode collapse.
442
+
443
+ Table 7. Zero-shot probabilistic forecasting performance using different training objectives. Averaged CRPS is reported here.
444
+
445
+ <table><tr><td>Objective</td><td>ETTh1</td><td>ETTh2</td><td>ETTm1</td><td>ETTm2</td><td>ECL</td><td>Weather</td><td>GIFT-Eval</td></tr><tr><td>TimeFlow Loss</td><td>0.0059</td><td>0.0037</td><td>0.0057</td><td>0.0029</td><td>0.0082</td><td>0.0021</td><td>0.5050</td></tr><tr><td>Diffusion Loss</td><td>0.0082</td><td>0.0053</td><td>0.0070</td><td>0.0039</td><td>0.0095</td><td>0.0032</td><td>0.5340</td></tr><tr><td>MSE Loss</td><td>0.0063</td><td>0.0040</td><td>0.0058</td><td>0.0032</td><td>0.0080</td><td>0.0023</td><td>0.6420</td></tr></table>
446
+
447
+ # C.2. Scaling Behavior Using More Data
448
+
449
+ We compare Sundial with other time series foundation models that are pre-trained on smaller datasets: Chronos (Ansari et al., 2024) is pre-trained on 94 billion time points, and Moirai is pre-trained on 230 billion time points. As their pre-training datasets are subsets of TimeBench, we also pre-train Sundial on these subsets. As shown in Table 8, the results highlight the scaling behavior of Sundial with larger datasets. Additionally, Sundial still achieves better zero-shot forecasting than its counterpart models under the same pre-training dataset.
450
+
451
+ Table 8. Zero-shot forecasting performance of models trained on different scales of datasets (measured in time points, pts, and 1B means a billion). We report the averaged results from four prediction lengths {96, 192, 336, 720} on Time-Series-Library (Wu et al., 2022).
452
+
453
+ <table><tr><td>Model (pts.)</td><td colspan="2">Chronos (94B)</td><td colspan="2">Moirai (230B)</td><td colspan="2">Sundial (94B)</td><td colspan="2">Sundial (230B)</td><td colspan="2">Sundial (1032B)</td></tr><tr><td>Dataset</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td></tr><tr><td>ETTh1</td><td>0.591</td><td>0.468</td><td>0.417</td><td>0.419</td><td>0.402</td><td>0.429</td><td>0.403</td><td>0.419</td><td>0.411</td><td>0.434</td></tr><tr><td>ETTh2</td><td>0.405</td><td>0.410</td><td>0.362</td><td>0.382</td><td>0.377</td><td>0.414</td><td>0.364</td><td>0.398</td><td>0.333</td><td>0.387</td></tr><tr><td>ETTm1</td><td>0.645</td><td>0.500</td><td>0.406</td><td>0.385</td><td>0.367</td><td>0.402</td><td>0.352</td><td>0.385</td><td>0.336</td><td>0.377</td></tr><tr><td>ETTm2</td><td>0.310</td><td>0.350</td><td>0.311</td><td>0.337</td><td>0.280</td><td>0.341</td><td>0.273</td><td>0.334</td><td>0.258</td><td>0.320</td></tr><tr><td>ECL</td><td>0.214</td><td>0.278</td><td>0.187</td><td>0.274</td><td>0.172</td><td>0.269</td><td>0.171</td><td>0.267</td><td>0.169</td><td>0.265</td></tr><tr><td>Weather</td><td>0.292</td><td>0.315</td><td>0.287</td><td>0.281</td><td>0.254</td><td>0.301</td><td>0.252</td><td>0.297</td><td>0.234</td><td>0.270</td></tr></table>
454
+
455
+ # C.3. Performance with Varying Lookback Lengths
456
+
457
+ Time series foundation models operate without downstream training, functioning similarly to statistical methods. Given a specific forecasting task, one of the most important hyperparameters is the lookback length. Unlike fixed-context models, Sundial offers flexibility for practitioners, allowing the context length to be dynamically adjusted during inference. In Figure 10, we present the performance of Sundial using various lookback lengths. Based on our observations, we contend that performance is largely dependent on the forecasting task itself. Specifically, the size of the lookback window can be tuned to match the forecasting horizon and data periodicity. Time series foundation models provide a training-free approach for rapid adjustments; still, they should enhance their fundamental long-context capabilities to handle high-frequency data.
458
+
459
+ ![](images/731d90995a1ff52462178da2012edf32eda9c315ca07a3eb60f0534530db0c25.jpg)
460
+ Figure 10. Zero-shot forecasting performance using different lookback lengths in $\{480,960,1440,1920,2400,2880\}$ . We report the averaged results from four prediction lengths $\{96,192,336,720\}$ on Time-Series-Library (Wu et al., 2022).
461
+
462
+ # C.4. Zero-Shot Results of Point Forecasting
463
+
464
+ Table 9 provides full zero-shot results on the Time-Series-Library forecasting benchmark (Wu et al., 2022), including prediction horizons in $\{96,192,336,720\}$ . We build Sundial in different model sizes using the configurations in Table 5. The context length is fixed at 2880. We truncate the model's predictions for tasks requiring a prediction length less than $F = 720$ .
465
+
466
+ We compare against the most advanced time series foundation models using their official checkpoints, including Time-MoE (Shi et al., 2024b), Timer (Liu et al., 2024a;b), Moirai (Woo et al., 2024), TimesFM (Das et al., 2023b), and Chronos (Ansari et al., 2024). Zero-shot evaluations are conducted only on datasets that are not included in the pre-training corpus of the corresponding model. For each evaluated model, we use its maximum input length during inference. Metrics (MSE/MAE) are calculated over all predicted windows in the test split of each dataset, following Liu et al. (2024a).
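+
+ This evaluation protocol can be summarized by the self-contained sketch below: a (hypothetical) forecaster always emits $F = 720$ points, predictions are truncated to the task horizon, and MSE/MAE are averaged over all rolling windows of the test split. The `dummy_forecaster` is a stand-in, not an actual checkpoint.
+
+ ```python
+ import numpy as np
+
+ F = 720  # fixed generation length of the (hypothetical) forecaster
+
+ def dummy_forecaster(context: np.ndarray) -> np.ndarray:
+     """Stand-in model: persist the last observed value over F future steps."""
+     return np.full(F, context[-1], dtype=float)
+
+ def rolling_eval(test_series: np.ndarray, lookback: int, horizon: int, stride: int = 1):
+     """Average MSE/MAE over every (lookback, horizon) window of the test split."""
+     assert horizon <= F, "prediction length must not exceed the generation length"
+     mses, maes = [], []
+     last_start = len(test_series) - lookback - horizon
+     for start in range(0, last_start + 1, stride):
+         context = test_series[start : start + lookback]
+         target = test_series[start + lookback : start + lookback + horizon]
+         pred = dummy_forecaster(context)[:horizon]   # truncate the F-step output
+         mses.append(np.mean((pred - target) ** 2))
+         maes.append(np.mean(np.abs(pred - target)))
+     return float(np.mean(mses)), float(np.mean(maes))
+
+ if __name__ == "__main__":
+     series = np.sin(np.arange(6000) * 2 * np.pi / 96)
+     print(rolling_eval(series, lookback=2880, horizon=96, stride=96))
+ ```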
467
+
468
+ # C.5. Zero-Shot Results on GIFT-Eval and FEV Leaderboard
469
+
470
+ We evaluate our models on GIFT-Eval, a benchmark designed to comprehensively assess forecasting performance across diverse time series. GIFT-Eval includes 23 datasets covering 144,000 time series and 177 million data points, constituting a total of 97 forecasting configurations. We use the official evaluation suite established by the Salesforce research team and report aggregated results in Table 2. We also evaluate performance and inference time on the FEV leaderboard, originally proposed by Ansari et al. (2024) and established by AutoGluon, which comprises 27 datasets for zero-shot evaluation. We report aggregated metrics in Figure 4 and assess inference time in Figure 5. Detailed results have been released by submitting Sundial to their open benchmark<sup>2</sup>.
471
+
472
+ # D. Showcases
473
+
474
+ # D.1. Showcases of Sundial
475
+
476
+ Figures 11-13 present zero-shot forecasting showcases on all the datasets from FEV (Ansari et al., 2024) and TSLib (Wu et al., 2022). By generating 20 predictions from different initial noise, we estimate the median and the $80\%$ prediction interval.
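+
+ These summary statistics can be obtained as in the sketch below, where `sample_forecast` is a hypothetical stand-in for the generative model: draw 20 trajectories from different initial noise, then take the point-wise median and the 10th/90th percentiles as the 80% interval.
+
+ ```python
+ import numpy as np
+
+ def sample_forecast(context: np.ndarray, horizon: int, rng: np.random.Generator) -> np.ndarray:
+     """Stand-in sampler: last value plus a small random walk (illustrative only)."""
+     return context[-1] + np.cumsum(rng.normal(scale=0.05, size=horizon))
+
+ def summarize(context: np.ndarray, horizon: int, n_samples: int = 20, seed: int = 0):
+     rng = np.random.default_rng(seed)
+     samples = np.stack([sample_forecast(context, horizon, rng) for _ in range(n_samples)])
+     median = np.median(samples, axis=0)
+     lower, upper = np.quantile(samples, [0.1, 0.9], axis=0)   # 80% prediction interval
+     return median, lower, upper
+
+ if __name__ == "__main__":
+     ctx = np.sin(np.arange(2880) * 2 * np.pi / 96)
+     med, lo, hi = summarize(ctx, horizon=96)
+     print(med[:3], lo[:3], hi[:3])
+ ```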
477
+
478
+ # D.2. Showcases of Generative Forecasters and Deterministic Forecasters
479
+
480
+ Since we introduce generative modeling into time series foundation models, we compare zero-shot forecasting showcases from two types of models: (1) Sundial, a generative forecaster pre-trained with TimeFlow Loss, which can predict multiple future possibilities given a lookback series; and (2) a deterministic forecaster that uses the same Transformer backbone and TimeBench corpus but is pre-trained with MSE Loss, and can therefore only output a mean prediction. As depicted in Figures 14-15, the unimodal Gaussian prior implied by MSE can be inadequate for large-scale pre-training, which manifests as occasionally over-smooth predictions in downstream forecasting tasks. We hope this work can inspire future paradigms for pre-training time series foundation models and enhance their applicability to real-world scenarios. A small illustration of why MSE leads to over-smoothing is given below.
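+
+ The following toy example (not taken from the paper) illustrates the mode-averaging effect of MSE: when two divergent futures are equally likely, the MSE-optimal point forecast is their average, which resembles neither of them, whereas a sampler that draws from both modes reproduces the observed diversity.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ horizon = 24
+ up = np.linspace(0.0, 1.0, horizon)      # future mode 1: rising trend
+ down = -up                               # future mode 2: falling trend
+ futures = np.stack([up if rng.random() < 0.5 else down for _ in range(1000)])
+
+ # An MSE-trained deterministic forecaster converges to the conditional mean,
+ # which is nearly flat and matches neither observed mode.
+ mse_optimal = futures.mean(axis=0)
+ print("max |MSE-optimal forecast| (nearly flat):", float(np.abs(mse_optimal).max()))
+
+ # A generative forecaster can instead return samples that cover both trends.
+ samples = futures[rng.integers(0, len(futures), size=20)]
+ print("sampled endpoints span both modes:", samples[:, -1].min(), samples[:, -1].max())
+ ```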
481
+
482
+ # E. Limitations
483
+
484
+ Our models represent an initial effort to incorporate generative modeling into time series foundation models, enabling pre-training on heterogeneous time series without specifying any prior distribution. Compared with previous deterministic forecasters, this approach mitigates mode collapse in representation learning and generates a diverse range of probable predictions. Despite significant progress in enlarging model capacity, the Sundial family may still produce hallucinations. Performance on very high-frequency data is not guaranteed, since TimeBench contains many middle- and low-frequency time series. An important future direction is therefore to generalize Sundial to multi-scale time series. This limitation also points to new opportunities at inference time: since we only adopt a naive sampling strategy that starts from random Gaussian noise, there remains much room for improving the sampling strategy and post-processing, such as frequency normalization.
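+
+ For concreteness, the naive strategy can be pictured as plain Euler integration from Gaussian noise, as sketched below. This is only a schematic flow-matching-style sampler with a hypothetical `velocity_net`; it is not the exact Sundial implementation, and replacing the initial noise or the integrator is precisely where improved sampling strategies and post-processing could enter.
+
+ ```python
+ import torch
+
+ def naive_sample(velocity_net, condition: torch.Tensor, patch_len: int, num_steps: int = 50) -> torch.Tensor:
+     """Integrate dx/dt = v(x, t, condition) from t=0 (noise) to t=1 (prediction)."""
+     x = torch.randn(condition.size(0), patch_len)        # start from Gaussian noise
+     dt = 1.0 / num_steps
+     for k in range(num_steps):
+         t = torch.full((condition.size(0), 1), k * dt)
+         x = x + dt * velocity_net(x, t, condition)        # one Euler step
+     return x
+
+ if __name__ == "__main__":
+     class DummyVelocity(torch.nn.Module):
+         """Random stand-in network, used only to show the tensor shapes involved."""
+         def __init__(self, patch_len=16, cond_dim=32):
+             super().__init__()
+             self.proj = torch.nn.Linear(patch_len + 1 + cond_dim, patch_len)
+         def forward(self, x, t, cond):
+             return self.proj(torch.cat([x, t, cond], dim=-1))
+
+     net = DummyVelocity()
+     cond = torch.randn(4, 32)                             # lookback representation per token
+     print(naive_sample(net, cond, patch_len=16).shape)    # torch.Size([4, 16])
+ ```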
485
+
486
+ Another aspect of future development lies in model adaptation. Sundial is pre-trained in a univariate manner to cope with the varying number of variates across datasets, which prevents it from explicitly utilizing inter-variate correlations or covariate information. As an increasing number of studies address two-dimensional (time and variate) modeling, multivariate pre-training is likely to be conducted for domain-specific time series foundation models. Lastly, while autoregressive models provide flexibility in the input context length, multiple steps of autoregression may still lead to over-smooth predictions and unreliable results.
487
+
488
+ Table 9. Zero-shot forecasting results of time series foundation models on long-term forecasting datasets (Wu et al., 2022). A lower MSE or MAE indicates a better prediction. Results for four prediction lengths and their averages are reported. $1^{\mathrm{st}}$ Count represents the number of wins achieved by a model across all prediction lengths and datasets. Results of baseline models are officially reported by Shi et al. (2024b). Datasets used for pre-training are not evaluated on the corresponding models and are denoted by a dash $(-)$.
489
+
490
+ <table><tr><td colspan="2">Models</td><td colspan="2">SundialSmall(Ours)</td><td colspan="2">SundialBase(Ours)</td><td colspan="2">SundialLarge(Ours)</td><td colspan="2">Time-MoEBase(2024b)</td><td colspan="2">Time-MoELarge(2024b)</td><td colspan="2">Time-MoEUltra(2024b)</td><td colspan="2">Timer-XL(2024a)</td><td colspan="2">MoiraiBase(2024)</td><td colspan="2">MoiraiLarge(2024)</td><td colspan="2">ChronosBase(2024)</td><td colspan="2">ChronosLarge(2024)</td><td colspan="2">TimesFM(2023b)</td></tr><tr><td colspan="2">Metric</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>MAE</td></tr><tr><td rowspan="4">ETTm1</td><td>96</td><td>0.292</td><td>0.342</td><td>0.280</td><td>0.334</td><td>0.273</td><td>0.329</td><td>0.338</td><td>0.368</td><td>0.309</td><td>0.357</td><td>0.281</td><td>0.341</td><td>0.317</td><td>0.356</td><td>0.363</td><td>0.356</td><td>0.380</td><td>0.361</td><td>0.454</td><td>0.408</td><td>0.457</td><td>0.403</td><td>0.361</td><td>0.370</td></tr><tr><td>192</td><td>0.337</td><td>0.376</td><td>0.321</td><td>0.366</td><td>0.312</td><td>0.357</td><td>0.353</td><td>0.388</td><td>0.346</td><td>0.381</td><td>0.305</td><td>0.358</td><td>0.358</td><td>0.388</td><td>0.375</td><td>0.412</td><td>0.383</td><td>0.567</td><td>0.477</td><td>0.530</td><td>0.450</td><td>0.414</td><td>0.405</td><td></td></tr><tr><td>336</td><td>0.370</td><td>0.401</td><td>0.350</td><td>0.389</td><td>0.343</td><td>0.378</td><td>0.381</td><td>0.413</td><td>0.373</td><td>0.408</td><td>0.369</td><td>0.395</td><td>0.386</td><td>0.401</td><td>0.416</td><td>0.392</td><td>0.436</td><td>0.400</td><td>0.662</td><td>0.525</td><td>0.577</td><td>0.481</td><td>0.445</td><td>0.429</td></tr><tr><td>720</td><td>0.418</td><td>0.433</td><td>0.394</td><td>0.418</td><td>0.397</td><td>0.413</td><td>0.504</td><td>0.493</td><td>0.475</td><td>0.477</td><td>0.469</td><td>0.472</td><td>0.430</td><td>0.431</td><td>0.460</td><td>0.418</td><td>0.462</td><td>0.420</td><td>0.900</td><td>0.591</td><td>0.660</td><td>0.526</td><td>0.512</td><td>0.471</td></tr><tr><td></td><td>Avg</td><td>0.354</td><td>0.388</td><td>0.336</td><td>0.377</td><td>0.331</td><td>0.369</td><td>0.394</td><td>0.415</td><td>0.376</td><td>0.405</td><td>0.356</td><td>0.391</td><td>0.373</td><td>0.392</td><td>0.406</td><td>0.385</td><td>0.422</td><td>0.391</td><td>0.645</td><td>0.500</td><td>0.555</td><td>0.465</td><td>0.433</td><td>0.418</td></tr><tr><td 
rowspan="4">ETTm2</td><td>96</td><td>0.178</td><td>0.260</td><td>0.170</td><td>0.256</td><td>0.172</td><td>0.255</td><td>0.201</td><td>0.291</td><td>0.197</td><td>0.286</td><td>0.198</td><td>0.288</td><td>0.189</td><td>0.277</td><td>0.205</td><td>0.273</td><td>0.211</td><td>0.274</td><td>0.199</td><td>0.274</td><td>0.197</td><td>0.271</td><td>0.202</td><td>0.270</td></tr><tr><td>192</td><td>0.235</td><td>0.304</td><td>0.229</td><td>0.300</td><td>0.227</td><td>0.296</td><td>0.258</td><td>0.334</td><td>0.250</td><td>0.322</td><td>0.235</td><td>0.312</td><td>0.241</td><td>0.315</td><td>0.275</td><td>0.316</td><td>0.281</td><td>0.318</td><td>0.261</td><td>0.322</td><td>0.254</td><td>0.314</td><td>0.289</td><td>0.321</td></tr><tr><td>336</td><td>0.287</td><td>0.342</td><td>0.281</td><td>0.337</td><td>0.275</td><td>0.331</td><td>0.324</td><td>0.373</td><td>0.337</td><td>0.375</td><td>0.293</td><td>0.348</td><td>0.286</td><td>0.348</td><td>0.329</td><td>0.350</td><td>0.341</td><td>0.355</td><td>0.326</td><td>0.366</td><td>0.313</td><td>0.353</td><td>0.360</td><td>0.366</td></tr><tr><td>720</td><td>0.360</td><td>0.390</td><td>0.351</td><td>0.387</td><td>0.343</td><td>0.378</td><td>0.488</td><td>0.464</td><td>0.480</td><td>0.461</td><td>0.427</td><td>0.428</td><td>0.375</td><td>0.402</td><td>0.437</td><td>0.411</td><td>0.485</td><td>0.428</td><td>0.455</td><td>0.439</td><td>0.416</td><td>0.415</td><td>0.462</td><td>0.430</td></tr><tr><td></td><td>Avg</td><td>0.265</td><td>0.324</td><td>0.258</td><td>0.320</td><td>0.254</td><td>0.315</td><td>0.317</td><td>0.365</td><td>0.316</td><td>0.361</td><td>0.288</td><td>0.344</td><td>0.273</td><td>0.336</td><td>0.311</td><td>0.337</td><td>0.329</td><td>0.343</td><td>0.310</td><td>0.350</td><td>0.295</td><td>0.338</td><td>0.328</td><td>0.346</td></tr><tr><td 
rowspan="4">ETTh1</td><td>96</td><td>0.341</td><td>0.381</td><td>0.348</td><td>0.385</td><td>0.346</td><td>0.383</td><td>0.357</td><td>0.381</td><td>0.350</td><td>0.382</td><td>0.349</td><td>0.379</td><td>0.369</td><td>0.391</td><td>0.376</td><td>0.392</td><td>0.381</td><td>0.388</td><td>0.440</td><td>0.393</td><td>0.441</td><td>0.390</td><td>0.414</td><td>0.404</td></tr><tr><td>192</td><td>0.381</td><td>0.408</td><td>0.393</td><td>0.418</td><td>0.386</td><td>0.410</td><td>0.384</td><td>0.404</td><td>0.388</td><td>0.412</td><td>0.395</td><td>0.413</td><td>0.405</td><td>0.413</td><td>0.412</td><td>0.413</td><td>0.434</td><td>0.415</td><td>0.492</td><td>0.426</td><td>0.502</td><td>0.524</td><td>0.465</td><td>0.434</td></tr><tr><td>336</td><td>0.405</td><td>0.424</td><td>0.422</td><td>0.440</td><td>0.410</td><td>0.426</td><td>0.411</td><td>0.434</td><td>0.411</td><td>0.430</td><td>0.447</td><td>0.453</td><td>0.418</td><td>0.423</td><td>0.433</td><td>0.428</td><td>0.485</td><td>0.445</td><td>0.550</td><td>0.462</td><td>0.576</td><td>0.467</td><td>0.503</td><td>0.456</td></tr><tr><td>720</td><td>0.433</td><td>0.458</td><td>0.481</td><td>0.493</td><td>0.438</td><td>0.459</td><td>0.449</td><td>0.477</td><td>0.427</td><td>0.455</td><td>0.457</td><td>0.462</td><td>0.423</td><td>0.441</td><td>0.447</td><td>0.444</td><td>0.611</td><td>0.510</td><td>0.882</td><td>0.591</td><td>0.835</td><td>0.583</td><td>0.511</td><td>0.481</td></tr><tr><td></td><td>Avg</td><td>0.390</td><td>0.418</td><td>0.411</td><td>0.434</td><td>0.395</td><td>0.420</td><td>0.400</td><td>0.424</td><td>0.394</td><td>0.419</td><td>0.412</td><td>0.426</td><td>0.404</td><td>0.417</td><td>0.417</td><td>0.419</td><td>0.480</td><td>0.439</td><td>0.591</td><td>0.468</td><td>0.588</td><td>0.466</td><td>0.473</td><td>0.443</td></tr><tr><td 
rowspan="4">ETTh2</td><td>96</td><td>0.272</td><td>0.332</td><td>0.271</td><td>0.333</td><td>0.269</td><td>0.330</td><td>0.305</td><td>0.359</td><td>0.302</td><td>0.354</td><td>0.292</td><td>0.352</td><td>0.283</td><td>0.342</td><td>0.294</td><td>0.330</td><td>0.296</td><td>0.330</td><td>0.308</td><td>0.343</td><td>0.320</td><td>0.345</td><td>0.315</td><td>0.349</td></tr><tr><td>192</td><td>0.329</td><td>0.374</td><td>0.327</td><td>0.376</td><td>0.325</td><td>0.373</td><td>0.351</td><td>0.386</td><td>0.364</td><td>0.385</td><td>0.347</td><td>0.379</td><td>0.340</td><td>0.379</td><td>0.365</td><td>0.375</td><td>0.361</td><td>0.371</td><td>0.384</td><td>0.392</td><td>0.406</td><td>0.399</td><td>0.388</td><td>0.395</td></tr><tr><td>336</td><td>0.357</td><td>0.399</td><td>0.354</td><td>0.402</td><td>0.354</td><td>0.400</td><td>0.391</td><td>0.418</td><td>0.417</td><td>0.425</td><td>0.406</td><td>0.419</td><td>0.366</td><td>0.400</td><td>0.376</td><td>0.390</td><td>0.390</td><td>0.390</td><td>0.429</td><td>0.430</td><td>0.492</td><td>0.453</td><td>0.422</td><td>0.427</td></tr><tr><td>720</td><td>0.401</td><td>0.442</td><td>0.381</td><td>0.435</td><td>0.389</td><td>0.443</td><td>0.419</td><td>0.454</td><td>0.537</td><td>0.496</td><td>0.439</td><td>0.447</td><td>0.397</td><td>0.431</td><td>0.416</td><td>0.433</td><td>0.423</td><td>0.418</td><td>0.501</td><td>0.477</td><td>0.603</td><td>0.511</td><td>0.443</td><td>0.454</td></tr><tr><td></td><td>Avg</td><td>0.340</td><td>0.387</td><td>0.333</td><td>0.387</td><td>0.334</td><td>0.387</td><td>0.366</td><td>0.404</td><td>0.405</td><td>0.415</td><td>0.371</td><td>0.399</td><td>0.347</td><td>0.388</td><td>0.362</td><td>0.382</td><td>0.367</td><td>0.377</td><td>0.405</td><td>0.410</td><td>0.455</td><td>0.427</td><td>0.392</td><td>0.406</td></tr><tr><td rowspan="4">ECL</td><td>96</td><td>0.134</td><td>0.231</td><td>0.132</td><td>0.229</td><td>0.130</td><td>0.227</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>192</td><td>0.154</td><td>0.251</td><td>0.152</td><td>0.250</td><td>0.150</td><td>0.247</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>336</td><td>0.174</td><td>0.271</td><td>0.173</td><td>0.271</td><td>0.170</td><td>0.268</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td></tr><tr><td>720</td><td>0.215</td><td>0.307</td><td>0.218</td><td>0.311</td><td>0.214</td><td>0.307</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td></tr><tr><td></td><td>Avg</td><td>0.169</td><td>0.265</td><td>0.169</td><td>0.265</td><td>0.166</td><td>0.262</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td></tr><tr><td 
rowspan="4">Weather</td><td>96</td><td>0.158</td><td>0.206</td><td>0.157</td><td>0.205</td><td>0.157</td><td>0.208</td><td>0.160</td><td>0.214</td><td>0.159</td><td>0.213</td><td>0.157</td><td>0.211</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td></tr><tr><td>192</td><td>0.205</td><td>0.253</td><td>0.205</td><td>0.251</td><td>0.207</td><td>0.256</td><td>0.210</td><td>0.260</td><td>0.215</td><td>0.266</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td></tr><tr><td>336</td><td>0.254</td><td>0.290</td><td>0.253</td><td>0.289</td><td>0.259</td><td>0.295</td><td>0.274</td><td>0.309</td><td>0.291</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>720</td><td>0.315</td><td>0.336</td><td>0.320</td><td>0.336</td><td>0.327</td><td>0.342</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td>Avg</td><td>0.233</td><td>0.271</td><td>0.234</td><td>0.270</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="2">\( 1^{\text{st}} \) Count</td><td>7</td><td>2</td><td>8</td><td>5</td><td>16</td><td>16</td><td>0</td><td>1</td><td>0</td><td>0</td><td>2</td><td>1</td><td>1</td><td>3</td><td>0</td><td>2</td><td>0</td><td>6</td><td>0</td><td>0</td><td></td><td></td><td></td><td></td></tr></table>
491
+
492
+ * Traffic (PEMS) is not evaluated because it is included in the pre-training datasets of these time series foundation models.
493
+
494
+ # F. Societal Impacts
495
+
496
+ # F.1. Real-World Applications
497
+
498
+ In this work, we present Sundial, a family of time series foundation models that facilitates out-of-the-box forecasting. Our models employ native tokenization for continuous-valued time series and incorporate a flexible training objective, TimeFlow Loss, to enable probabilistic forecasting. With unprecedented model capacity and a trillion-scale dataset, our models can be used directly or adapted for various forecasting scenarios, such as energy planning, weather forecasting, and financial risk prevention. By generating multiple predictions at just-in-time inference speed, our models enhance the reliability of decision-making and streamline the forecasting pipeline for practitioners. This paper primarily focuses on scientific research and does not present any evident negative societal impact.
499
+
500
+ # F.2. Academic Research
501
+
502
+ We curate TimeBench, a trillion-level time series dataset for pre-training time series foundation models, which we believe will benefit the research community. Technically, we propose TimeFlow Loss to facilitate the learning of flexible next-patch distributions. Conditioned on lookback representations produced by an autoregressive Transformer, our model gains a generative capability for probabilistic forecasting, enhancing the representation learning of Transformers without the need for discrete tokenization. Through pre-training at an unprecedented scale, we identify subtle scalability bottlenecks that are not solely attributable to architectural design but are predominantly influenced by the training objectives of foundation models. The proposed TimeFlow Loss, applied to autoregressive and generative models, may provide insights for the future development of time series foundation models.
503
+
504
+ ![](images/729a3ed4c22a54f0606fb7d4dd83c108ea43b36cf93cc8cb787516bf1ed6202f.jpg)
505
+
506
+ ![](images/12de3d2e84e7ba3dab07ff6a47e30a1010aacca476e059cb6f75ef43ad614763.jpg)
507
+
508
+ ![](images/368aaf8f1ea1a0e483d7551d056b7215639fabb12d824b1b33709a860389569d.jpg)
509
+
510
+ ![](images/e592aea85283ac127dc8678578be688fcf86d3d26878487562a93e2f3f757918.jpg)
511
+
512
+ ![](images/300b21aeb340ee6cddc000e4f5998efd7912123a6c9ce7ca63873d43df4646cc.jpg)
513
+
514
+ ![](images/3e01bfed058bef3925df861e846765fff3a23d8a645b20bec6c958fdcf28c256.jpg)
515
+
516
+ ![](images/8aa0e2344f3ff84232cf45e2806f2cca9e1b4c38172c5487801029e8072b73fc.jpg)
517
+
518
+ ![](images/8f328cfeae5b2d551894b530115b3b259624ec32a65cd0bee3e6283962d97a28.jpg)
519
+
520
+ ![](images/daa169983d538a5441b6aaf8adaedd6e64f2213f04bf5087ec3efd4e7a265e7e.jpg)
521
+
522
+ ![](images/0cdc5b1251aaba5d3fd8ab9501628255a06d619bb3d5920162f1f086c3eae2a4.jpg)
523
+
524
+ ![](images/f979cd8e8ae7d1bd7d694c7cec7c5adee10095c785c49e31743ddcdfb3251ec7.jpg)
525
+ Figure 11. Showcases of zero-shot predictions from Sundial (Base) on the FEV leaderboard (Ansari et al., 2024).
526
+
527
+ ![](images/2d03685fa59c3e23fcf32a56cd60006d2c02d7c0590d0fc006a4088bce03fa82.jpg)
528
+
529
+ ![](images/9b096c920e9d0850362ab681fe3abaca72a5711b452b8741f84ddc776b92a394.jpg)
530
+
531
+ ![](images/8032dfd8d6429184b140f395680c05b825049bea98eee170a460f7c7aceba192.jpg)
532
+
533
+ ![](images/62ea1ef12f5d36827076b0afcec272d155658e04f6df97304eb04fb11ea4dfac.jpg)
534
+
535
+ ![](images/cf57c64a2dc34330f032eb70b9389f2859ec1d8b21b802d3c29f4f0ccb7634e5.jpg)
536
+
537
+ ![](images/7736734ec3c3b63c3963b3d5d5ba737de7fd289c20e7b6cf73a94252c2b32c27.jpg)
538
+
539
+ ![](images/f1107956eb5bf4e3c0f660ffd3c93af9953b20e1b62936c58619a2401c670464.jpg)
540
+
541
+ ![](images/e511ec3cf7077b12d4817696f2f2d965b63c7aa12995ffc6399c5bb73e8b9c55.jpg)
542
+
543
+ ![](images/4adff9d82da963452a9344335b07299cb20ccbdd1451bfee2ba8ed1ba7bf0401.jpg)
544
+
545
+ ![](images/cfe841de63aecb817da0aa0bcbbfb95cd6f8a83d5a68d903791b5e0fd28ed3ff.jpg)
546
+
547
+ ![](images/b479a93232977f59b59e0e27d782aa16ae3e4a7586bb7e44351a457a02a08bb3.jpg)
548
+
549
+ ![](images/24990db382e9085eecb915ce8fb6191530c083013e65d19e2dc57764733c3c4c.jpg)
550
+ Figure 12. Showcases of zero-shot predictions from Sundial (Base) on the FEV leaderboard (Ansari et al., 2024).
551
+
552
+ ![](images/7aaacbb88b6951644d86fc4dd520990b7e1f0dd4a44c62381ae632c0b3e18cba.jpg)
553
+
554
+ ![](images/f25f8f6604f863c0d7e50314733b92c39c9405c757bdbcca58e1d44ef1ffa4cf.jpg)
555
+
556
+ ![](images/abae5f445e76fc079610d3af8049c07ae3840870027ca33cbf3c26a49f53f2d8.jpg)
557
+
558
+ ![](images/7098bc341fa70dbfd08823fc4f4d6c943c7b350225add794f3e0c720596f72a1.jpg)
559
+
560
+ ![](images/a916413f97f62464c2c83ef31d216a903dd8bb802af48927ef8bf940bb674b99.jpg)
561
+
562
+ ![](images/13af4d07bf26469f417148b2d814bbaf2ad1be01e54debe73cd1093c31c017e6.jpg)
563
+
564
+ ![](images/ad543ff428b5abf5d07ffc4d5636f94db8a4d20a1b2217f096192ecd5dc478b9.jpg)
565
+
566
+ ![](images/132ba8bb1e4de7a1cba78bef7f7e74adae542b9925cc3ce3b406371aca6d9b68.jpg)
567
+
568
+ ![](images/f8458c0662db1effcb89bcd48bc887e2315e0e893bbf6f7c6249e5eb8186d4aa.jpg)
569
+
570
+ ![](images/a2097d2a9e2862c91e4fdfe3cbd69048203d1377b8f35ae75ab1dcb8782f4ff2.jpg)
571
+
572
+ ![](images/61589809c0c5f50bb9327c660ee631a080da7db0d6667c11e258ba04291c9dbe.jpg)
573
+
574
+ ![](images/6897731af8e0ca2d1f4bee7421addf8a33e549208d82966167655d5b5af90515.jpg)
575
+ Figure 13. Showcases of zero-shot predictions from Sundial (Base) on long-term forecasting datasets (Wu et al., 2022).
576
+
577
+ ![](images/7699ad7b91c201337918072912dd45afc3934b53c8758685ab18e3f24c083081.jpg)
578
+
579
+ ![](images/b34b8914e8db9398de4267809f2d4af8927290aef2de667acdf6b3385ac045e0.jpg)
580
+
581
+ ![](images/75664d3861d78cec386194f4141a30e7821bb26c9a1c01b65bcd254604af06e0.jpg)
582
+
583
+ ![](images/9b4858432bb1c2add0b41d47a478d6a4135b4611552d59f36f7e30e2b2e5009a.jpg)
584
+
585
+ ![](images/4e720ae08b255b2336728b755680589f7f2d835cfa881757ff60b21544c9d43a.jpg)
586
+
587
+ ![](images/c7f7c1027e44b97e1222e4801c93f3cabda2cccbdad0a2492263879a39a25ca9.jpg)
588
+
589
+ ![](images/a4e3703b58d78c66124dca04715bc9f44df49f770527ca5f7d9939a1d073449a.jpg)
590
+
591
+ ![](images/186785df2a74029374e6c41a9e760b7160f6c3b1fc33f967bd5bf8227a9ea5aa.jpg)
592
+
593
+ ![](images/85273c7a1f2542414787faf3b8c2aeb4b81b68dae17cfe287cf42ea0bb8acd5c.jpg)
594
+
595
+ ![](images/f81ab02a018012789da491f2b93ebbc7c09451a9ce4add5304a029a67ee94b52.jpg)
596
+
597
+ ![](images/8695c05c97f5a6d2de950790f2454ce634280c88af6c45b92a79e8e13bd56e40.jpg)
598
+
599
+ ![](images/bfa5cc6d76165dc48bbf0f2fd79618aff0fb387b3458aee00469bd31c1ab0719.jpg)
600
+ Figure 14. Showcases of Sundial (Left) and the same Transformer backbone pre-trained with MSE Loss (Right). MSE Loss optimizes a deterministic forecaster: given a lookback series, the model can only produce one prediction as an estimate of the mean. This objective may fail to accommodate divergent future variations during large-scale pre-training, leading to mode collapse and over-smooth results (as illustrated in the fourth row). TimeFlow optimizes a generative forecaster: it generates the various possibilities observed in the pre-training dataset. From these raw predictions, we can estimate the underlying complex distribution and various statistics. Moreover, the more concentrated the generated predictions, the higher the model's confidence in its forecast.
601
+
602
+ ![](images/ade94e6180a5785f210d7ed8fc78322f7d35a0979174ca4e2c1c0eedecdfb1a5.jpg)
603
+
604
+ ![](images/ce4f3d780413488286477320d47d702f0356ba73ac886a0a94248e887ac92956.jpg)
605
+
606
+ ![](images/f4b80e699b8648c7ca4c2e00fa05326ee6a7994434e90be4bc489f802670e28b.jpg)
607
+
608
+ ![](images/264b75c0780b7345b987f7759072b0365b117cc92a167decc7a68855a41f66ac.jpg)
609
+
610
+ ![](images/28da6254aaea072a1101430616cac43f710976ba472acad235658d891f40b2c4.jpg)
611
+
612
+ ![](images/d79015bf38987747e971773eaa53c766591b423594378e500c10c65b5db747ad.jpg)
613
+
614
+ ![](images/49dc69a9d3d762cfa4cc9c55df00f6ef424af97816fde790800211d326def6a0.jpg)
615
+
616
+ ![](images/cd89d42b0b4ea46fd3f6d027dec3be124006b1ab070235347d79e38f52011541.jpg)
617
+
618
+ ![](images/49e04248b97621f979a57fa710818a3385b16e61c92ce1b7db2657df84954136.jpg)
619
+
620
+ ![](images/7e4632053ca84af08b984a51bdf9aa871b3239951d265ae0a68eb212b321c890.jpg)
621
+
622
+ ![](images/0fe7fc2773b19da14a93bfecf559f7febee1a99f4bdc935e4b06bb5ff8f1b9f5.jpg)
623
+
624
+ ![](images/4a663fc695ae9e212f5c99ad77329a821819ae2ee2e6e3294bd9084469d77493.jpg)
625
+ Figure 15. Supplementary showcases of Sundial (Left) and the same Transformer backbone pre-trained by MSE Loss (Right).
626
+
627
+ ![](images/1220e787f92340dee82367d96256618d25e61ecd5351e4fe9be96d7eca024416.jpg)
2502.00xxx/2502.00816/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5accd6c89919a1dbe83a4e8c96b437a4fdbf0c5d17624844c95e85e9df70c6f
3
+ size 2507124
2502.00xxx/2502.00816/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2502.00xxx/2502.00873/9de04ed4-cd03-45c2-bb87-7052345f2dbb_content_list.json ADDED
The diff for this file is too large to render. See raw diff