Add Batch d02db13b-4870-408c-9816-1c052c26c094
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_content_list.json +0 -0
- 2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_model.json +0 -0
- 2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_origin.pdf +3 -0
- 2301.07xxx/2301.07041/full.md +0 -0
- 2301.07xxx/2301.07041/images.zip +3 -0
- 2301.07xxx/2301.07041/layout.json +0 -0
- 2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_content_list.json +0 -0
- 2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_model.json +0 -0
- 2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_origin.pdf +3 -0
- 2301.07xxx/2301.07067/full.md +0 -0
- 2301.07xxx/2301.07067/images.zip +3 -0
- 2301.07xxx/2301.07067/layout.json +0 -0
- 2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_content_list.json +0 -0
- 2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_model.json +0 -0
- 2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_origin.pdf +3 -0
- 2301.07xxx/2301.07069/full.md +428 -0
- 2301.07xxx/2301.07069/images.zip +3 -0
- 2301.07xxx/2301.07069/layout.json +0 -0
- 2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_content_list.json +0 -0
- 2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_model.json +0 -0
- 2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_origin.pdf +3 -0
- 2301.07xxx/2301.07093/full.md +774 -0
- 2301.07xxx/2301.07093/images.zip +3 -0
- 2301.07xxx/2301.07093/layout.json +0 -0
- 2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_content_list.json +0 -0
- 2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_model.json +0 -0
- 2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_origin.pdf +3 -0
- 2301.07xxx/2301.07094/full.md +0 -0
- 2301.07xxx/2301.07094/images.zip +3 -0
- 2301.07xxx/2301.07094/layout.json +0 -0
- 2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_content_list.json +1907 -0
- 2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_model.json +0 -0
- 2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_origin.pdf +3 -0
- 2301.07xxx/2301.07137/full.md +393 -0
- 2301.07xxx/2301.07137/images.zip +3 -0
- 2301.07xxx/2301.07137/layout.json +0 -0
- 2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_content_list.json +0 -0
- 2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_model.json +0 -0
- 2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_origin.pdf +3 -0
- 2301.07xxx/2301.07184/full.md +0 -0
- 2301.07xxx/2301.07184/images.zip +3 -0
- 2301.07xxx/2301.07184/layout.json +0 -0
- 2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_content_list.json +0 -0
- 2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_model.json +0 -0
- 2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_origin.pdf +3 -0
- 2301.07xxx/2301.07255/full.md +0 -0
- 2301.07xxx/2301.07255/images.zip +3 -0
- 2301.07xxx/2301.07255/layout.json +0 -0
- 2301.07xxx/2301.07277/9592a50b-d6aa-4260-b4c3-a053b4776d58_content_list.json +1342 -0
.gitattributes
CHANGED
@@ -11541,3 +11541,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2301.11xxx/2301.11924/bf587a85-868e-4d5f-92b6-85bb2573bb8a_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2301.13xxx/2301.13819/a95f2479-244b-4a79-9bd4-406589a300f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2302.04xxx/2302.04662/c06d6da4-a13b-4e95-a392-6337d44974de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07277/9592a50b-d6aa-4260-b4c3-a053b4776d58_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07302/99dde7a7-2b71-48da-8d7a-34760b31f82c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07325/69b703e7-9387-496c-acfd-8455572537a5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07340/6ca2b74e-c90e-4e16-92ff-c139805d5e65_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07388/0b9e0dfd-bf78-48c7-9be5-dd24a108012c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07405/1ea040af-0063-4c62-ae27-343b3c693e8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07507/71b2168c-5cc4-4ea5-a179-f81fd7acdc9b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07525/f9d93994-be5c-4031-a6f2-a15035131a31_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07573/9b439216-6c7b-4fbd-9838-2720acd05064_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07597/800ac535-d06e-464e-b6c2-a0f67d4dd31e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07608/6f87a829-bc6f-4d55-84b6-b862ac49d8e5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07642/ef17755d-47af-48e6-b527-aacf91d76de3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07668/f7b38faf-2b2f-4574-a1af-34015b6c4987_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07673/4e54cbba-1e34-4c46-967c-83e76e56dfc4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07681/691cf97e-54f5-4641-b4b5-ee8a5ff57e19_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07733/3489db0b-2c84-4f51-84d6-47bfda654b5e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07779/42f905a0-9568-4df4-842f-622916d9184e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07784/130816e2-0675-427c-a68f-46824643b16b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07829/b4f46903-18b4-4314-ac9f-a6e3bea5a696_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07944/ac5e54b5-a57e-493d-a351-ccf1ce35c083_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07945/893ee8fa-3142-4b72-87ce-241f6cd8f6ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.07xxx/2301.07999/285b8e39-1ace-44cd-bcf0-7ae4f11f4a55_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08028/af0dd9c4-aa0e-47db-9b6c-435a1571486e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08072/8c3d3b23-8084-42e9-b3f9-854275d4c9ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08125/bcca7cbc-9e97-46fc-97e9-3aecaa90b3ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08128/0af7cd54-a814-4be9-8270-af268791f9fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08160/dc06ef4a-41a7-408e-9a04-cfd10e69c441_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08170/eb352e70-15bf-4d51-b4d2-71029400c8bb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08189/df629c3d-5b5f-4a4c-a4f7-853cc48b9e7a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08210/db2db6bc-7976-4707-985e-20b5b92e7474_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08227/e6078782-0ced-448a-b7f4-5071fdf25e34_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08230/480d86c7-968a-4660-ac4e-07c4f03a11a5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08243/269eb592-801f-4f4d-8f44-a00d0700277e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08247/0f1952e2-7ec6-4af0-b719-266a38c3d322_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08330/2841d028-8025-43c3-b236-398ea3877186_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08343/ab1ee4e1-a88b-443b-b152-2ba4abfd530e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08419/c368ade4-079c-49b6-8a9d-438fb4901c53_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08525/18616713-5722-474c-b657-7c6698f6cbfa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08556/6ec13738-0d9e-4e3f-890f-d04f69cdcddc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08571/abd29906-41ad-4431-ae2c-6be629f8abd5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08653/7102618a-26b6-40e5-966b-e389260b69de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08721/2fcb10a3-96d4-4f69-9b4c-ff628513ae60_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08727/08882b91-5c61-4841-a117-58002b64ff29_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08730/eebda3f7-1357-4834-9a1e-94db68cb9d67_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08739/c00b6ad7-9010-43f7-a3aa-2131c3a2c207_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08745/1a34ac7d-2261-46fc-a61e-ee7ed1fbea0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08801/dcac17fe-d7e5-42a2-9657-f6b398f88256_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08826/bda15a09-6727-4e58-aaa3-49dd405e02d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08859/734106a2-c78d-4a84-bb7c-85b90ac60fc8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08863/555ddf71-ca6e-46d2-b00d-b2b5426bc10d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08871/cc4eeddb-3199-4d75-b3da-2de17a6f6611_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.08xxx/2301.08881/97deb75e-47d8-4ac9-b8a8-0de32ab23592_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.10xxx/2301.10009/7cf7c42b-3065-41b3-8d3e-0df540d0a6a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2301.11xxx/2301.11798/0596d9d4-ac78-41b4-ac49-76c5e295c3ee_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2302.05xxx/2302.05446/b607bcfb-e70d-47ff-941f-8344d9b9e7e1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2303.01xxx/2303.01983/ebf3b923-2b29-49ff-8ca6-10580a2646a5_origin.pdf filter=lfs diff=lfs merge=lfs -text
2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07041/6205e212-76b4-448b-8e7a-3afedc0f900b_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c42f98ac38950c0d0b322a805a9fc02332b84569ccca9d8d2f67cdf96bf2d0
+size 312536
2301.07xxx/2301.07041/full.md
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07041/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05eea93ecd7e18368ab397a82e200a69f2d173be9aed24630a585413345b6620
+size 228277
2301.07xxx/2301.07041/layout.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07067/5d614258-f6ef-41d8-9f9c-214fcaff1ae1_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8851e5c72a88b1ed11f67e505dc24e5caad93d5080e53899e9dc32451fb53143
+size 1309210
2301.07xxx/2301.07067/full.md
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07067/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc7b668d8649108ff5f696da7e1318367eb75e45de001ddb074c01bba1005c0f
+size 1707684
2301.07xxx/2301.07067/layout.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2301.07xxx/2301.07069/de66c068-ed86-43dd-884a-8341256d9fc9_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0051360b837eac99a3b01525e1f2f847410f36fe26d62c362c32eba087976c05
+size 946811
2301.07xxx/2301.07069/full.md
ADDED
@@ -0,0 +1,428 @@
# Prompting Large Language Model for Machine Translation: A Case Study

Biao Zhang, Barry Haddow, Alexandra Birch

School of Informatics, University of Edinburgh
b.zhang@ed.ac.uk, bhaddow@inf.ed.ac.uk, a.birch@ed.ac.uk

# Abstract

Research on prompting has shown excellent performance with little or even no supervised training across many tasks. However, prompting for machine translation is still under-explored in the literature. We fill this gap by offering a systematic study of prompting strategies for translation, examining various factors for prompt-template and demonstration-example selection. We further explore the use of monolingual data and the feasibility of cross-lingual, cross-domain, and sentence-to-document transfer learning in prompting. Extensive experiments with GLM-130B (Zeng et al., 2022) as the testbed show that 1) the number and the quality of prompt examples matter, and using suboptimal examples degrades translation; 2) several features of prompt examples, such as semantic similarity, show a significant Spearman correlation with prompting performance, yet none of the correlations is strong enough; 3) using pseudo-parallel prompt examples constructed from monolingual data via zero-shot prompting can improve translation; and 4) improved performance is achievable by transferring knowledge from prompt examples selected in other settings. We finally provide an analysis of the model outputs and discuss several problems that prompting still suffers from.
# 1 Introduction

Large language models (LLMs) pretrained on massive unlabeled corpora have shown impressive emergent abilities under model scaling, which enable prompting for downstream applications (Brown et al., 2020; Kaplan et al., 2020; Wei et al., 2022b; Zhang et al., 2022a; Chowdhery et al., 2022). Unlike task-specific finetuning, prompting constructs task-specific prompts by rephrasing test examples with descriptive task instructions and executes the task by feeding the prompts to the LLM directly. It can be further enhanced through in-context learning by providing a few labeled examples (or prompt examples) as a demonstration (Brown et al., 2020). As a new paradigm, prompting LLMs has achieved state-of-the-art performance over a range of natural language processing (NLP) tasks (Chung et al., 2022; Goyal et al., 2022; Wei et al., 2022c; Chowdhery et al., 2022).

In this paper, we focus on prompting LLMs for machine translation (MT). MT is a complex task that requires transforming a source input into a semantically equivalent target output in a different language, combining sequence understanding and generation. It offers a unique platform to assess the cross-lingual generation capability of LLMs, and this assessment may shed light on pretraining/finetuning algorithm design for achieving universal LLMs (Chowdhery et al., 2022). While a few studies have reported translation results (Brown et al., 2020; Chowdhery et al., 2022), a systematic study of how prompting works for MT is still missing from the literature.

We aim to fill this gap by thoroughly examining different prompting setups using the recently released GLM-130B (Zeng et al., 2022), concerning three aspects in particular: the prompting strategy, the use of unlabeled/monolingual data, and the feasibility of transfer learning. Prompting has shown varying sensitivity to the choice of prompt templates and examples (Zhao et al., 2021). For MT, prior studies adopted different templates (Brown et al., 2020; Wei et al., 2022a; Chowdhery et al., 2022), and we re-evaluate them to identify the optimal one. We further design a set of features for prompt examples and explore which of them can explain prompting performance, based on which we develop an example selection strategy.

Since leveraging monolingual data to improve MT has long been of interest, we would also like to determine whether and how such data can be used in prompt-example construction. We make a step in this direction by studying the effect of data augmentation using back-/forward-translation (Sennrich et al., 2016b; Zhang and Zong, 2016) via zero-shot prompting. In addition, neural MT and pretrained LLMs have shown encouraging transfer abilities (Devlin et al., 2019; Arivazhagan et al., 2019; Zhang et al., 2020; Xue et al., 2021), but transfer learning for prompting has received little attention. Whether prompt examples are transferable across settings, such as from one domain or language pair to another and from sentence-level examples to document-level translation, is yet to be addressed.

We address the above concerns with GLM-130B as the testbed and conduct extensive experiments on the FLORES and WMT evaluation sets. We mainly study translation among three languages: English, German and Chinese. We also provide a quantitative and qualitative analysis disclosing problems of prompting for MT, which might offer insights for future study. Our main findings are listed below:
- Prompting performance varies greatly across templates, and language-specific templates mainly work when translating into languages the LLM was pretrained on. A simple English template works best for MT.
- Several features of prompt examples, such as sequence length, language model score, and semantic similarity, correlate significantly with prompting performance, but the correlations are generally weak. Selecting examples by these features can outperform the random strategy, but not consistently.
- Using monolingual examples for prompting hurts translation. By contrast, constructing pseudo-parallel examples via back-/forward-translation is a good option; back-translation performs better and is more robust.
- Prompting shows some degree of transferability. Demonstrations from other settings can improve translation over the zero-shot counterpart, while the superiority of a demonstration in one setting hardly generalizes to another.
- Prompting for MT still suffers from copying, mistranslation of entities, hallucination, inferior direct non-English translation, and a prompt trap, where translating the prompt itself via prompting becomes non-trivial.
# 2 Setup

Prompting for MT. Given a pretrained and fixed LLM $\mathcal{L}$, MT prompting first converts each test input $X$ into a prompt according to a template $\mathcal{T}$, and then generates the translation $Y$ by feeding the prompt to $\mathcal{L}$. In this study, we consider zero-shot and few-shot prompting for translation.

Zero-shot prompting only has access to the test input $X$, while few-shot prompting assumes that a few extra labeled examples (also called prompt or demonstration examples) $\mathcal{D}^P = \{X_i', Y_i'\}_{i=1}^K$ are available and can be used as a demonstration. In particular, we adopt the following template for zero-shot prompting, based on the results in Section 3:

$$
[\text{src}]: X \ [\text{tgt}]: \tag{1}
$$

where [src] and [tgt] denote the test languages, i.e., the source and target language names of the test language pair. For few-shot prompting, we concatenate the given prompt examples in front of the test input:

$$
[\text{psrc}]: X_1' \ [\text{ptgt}]: Y_1' \ \cdots \ [\text{psrc}]: X_K' \ [\text{ptgt}]: Y_K' \ [\text{src}]: X \ [\text{tgt}]: \tag{2}
$$

where [psrc] and [ptgt] denote the prompt languages, i.e., the source and target language names of the prompt examples. By default, prompt examples and test data share the same language pair; when considering cross-lingual transfer for prompting, however, prompt examples might be in a different language pair.
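To make the two formats concrete, the following is a minimal sketch of prompt assembly under template A (see Section 3). The helper name is illustrative, not from the paper, and whether a line break or a space separates the input from the target-language name is one of the choices ablated in Table 1:

```python
# Minimal sketch of prompt construction for formats (1) and (2) under
# template A. `make_prompt` is a hypothetical helper, not a real API.

def make_prompt(src_lang, tgt_lang, src_text, examples=(), sep="\n"):
    """Build a zero-shot (examples empty) or few-shot prompt string.

    `sep` controls the separator before the target-language name,
    corresponding to the w/ vs. w/o line-break ablation in Table 1.
    """
    parts = [f"{src_lang}: {x}{sep}{tgt_lang}: {y}" for x, y in examples]
    parts.append(f"{src_lang}: {src_text}{sep}{tgt_lang}:")
    return "\n".join(parts)

# Zero-shot (format 1) and 1-shot (format 2):
print(make_prompt("German", "English", "Wie geht es dir?"))
print(make_prompt("German", "English", "Wie geht es dir?",
                  examples=[("Guten Morgen.", "Good morning.")]))
```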
We also explore the template language, i.e., the language in which the template itself is expressed. For example, the Chinese template "中文: $X$ 英文:" is the Chinese counterpart of the English template "Chinese: $X$ English:".

Setting. We experiment with GLM-130B, an LLM with 130B parameters pretrained on Chinese and English monolingual corpora, which was reported to outperform GPT-3 and OPT-175B on several NLP tasks (Zeng et al., 2022). Note that GLM-130B is a raw LLM without any further finetuning. We use its INT4-quantized version, which is more affordable and suffers little performance degradation. We adopt beam search for MT with a beam size of 2, and perform experiments on 4 RTX 3090 and A100-40G GPUs.

We work on three languages: English (En), German (De), and Chinese (Zh).
<table><tr><td rowspan="2">ID</td><td rowspan="2">Template (in English)</td><td colspan="2">English</td><td colspan="2">German</td><td colspan="2">Chinese</td></tr><tr><td>w/o</td><td>w/</td><td>w/o</td><td>w/</td><td>w/o</td><td>w/</td></tr><tr><td>A</td><td>[src]: [input]◇[tgt]:</td><td>38.78</td><td>31.17</td><td>-26.15</td><td>-16.48</td><td>14.82</td><td>-1.08</td></tr><tr><td>B</td><td>[src]◇[tgt]:</td><td>-88.62</td><td>-85.35</td><td>-135.97</td><td>-99.65</td><td>-66.55</td><td>-85.84</td></tr><tr><td>C</td><td>[src]◇Translate to [tgt]:</td><td>-87.63</td><td>-68.75</td><td>-106.30</td><td>-73.23</td><td>-63.38</td><td>-70.91</td></tr><tr><td>D</td><td>[src]◇Translate from [src] to [tgt]:</td><td>-113.80</td><td>-89.16</td><td>-153.80</td><td>-130.65</td><td>-76.79</td><td>-67.71</td></tr><tr><td>E</td><td>[src]: [input]◇Translate to [tgt]:</td><td>20.81</td><td>16.69</td><td>-24.33</td><td>-5.68</td><td>-8.61</td><td>-30.38</td></tr><tr><td>F</td><td>[src]: [input]◇Translate from [src] to [tgt]:</td><td>-27.14</td><td>-6.88</td><td>-34.36</td><td>-9.22</td><td>-32.22</td><td>-44.95</td></tr></table>

Table 1: COMET scores averaged over 6 language pairs for zero-shot prompting with different templates and different template languages on the Wiki Ablation sets. w/ and w/o denote whether a line break is added to the template; $\diamond$ indicates the position of the line break. [src] and [tgt] are placeholders for the source and target test language names, and [input] for the test input. English, German and Chinese indicate template languages. Best results are shown in bold.
Figure 1: COMET scores for few-shot prompting as a function of the number of prompt examples ($K = 1, 5, 10, 20$) on the Wiki Ablation sets. For each setup, we randomly sample 100 times from the example pool and show the performance distribution via box plots. The dashed red line denotes the zero-shot baseline; the blue curve and shaded area denote the mean and standard deviation.
We perform our main analysis on FLORES (Wiki domain, En-De-Zh; NLLB Team et al., 2022) and WMT21 (News domain, En-De, En-Zh; Akhbardeh et al., 2021), and also report results on Multi-Domain (IT, Law and Medical domains, De-En; Aharoni and Goldberg, 2020) to examine domain robustness and transfer ability, and on PDC (News domain, $\mathrm{Zh}\rightarrow \mathrm{En}$; Sun et al., 2022) for document-level translation. To understand the relation between prompt examples and their prompting performance, we construct an Ablation set for Wiki, WMT and Multi-Domain (IT and Medical) based on the dev sets of FLORES, WMT21 and Multi-Domain, respectively: we randomly sample 100 instances as the ablation test set and use the rest as the default example selection pool. To distinguish the two, we refer to the official dev and test sets as Full sets. Detailed statistics are listed in Table 9, Appendix.

We evaluate translation performance with both a surface-based metric, detokenized BLEU $\uparrow$ from SacreBLEU (Post, 2018), and a model-based metric, COMET $\uparrow$ from unbabel-comet with wmt20-comet-da (Rei et al., 2020).
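For reference, a sketch of this evaluation setup, assuming the `sacrebleu` and `unbabel-comet` packages are installed; the COMET prediction API differs slightly across package versions, so the call below is illustrative rather than exact:

```python
# Evaluation sketch: detokenized BLEU via SacreBLEU and COMET via
# unbabel-comet with wmt20-comet-da, as described above.
import sacrebleu
from comet import download_model, load_from_checkpoint

def evaluate(sources, hypotheses, references):
    bleu = sacrebleu.corpus_bleu(hypotheses, [references])
    comet_model = load_from_checkpoint(download_model("wmt20-comet-da"))
    data = [{"src": s, "mt": h, "ref": r}
            for s, h, r in zip(sources, hypotheses, references)]
    comet_scores = comet_model.predict(data, batch_size=8, gpus=1)
    return bleu.score, comet_scores  # COMET return format varies by version
```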
# 3 Prompting Strategy for MT

To perform MT, prompting needs to cast the translation problem into a language modeling problem via the prompt. The format of the prompt, including its wording, thus directly affects how the LLM understands the task and behaves. For MT, we are interested in the following research questions:

- Which template should we use for MT prompting? And in what language should the template be expressed?
- Does the demonstration matter for MT prompting? How should we select optimal prompt examples?

We address these questions through extensive experiments on the Wiki Ablation sets.

Zero-shot prompting performance varies greatly across templates. We start with zero-shot prompting and explore the effect of different templates. Depending on how MT is described, and partially inspired by prior studies (Brown et al., 2020; Chowdhery et al., 2022; Wei et al., 2022a), we compare 6 templates and evaluate them on the Wiki Ablation sets covering 6 language pairs (En $\leftrightarrow$ De, En $\leftrightarrow$ Zh, De $\leftrightarrow$ Zh). Table 1 shows the results (detailed results in Table 10, Appendix). The template affects zero-shot quality substantially, and the simple English template $\mathbb{A}$, specifying just the source and target language names, achieves the best overall results. We therefore focus on template $\mathbb{A}$ in follow-up experiments.
<table><tr><td rowspan="2">Feature</td><td colspan="2">BLEU</td><td colspan="2">COMET</td></tr><tr><td>HQ</td><td>+ LQ</td><td>HQ</td><td>+ LQ</td></tr><tr><td>SLength</td><td>0.21</td><td>0.31</td><td>0.14</td><td>0.26</td></tr><tr><td>TLength</td><td>0.23</td><td>0.32</td><td>0.17</td><td>0.29</td></tr><tr><td>LMScore</td><td>0.20</td><td>0.33</td><td>0.14</td><td>0.31</td></tr><tr><td>MTScore</td><td>0.04</td><td>0.14</td><td>0.11</td><td>0.19</td></tr><tr><td>SemScore</td><td>0.19</td><td>0.30</td><td>0.16</td><td>0.30</td></tr><tr><td>CaseSemScore-Src</td><td>0.14</td><td>0.29</td><td>0.11</td><td>0.28</td></tr><tr><td>CaseSemScore-Tgt</td><td>0.14</td><td>0.30</td><td>0.14</td><td>0.31</td></tr></table>

Table 2: Spearman's $\rho$ between demonstration features and their prompting performance for 1-shot prompting on the Wiki Ablation sets. We randomly sample 600 demonstrations from each pool to calculate the correlation. HQ: examples come from the default high-quality pool; LQ: examples come from the low-quality pool based on WikiMatrix.v1.

Figure 2: COMET against LMScore for 1-shot prompting on the Wiki Ablation sets. While the correlations are significant, the data points are widely scattered.
Language-specific templates deliver mixed results. Table 1 also shows the prompting results of German and Chinese templates, which often substantially underperform their English counterparts. Since German is not a major pretraining language of GLM-130B, a German template degrades translation substantially. By contrast, a Chinese template improves performance when translating into Chinese (see Table 10). Still, an English template works best on average.

The preference of GLM-130B for English templates also shows that its level of language understanding and cross-lingual ability varies across languages, even though it was pretrained on the same amount of monolingual Chinese and English tokens. This might be because English is used more globally than Chinese, but it might also suggest that improving the language understanding of an LLM requires more advanced training algorithms beyond scaling the training data.

Using more prompt examples in the demonstration improves translation significantly on average. We next study few-shot prompting, following template $\mathbb{A}$ but in format (2), with $K$ varying from 1 to 20. We evaluate multiple demonstrations for each $K$ via random sampling to reduce data biases. Figure 1 shows that the more examples used, the better the average performance (more results are shown in Figure 5, Appendix), albeit at the cost of more GPU memory and a higher inference time per token, as shown in Figure 3.

Figure 3: Inference time per token in seconds for zero-/few-shot prompting on the Wiki En-De Ablation sets. Numbers are averaged over 3 runs with 3 distinct demonstrations on 4 A100-40G GPUs.

The performance of a demonstration is not stable. However, we also see high performance variance under the same $K$: a demonstration with 5 examples may outperform its 10- or 20-example counterpart. Figure 1 also shows that 1-shot prompting underperforms zero-shot prompting in many cases, even on average. This echoes previous findings on other NLP tasks (Zhao et al., 2021; Liu et al., 2022) and highlights the importance of developing effective example selection strategies.

Note that few-shot prompting greatly improves translation into Chinese. Based on our manual analysis, the reason is that the zero-shot baseline tends to translate into traditional Chinese with garbled characters, where prompt examples help (the reference text is always simplified Chinese).

Several features correlate significantly yet weakly with prompting performance. We thus turn to example selection for prompting. Our idea is to extract a set of diverse features from a demonstration and examine whether any of them is informative enough to serve as a selection indicator. In this study, we simplify the analysis by focusing on 1-shot prompting, which avoids the question of example ordering (we leave few-shot analysis to future work). In particular, we extract and analyze 7 features of a demonstration:
S(T)Length: the number of source (target) tokens;

LMScore: the GLM-130B-based, length-normalized log-likelihood of the demonstration;

MTScore: the translation quality of the prompt example estimated by the COMET QE model wmt20-comet-qe-da (Rei et al., 2020);

SemScore: a semantic score based on the cosine similarity between the demonstration's source and target sentence embeddings from LASER2 (Heffernan et al., 2022);

CaseSemScore-Src: similarity to the input, i.e., the SemScore between the test input and the demonstration's source, averaged over test inputs;

CaseSemScore-Tgt: as CaseSemScore-Src, but compared against the demonstration's target.
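As an illustration, SemScore reduces to a cosine similarity over precomputed embeddings; the sketch below assumes the LASER2 source and target embeddings are obtained elsewhere:

```python
# SemScore sketch: cosine similarity between the demonstration's source
# and target LASER2 sentence embeddings (embeddings assumed precomputed).
import numpy as np

def sem_score(src_emb: np.ndarray, tgt_emb: np.ndarray) -> float:
    return float(np.dot(src_emb, tgt_emb)
                 / (np.linalg.norm(src_emb) * np.linalg.norm(tgt_emb)))
```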
We randomly sample multiple demonstrations and inspect the Spearman's correlation between their feature values and prompting performance. We consider both a high-quality and a low-quality pool for sampling.

Table 2 summarizes the results and Figure 2 illustrates the relation between COMET and LMScore (more results are given in Table 11 and Figures 6 and 7, Appendix). With the high-quality pool, different demonstrations yield similar translation results (see the blue points) even though their feature values vary greatly. Several features show insignificant and inconsistent correlations, particularly for $\mathrm{De}\rightarrow \mathrm{En}$ and $\mathrm{Zh}\rightarrow \mathrm{En}$. This suggests that developing a selection policy for a high-quality example pool is non-trivial.

After mixing in demonstrations from the low-quality pool, the correlations strengthen. LMScore and CaseSemScore-Tgt show the highest correlation on average, followed by TLength and SemScore. MTScore behaves much worse, which might be caused by its instability in sentence-level evaluation (Moghe et al., 2022). However, we see no significant difference in Spearman's $\rho$ between input-relevant and input-agnostic features (Agrawal et al., 2022), nor among surface-based, LLM-based and semantics-based features. Surprisingly, the simple S/TLength feature yields a reasonably high correlation. We argue that long examples offer the LLM more signal about the task's input and output spaces; this finding suggests selecting long unlabeled sentences for annotation to improve prompting. Yet most Spearman's $\rho$ values are well below 0.5, indicating a weak and fragile relation.

In general, selecting prompt examples with high translation quality, high semantic similarity, high LLM likelihood, long sequence length, or high similarity to the test inputs are all reasonable strategies. Unfortunately, none of them guarantees optimal translation performance.
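The analysis itself is a plain rank correlation; a minimal sketch with placeholder numbers (the paper samples 600 demonstrations per pool):

```python
# Spearman correlation between a demonstration feature (here LMScore)
# and its 1-shot COMET score, as in Table 2. Values are placeholders
# for illustration only.
from scipy.stats import spearmanr

lm_scores = [-1.2, -0.8, -2.1, -0.5, -1.7, -0.9]
comet_scores = [41.0, 48.5, 33.2, 50.1, 39.8, 47.2]

rho, p = spearmanr(lm_scores, comet_scores)
print(f"Spearman's rho = {rho:.2f} (p = {p:.3f})")
```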
<table><tr><td rowspan="2">Method</td><td colspan="2">Wiki</td><td colspan="2">WMT</td></tr><tr><td>BLEU</td><td>COMET</td><td>BLEU</td><td>COMET</td></tr><tr><td>Zero-Shot</td><td>24.08</td><td>33.92</td><td>20.38</td><td>17.97</td></tr><tr><td colspan="5">1-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>26.31</td><td>48.29</td><td>21.27</td><td>30.70</td></tr><tr><td>SemScore</td><td>26.73</td><td>49.34</td><td>21.82</td><td>31.28</td></tr><tr><td>LMScore</td><td>26.48</td><td>47.92</td><td>21.59</td><td>30.81</td></tr><tr><td>TLength</td><td>26.54</td><td>48.73</td><td>21.29</td><td>30.68</td></tr><tr><td colspan="5">5-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>27.46</td><td>51.11</td><td>21.82</td><td>33.87</td></tr><tr><td>SemScore</td><td>27.36</td><td>51.66</td><td>22.37</td><td>34.30</td></tr><tr><td>LMScore</td><td>27.17</td><td>50.65</td><td>22.04</td><td>35.19</td></tr><tr><td>TLength</td><td>27.08</td><td>50.50</td><td>21.75</td><td>34.29</td></tr><tr><td colspan="5">1-Shot Translation (low-quality pool)</td></tr><tr><td>Random</td><td>24.75</td><td>38.86</td><td>22.06</td><td>30.70</td></tr><tr><td>Ours</td><td>24.94</td><td>39.88</td><td>22.23</td><td>30.87</td></tr></table>

Table 3: BLEU and COMET scores for zero-shot and few-shot prompting on the Wiki and WMT Full sets with different selection strategies. Ours: the proposed combined strategy; Random: random sampling; SemScore, LMScore and TLength: selecting top-ranked examples by the corresponding feature. We select 3 demonstrations per translation direction and report average performance; the final score is further averaged over language pairs. Underlined results are the best in each section; bold results are the overall best.

Using prompt examples selected by the proposed features yields improved performance. We next verify the above findings on the Full sets. We explore selection strategies based on SemScore, LMScore and TLength (i.e., using top-ranked examples), as they show high average correlation. We did not analyze CaseSemScore-Tgt, as it is more complicated and makes no significant difference. Note that we exclude overly long (more than 100 tokens) or overly short (fewer than 10 tokens) examples during selection. We also consider 5-shot prompting, where we concatenate the top-ranked 5 examples in ascending order (Liu et al., 2022).

Table 3 shows that, with the high-quality pool, the feature-based strategies tend to outperform the random baseline, and the SemScore-based strategy performs well across settings (detailed results are available in Tables 13 and 14, Appendix). These strategies also generalize to 5-shot prompting to some extent. For selection from the low-quality pool, we propose a combined strategy: we first choose the top-11K examples by SemScore to filter out poor examples, dropping the top-1K among them as they tend to be uninformative (see Table 12, Appendix); we then re-rank the rest by LMScore and retain the top-1K examples, to which we further apply the TLength-based strategy. In Table 3, this combined strategy outperforms the random one by varying degrees (see the sketch below).

Figure 4: COMET scores for few-shot prompting with monolingual data on the Wiki Ablation sets. Random Example: random sentence pairs; Source/Target Example Only: only source or target data is used for prompting; Source/Target Example Aug: pseudo-parallel data constructed via zero-shot prompting is used instead. For each setup, we randomly sample 50 demonstrations and report average performance.
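A sketch of the combined strategy, assuming per-example feature functions for SemScore, LMScore and TLength are supplied by the caller (the paper does not prescribe an implementation):

```python
# Combined selection for low-quality pools: keep SemScore ranks 1K-11K,
# re-rank by LMScore and keep the top 1K, then pick the longest examples.

def combined_select(pool, sem_score, lm_score, t_length, k=1):
    by_sem = sorted(pool, key=sem_score, reverse=True)
    candidates = by_sem[1000:11000]  # drop uninformative top-1K and the poor tail
    by_lm = sorted(candidates, key=lm_score, reverse=True)[:1000]
    return sorted(by_lm, key=t_length, reverse=True)[:k]
```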
# 4 Monolingual Data for Prompting

A longstanding concern in MT is how to utilize unlabeled data to improve translation. While prompting enables few-shot learning and thereby reduces the data requirement, it is still valuable to explore whether the demonstration can benefit from monolingual examples, both for the study of MT and for understanding the role of the demonstration in prompting.

Min et al. (2022) argue that the key role of the demonstration lies in its specification of the input space, the label space and the prompt format, rather than the genuineness of the examples: they found that randomly replacing labels in the demonstration barely hurts performance on classification tasks. We re-examine this argument in the context of MT by studying three prompting settings: 1) random examples, constructing sentence pairs from monolingual sources and targets at random; 2) source example only and 3) target example only, using monolingual source or target sentences alone for prompting. A sketch of these constructions follows.
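Note that the exact prompt rendering of a source- or target-only example is our guess, since the paper does not spell out the format:

```python
# Hypothetical construction of the three monolingual settings.
import random

def random_pair_demo(mono_src, mono_tgt, k):
    # Setting 1: pair unrelated source and target sentences at random.
    return list(zip(random.sample(mono_src, k), random.sample(mono_tgt, k)))

def one_side_demo(sentences, lang, k):
    # Settings 2/3: source-only or target-only sentences, no counterpart.
    return "\n".join(f"{lang}: {s}" for s in random.sample(sentences, k))
```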
Directly using monolingual data in the demonstration does not work. Figure 4 (top) shows a totally different story (see Figures 8 and 9, Appendix, for more results): monolingual-example demonstrations almost always hurt translation, and the more examples used, the larger the degradation. Random examples mislead the prompting and perform worst in general; compared to target-only examples, source-only examples yield slightly better results except when translating into Chinese. This indicates that the genuine source-target mapping should be retained in the demonstration, and also that MT poses unique challenges which deserve more attention when studying prompting.

Pseudo-parallel examples built by forward-/back-translation benefit prompting. Inspired by data augmentation in MT (Sennrich et al., 2016b; Zhang and Zong, 2016), we next resort to constructing pseudo-parallel data. We first use GLM-130B to translate the source or target examples via zero-shot prompting, and then use the generated parallel examples as the demonstration. Despite their low quality, Figure 4 (bottom) shows that this is an effective way to improve prompting, and using more examples often produces better results. We also observe that back-translation (translating target-side monolingual examples) performs better and more robustly than forward-translation (translating source-side examples instead), and even approaches prompting with real parallel examples.
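A sketch of the back-translation variant, with `generate` standing in for a zero-shot call to the frozen LLM (not a real API):

```python
# Back-translation for prompting: translate target-side monolingual
# sentences into the source language via zero-shot prompting, then use
# the (pseudo-source, genuine-target) pairs as the demonstration.

def back_translate_demos(mono_targets, tgt_lang, src_lang, generate):
    demos = []
    for y in mono_targets:
        pseudo_x = generate(f"{tgt_lang}: {y}\n{src_lang}:")  # reverse direction
        demos.append((pseudo_x, y))
    return demos
```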
# 5 Transfer Learning for Prompting

After obtaining a performant demonstration, we are interested in the extent to which its capability transfers across settings, especially from one domain or language pair to another and from sentence-level to document-level translation. While previous studies demonstrate the feasibility of transfer with continuous prompts on classification tasks (Wang et al., 2021), transfer for hard prompting on MT has not been investigated.

Assume that demonstrations $D_{1}$ and $D_{2}$ are selected in setting $S_{1}$ and that $D_{1}$ performs better there (i.e., $D_{1} > D_{2}$). We ask the following research questions:

- Can we also expect $D_{1} > D_{2}$ in setting $S_{2}$?
- Can demonstrations from $S_{1}$ outperform zero-shot prompting in $S_{2}$?

We next study these questions through experiments with 1-shot prompting.
The superiority of a demonstration does not generalize across settings. If the ranking $D_{1} > D_{2}$ held across settings, the results of the same set of demonstrations in different settings would show high and significant Spearman's correlations. Unfortunately, the correlations in Tables 4 and 5 are very weak and often insignificant (more results are given in Tables 15, 16 and 17, Appendix), even for the same language pair in different directions (Reversed) and for similar domains (Wiki $\Rightarrow$ WMT). This suggests that setting-specific demonstrations are needed for optimal translation quality.

Using out-of-setting demonstrations can benefit translation. However, we can still gain from out-of-setting demonstrations, as demonstrated by the positive gains in Tables 4 and 5: we find that transfer in target-shared and reversed settings is relatively easier, and that transfer across distant domains can succeed, particularly when the in-setting example pool is of low quality. This is also supported by the transfer to document-level translation, where both BLEU and document-specific metrics improve, as shown in Table 6. Results in Table 19 show that the transfer is unstable and can deliver negative results, i.e., worse than zero-shot prompting, partially resonating with previous findings (Lin et al., 2021). We leave the question of how to select prompt examples in transfer-learning setups to future work.

<table><tr><td rowspan="2">Setting</td><td colspan="2">Correlation</td><td colspan="2">Δ Quality</td></tr><tr><td>BLEU</td><td>COMET</td><td>BLEU</td><td>COMET</td></tr><tr><td>Source Shared</td><td>0.08</td><td>0.10</td><td>+0.59</td><td>+7.03</td></tr><tr><td>Target Shared</td><td>0.20</td><td>0.24</td><td>+1.32</td><td>+9.67</td></tr><tr><td>Reversed</td><td>0.15</td><td>0.06</td><td>+1.41</td><td>+11.56</td></tr></table>

Table 4: Spearman's $\rho$ and relative performance for cross-lingual transfer under 1-shot prompting on the Wiki Ablation sets (among En, De and Zh). When studying transfer from language pair $S_{1}$ to $S_{2}$, we randomly sample 300 demonstrations from the default pool of $S_{1}$ and evaluate them on the Ablation test sets for $S_{1}$ and $S_{2}$ respectively, based on which we compute the correlation. Performance is also averaged. $\Delta$ Quality: relative quality against the zero-shot baseline; positive numbers indicate gains. Source/Target Shared: average result for transfer settings where the source/target language is shared; Reversed: average result for the same language pair in the opposite direction.

<table><tr><td colspan="2">Transfer from Wiki to ⇒</td><td>WMT</td><td>IT</td><td>Medical</td></tr><tr><td rowspan="2">Correlation</td><td>En→De</td><td>0.09</td><td>0.14</td><td>0.27‡</td></tr><tr><td>De→En</td><td>0.23‡</td><td>0.20‡</td><td>0.13</td></tr><tr><td rowspan="2">Δ Quality</td><td>En→De</td><td>+4.00</td><td>+19.52</td><td>+7.80</td></tr><tr><td>De→En</td><td>+0.10</td><td>+19.46</td><td>+1.24</td></tr></table>

Table 5: Spearman's $\rho$ and relative performance (in COMET) for cross-domain transfer under 1-shot prompting. We explore transfer from Wiki to Multi-Domain using the Ablation sets. Correlation and performance are calculated as in the cross-lingual transfer, except that we sample 200 demonstrations. ${}^{\ddagger}$: statistically significant at $p < 0.01$; entries without ${}^{\ddagger}$ are insignificant.

<table><tr><td>Method</td><td>d-BLEU</td><td>TC</td><td>CP</td><td>PT</td><td>TCP</td></tr><tr><td>Zero-Shot</td><td>30.2</td><td>47.5</td><td>38.7</td><td>41.6</td><td>42.4</td></tr><tr><td>SemScore</td><td>30.5</td><td>53.0</td><td>34.4</td><td>43.2</td><td>42.9</td></tr><tr><td>LMScore</td><td>30.5</td><td>53.0</td><td>36.8</td><td>42.9</td><td>43.7</td></tr></table>

Table 6: Results for transfer from sentence-level demonstrations to document-level translation under 1-shot prompting on the PDC $\mathrm{Zh}\rightarrow \mathrm{En}$ Full sets. We split each test document in PDC into non-overlapping chunks of about 4 sentences each. SemScore/LMScore: prompt-example selection strategy, applied to PDC's default pool. We select 3 demonstrations and report average performance. d-BLEU: document-level BLEU; TC/CP/PT/TCP ($\uparrow$): document-specific metrics proposed by Sun et al. (2022).
# 6 Discussion

Although prompting enables translation with decent performance, it still suffers from several (well-known) problems. Here we briefly describe the problems we observed in the model's outputs.

Prompting sometimes rejects translating the input, instead emitting empty or off-target output, i.e., translating into a wrong target language. This occurs frequently when translating into Chinese, where the model often produces traditional Chinese with garbled characters, causing unstable performance. Besides over-relying on a language model, prompting tends to under-translate the input, copy source phrases, produce code-switched output, mistranslate entities (e.g., dates) and generate hallucinations, as illustrated in Table 7.

<table><tr><td>Source</td><td>根据三江源国家公园管理局长江源园区可可西里管理处统计,藏羚羊回迁数量总体呈逐年上升态势,2019年藏羚羊回迁数量为4860只,比2018年增加338只。</td></tr><tr><td>Reference</td><td>Statistics from the Sanjiangyuan National Park Administration Yangtze River Origin Park Hoh Xil Management Office show that the number of Tibetan antelopes on the return migration route has been increasing each year, with 4,860 counted in 2019, an increase of 338 over 2018.</td></tr><tr><td>GLM-130B (1-shot)</td><td>According to the三江源国家公园管理局长江源园区可可西里管理处, the total number of re-migration of the Tibetan antelope has been on the rise since 2018, with 4,860 re-migrating in 2109, an increase of 338 compared to 2808.</td></tr><tr><td>Prompt in Prompt</td><td>English: Dominic Raab has defended the Government's decision to re-introduce quarantine measures on Spain at short notice. Translate from English to Chinese: Chinese:</td></tr><tr><td>Reference</td><td>针对政府突然做出重新对西班牙实施隔离措施的决定,Dominic Raab做出了辩解。从英文翻译成中文:</td></tr><tr><td>GLM-130B (zero-shot)</td><td>多米尼克·拉布(Dominic Raab)对政府决定重新引入西班牙的检疫措施表示支持。Translate from English to Chinese:</td></tr></table>

Table 7: Case study of translation errors under prompting. Top: copying (in red), mistranslation of a date (in blue), misunderstanding of the source (wavy underline). Bottom: the prompt trap, where the model fails to translate the prompt phrase (in bold).

<table><tr><td rowspan="2">Setting</td><td colspan="2">0-shot</td><td colspan="2">1-shot</td></tr><tr><td>De→Zh</td><td>Zh→De</td><td>De→Zh</td><td>Zh→De</td></tr><tr><td>Direct</td><td>2.80</td><td>10.05</td><td>47.23</td><td>11.75</td></tr><tr><td>Pivoting</td><td>19.23</td><td>19.53</td><td>48.25</td><td>25.31</td></tr></table>

Table 8: COMET scores for direct vs. pivoting translation for De $\leftrightarrow$ Zh on the Wiki Full sets. In 1-shot prompting, we randomly sample 3 demonstrations and report average performance. Pivoting: source $\rightarrow$ English $\rightarrow$ target.
We also observe a phenomenon specific to prompting, which we call the prompt trap: prompting behaves unpredictably when its input is mixed with prompt-template phrases. In the second case in Table 7, the model copies the template phrases rather than translating them into Chinese. This means that translating the prompt itself (not just the input) becomes non-trivial, and that users could attack prompting-based translation systems by manipulating the input format.

We find that translation quality between German and Chinese is very poor (see Table 13). We argue that the cross-lingual ability of GLM-130B mainly centers on English (although GLM-130B was also pretrained on Chinese), and thus explore pivoting translation instead. Table 8 shows that pivoting through English greatly improves non-English translation. It remains unclear whether the current LLM pretraining recipe can achieve promising non-English-centric cross-lingual ability; we might need to consider adding parallel data to LLM pretraining or finetuning.
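Pivoting replaces one prompting call with two; a sketch, with `generate` again standing in for a zero-shot call to the frozen LLM (not a real API):

```python
# Pivot translation as in Table 8: source -> English -> target, using
# two zero-shot prompts under template A.

def pivot_translate(text, src_lang, tgt_lang, generate, pivot="English"):
    intermediate = generate(f"{src_lang}: {text}\n{pivot}:")
    return generate(f"{pivot}: {intermediate}\n{tgt_lang}:")
```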
| 223 |
+
# 7 Related Work
|
| 224 |
+
|
| 225 |
+
The capability of prompting heavily depends on its surface representation, where small modifications to the prompt could cause high variance in its performance. This inspires researchers to develop advanced prompting strategies to get the most from LLMs. Gao et al. (2021) proposed to generate prompt templates automatically using T5 (Xue et al., 2021) rather than adopting manual templates. Liu et al. (2022) reported selecting prompt examples close to the test input via a kNN-based retriever, Sorensen et al. (2022) resorted to an information-theoretic approach based on mutual information, while Zhang et al. (2022b) formulated example selection as a sequential decision problem and solved it by reinforcement learning. For reasoning tasks, Wei et al. (2022c) developed chain-of-thought (CoT) prompting letting the model output the intermediate reasoning steps, which inspires researchers to further explore CoT selection (Fu et al., 2022) and decomposition (Zhou et al., 2022). In contrast to the studies just mentioned, which focus on NLP tasks other than MT, we explore prompting strategies exclusively for translation.
|
| 226 |
+
|
| 227 |
+
Prompting uses instructions to guide LLMs, which is closely related to neural MT with special prefixes. In multilingual NMT, a target language tag is often appended to the source input to indicate the translation direction (Johnson et al., 2017; Arivazhagan et al., 2019; Zhang et al., 2020). Special attribute tags can also be used to control properties of the model output, such as politeness (Sennrich et al., 2016a), diversity (Shu et al., 2019), and quality (Caswell et al., 2019). Besides, retrieved phrases and sentences can be augmented to the input to improve translation quality (Zhang
|
| 228 |
+
|
| 229 |
+
et al., 2018; Gu et al., 2018). With the popularity of prompting LLMs, researchers see value in incorporating prompts into neural MT (Li et al., 2022; Tan et al., 2021; Garcia and First, 2022). Still, these methods rely on pretraining or finetuning the model rather than prompting frozen LLMs.
|
| 230 |
+
|
| 231 |
+
Very recently, concurrent to our work, Vilar et al. (2022) examined the capability of prompting PaLM for translation and discovered that prompting with high-quality examples even chosen randomly performs on par with or better than the one using input-relevant examples. By contrast, Agrawal et al. (2022) explored strategies to select input-specific examples, and observed that input-relevant examples based on n-gram overlap significantly improves the capability of prompts. Our study resonates with both their findings and also explains their conflict: while the quality and input-based semantic similarity correlate with prompting performance significantly, the correlation strength is unfortunately not strong enough so using them as indicators to select examples may produce mixed results. Note that apart from example selection, we also studied using monolingual data and transfer learning for MT prompting, which, to the best of our knowledge, have never been explored before.
# 8 Conclusion and Future Work
In this paper, we presented a systematic study of prompting for MT, covering topics ranging from prompting strategies and the use of unlabelled monolingual data to transfer learning. We found that both the prompt template and the selection of demonstration examples have a substantial impact on translation. Some prompt example features correlate significantly with prompting performance; treating them as criteria for example selection benefits translation to some extent, but not consistently, as the correlations are not strong enough.
Prompting for MT requires retaining source-target mapping signals in the demonstration. Directly using monolingual data for prompting sounds appealing but does not work. Constructing pseudo-parallel prompt examples by back-/forward-translation via zero-shot prompting is a simple yet effective solution (sketched below). Regarding transfer learning, we saw positive results when applying a (sentence-level) demonstration to other domains, other language pairs, or document-level translation. Unfortunately, the optimality of a demonstration does not generalize across settings, and the transfer performance is also unstable. We argue that MT poses a set of unique challenges and call for more effort on evaluating prompting LLMs for MT.
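A minimal sketch of the pseudo-parallel construction: back-translate target-side monolingual sentences with the zero-shot prompting interface itself, so every demonstration retains an explicit source-target mapping. The `translate` argument is the kind of zero-shot wrapper sketched earlier; forward-translation of source-side text is symmetric.

```python
def pseudo_examples_from_monolingual(mono_tgt, src_lang, tgt_lang, translate):
    """Synthesise (source, target) demonstration pairs from target-language
    monolingual text via zero-shot back-translation."""
    examples = []
    for tgt_sent in mono_tgt:
        src_sent = translate(tgt_sent, tgt_lang, src_lang)  # zero-shot hop
        examples.append((src_sent, tgt_sent))
    return examples
```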
Prompting also faces a number of other issues, such as off-target generation and prompt traps, which we plan to address in the future. We are also interested in examining whether our findings generalize to other LLMs, such as GPT-3, OPT, and PaLM, and in exploring further how to improve the cross-lingual ability of LLMs.
# Limitations
Our study heavily depends on the INT4-quantized GLM-130B, which, unlike GPT and PaLM, was pretrained with both bidirectional and unidirectional training objectives. The quantization might weaken the model's capability in ways that are hard to characterize, and it is unclear how well our findings generalize to other pretrained LLMs. In addition, we mainly work on three languages due to resource constraints, and results vary greatly across language pairs; increasing the coverage of experimental languages would make the results more reliable.
# Acknowledgments
This work was funded by UK Research and Innovation (UKRI) under the UK government's Horizon Europe funding guarantee [grant number 10039436 -UTTER]. The computations described in this research were performed using the Baskerville Tier 2 HPC service (https://www.baskerville.ac.uk/). Baskerville was funded by the EPSRC and UKRI through the World Class Labs scheme (EP/T022221/1) and the Digital Research Infrastructure programme (EP/W032244/1) and is operated by Advanced Research Computing at the University of Birmingham.
# References
Sweta Agrawal, Chunting Zhou, Mike Lewis, Luke Zettlemoyer, and Marjan Ghazvininejad. 2022. In-context examples selection for machine translation. arXiv preprint arXiv:2212.02437.
Roee Aharoni and Yoav Goldberg. 2020. Unsupervised domain clusters in pretrained language models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7747-7763, Online. Association for Computational Linguistics.
Farhad Akhbardeh, Arkady Arkhangorodsky, Magdalena Biesialska, Ondřej Bojar, Rajen Chatterjee, Vishrav Chaudhary, Marta R. Costa-jussà, Cristina España-Bonet, Angela Fan, Christian Federmann, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Leonie Harter, Kenneth Heafield, Christopher Homan, Matthias Huck, Kwabena Amponsah-Kaakyire, Jungo Kasai, Daniel Khashabi, Kevin Knight, Tom Kocmi, Philipp Koehn, Nicholas Lourie, Christof Monz, Makoto Morishita, Masaaki Nagata, Ajay Nagesh, Toshiaki Nakazawa, Matteo Negri, Santanu Pal, Allahsera Auguste Tapo, Marco Turchi, Valentin Vydrin, and Marcos Zampieri. 2021. Findings of the 2021 conference on machine translation (WMT21). In Proceedings of the Sixth Conference on Machine Translation, pages 1-88, Online. Association for Computational Linguistics.
Naveen Arivazhagan, Ankur Bapna, Orhan Firat, Dmitry Lepikhin, Melvin Johnson, Maxim Krikun, Mia Xu Chen, Yuan Cao, George Foster, Colin Cherry, Wolfgang Macherey, Zhifeng Chen, and Yonghui Wu. 2019. Massively multilingual neural machine translation in the wild: Findings and challenges.
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.
Isaac Caswell, Ciprian Chelba, and David Grangier. 2019. Tagged back-translation. In Proceedings of the Fourth Conference on Machine Translation (Volume 1: Research Papers), pages 53-63, Florence, Italy. Association for Computational Linguistics.
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. PaLM: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311.
Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Eric Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2022. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, and Tushar Khot. 2022. Complexity-based prompting for multi-step reasoning. arXiv preprint arXiv:2210.00720.
Tianyu Gao, Adam Fisch, and Danqi Chen. 2021. Making pre-trained language models better few-shot learners. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3816-3830, Online. Association for Computational Linguistics.
Xavier Garcia and Orhan Firat. 2022. Using natural language prompts for machine translation. arXiv preprint arXiv:2202.11822.
Tanya Goyal, Junyi Jessy Li, and Greg Durrett. 2022. News summarization and evaluation in the era of GPT-3. arXiv preprint arXiv:2209.12356.
Jiatao Gu, Yong Wang, Kyunghyun Cho, and Victor O.K. Li. 2018. Search engine guided neural machine translation. In Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, AAAI'18/IAAI'18/EAAI'18. AAAI Press.
Kevin Heffernan, Onur Çelebi, and Holger Schwenk. 2022. Bitext mining using distilled sentence representations for low-resource languages. arXiv preprint arXiv:2205.12654.
Melvin Johnson, Mike Schuster, Quoc Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Viégas, Martin Wattenberg, Greg Corrado, Macduff Hughes, and Jeffrey Dean. 2017. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 5:339-351.
Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361.
Yafu Li, Yongjing Yin, Jing Li, and Yue Zhang. 2022. Prompt-driven neural machine translation. In Findings of the Association for Computational Linguistics: ACL 2022, pages 2579-2590, Dublin, Ireland. Association for Computational Linguistics.
Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, et al. 2021. Few-shot learning with multilingual language models. arXiv preprint arXiv:2112.10668.
Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2022. What makes good in-context examples for GPT-3? In Proceedings of Deep Learning Inside Out (DeeLIO 2022): The 3rd Workshop on Knowledge Extraction and Integration for Deep Learning Architectures, pages 100-114, Dublin, Ireland and Online. Association for Computational Linguistics.
Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2022. Rethinking the role of demonstrations: What makes in-context learning work? arXiv preprint arXiv:2202.12837.
Nikita Moghe, Tom Sherborne, Mark Steedman, and Alexandra Birch. 2022. Extrinsic evaluation of machine translation metrics. arXiv preprint arXiv:2212.10297.
NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loïc Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. 2022. No language left behind: Scaling human-centered machine translation.
Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Belgium, Brussels. Association for Computational Linguistics.
Ricardo Rei, Craig Stewart, Ana C Farinha, and Alon Lavie. 2020. COMET: A neural framework for MT evaluation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2685-2702, Online. Association for Computational Linguistics.
Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzmán. 2021. WikiMatrix: Mining 135M parallel sentences in 1620 language pairs from Wikipedia. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 1351-1361, Online. Association for Computational Linguistics.
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016a. Controlling politeness in neural machine translation via side constraints. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 35-40, San Diego, California. Association for Computational Linguistics.
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016b. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computational Linguistics.
Raphael Shu, Hideki Nakayama, and Kyunghyun Cho. 2019. Generating diverse translations with sentence codes. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1823-1827, Florence, Italy. Association for Computational Linguistics.
Taylor Sorensen, Joshua Robinson, Christopher Rytting, Alexander Shaw, Kyle Rogers, Alexia Delorey, Mahmoud Khalil, Nancy Fulda, and David Wingate. 2022. An information-theoretic approach to prompt engineering without ground truth labels. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 819-862, Dublin, Ireland. Association for Computational Linguistics.
Zewei Sun, Mingxuan Wang, Hao Zhou, Chengqi Zhao, Shujian Huang, Jiajun Chen, and Lei Li. 2022. Rethinking document-level neural machine translation. In Findings of the Association for Computational Linguistics: ACL 2022, pages 3537-3548, Dublin, Ireland. Association for Computational Linguistics.
Zhixing Tan, Xiangwen Zhang, Shuo Wang, and Yang Liu. 2021. MSP: Multi-stage prompting for making pre-trained language models better translators. arXiv preprint arXiv:2110.06609.
David Vilar, Markus Freitag, Colin Cherry, Jiaming Luo, Viresh Ratnakar, and George Foster. 2022. Prompting PaLM for translation: Assessing strategies and performance. arXiv preprint arXiv:2211.09102.
Chengyu Wang, Jianing Wang, Minghui Qiu, Jun Huang, and Ming Gao. 2021. TransPrompt: Towards an automatic transferable prompting framework for few-shot text classification. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2792-2802, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V Le. 2022a. Finetuned language models are zero-shot learners. In International Conference on Learning Representations.
Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. 2022b. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682.
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022c. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903.
Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, and Colin Raffel. 2021. mT5: A massively multilingual pre-trained text-to-text transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 483-498, Online. Association for Computational Linguistics.
Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, Weng Lam Tam, Zixuan Ma, Yufei Xue, Jidong Zhai, Wenguang Chen, Peng Zhang, Yuxiao Dong, and Jie Tang. 2022. GLM-130B: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414.
Biao Zhang, Philip Williams, Ivan Titov, and Rico Sennrich. 2020. Improving massively multilingual neural machine translation and zero-shot translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1628-1639, Online. Association for Computational Linguistics.
Jiajun Zhang and Chengqing Zong. 2016. Exploiting source-side monolingual data in neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1535-1545.
Jingyi Zhang, Masao Utiyama, Eiichiro Sumita, Graham Neubig, and Satoshi Nakamura. 2018. Guiding neural machine translation with retrieved translation pieces. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1325-1335, New Orleans, Louisiana. Association for Computational Linguistics.
Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022a. OPT: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068.
Yiming Zhang, Shi Feng, and Chenhao Tan. 2022b. Active example selection for in-context learning. arXiv preprint arXiv:2211.04486.
Zihao Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. 2021. Calibrate before use: Improving few-shot performance of language models. In Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 12697-12706. PMLR.
Denny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Olivier Bousquet, Quoc Le, and Ed Chi. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625.
# A Appendix
<table><tr><td>Dataset</td><td>Language(s)</td><td>Test Set</td><td>Selection Pool (Default)</td><td>Source (#sample)</td></tr><tr><td rowspan="3">Wiki</td><td>English</td><td>100</td><td>897</td><td>FLORES eng_Latn.dev (997)</td></tr><tr><td>German</td><td>100</td><td>897</td><td>FLORES deu_Latn.dev (997)</td></tr><tr><td>Chinese</td><td>100</td><td>897</td><td>FLORES zho_Hans.dev (997)</td></tr><tr><td>WMT</td><td>English-German</td><td>100</td><td>2900</td><td>newstest2013 (3000)</td></tr><tr><td>IT</td><td>German-English</td><td>100</td><td>1900</td><td>Multi-Domain Dev Set (2000)</td></tr><tr><td>Medical</td><td>German-English</td><td>100</td><td>1900</td><td>Multi-Domain Dev Set (2000)</td></tr></table>
(a) Ablation Sets
<table><tr><td>Dataset</td><td>Languages</td><td>Source</td><td>Test Set</td><td>High-quality Pool (Default)</td><td>Low-quality Pool</td></tr><tr><td rowspan="3">Wiki</td><td>English</td><td>FLORES</td><td>eng_Latn.devtest (1012)</td><td>eng_Latn.dev (997)</td><td>En-Zh* (0.79M)</td></tr><tr><td>German</td><td>FLORES</td><td>deu_Latn.devtest (1012)</td><td>deu_Latn.dev (997)</td><td>De-En* (1.57M)</td></tr><tr><td>Chinese</td><td>FLORES</td><td>zho_Hans.devtest (1012)</td><td>zho_Hans.dev (997)</td><td>De-Zh* (0.13M)</td></tr><tr><td rowspan="2">WMT</td><td>English-German</td><td>WMT</td><td>newstest2021 (1002/1000)</td><td>newstest2020 (1418)</td><td></td></tr><tr><td>English-Chinese</td><td>WMT</td><td>newstest2021 (1002/1948)</td><td>newstest2020 (1418)</td><td></td></tr><tr><td>IT</td><td>German-English</td><td>Multi-Domain</td><td>Test Set (2000)</td><td>-</td><td>Train Set (0.22M)</td></tr><tr><td>Law</td><td>German-English</td><td>Multi-Domain</td><td>Test Set (2000)</td><td>-</td><td>Train Set (0.47M)</td></tr><tr><td>Medical</td><td>German-English</td><td>Multi-Domain</td><td>Test Set (2000)</td><td>-</td><td>Train Set (0.25M)</td></tr><tr><td>PDC</td><td>Chinese-English</td><td>News</td><td>Test Set (4858/148 Docs)</td><td>Dev Set (2881)</td><td>-</td></tr></table>
(b) Full Sets
Table 9: Statistics of Ablation sets and Full sets. Numbers in brackets denote the number of instances. $\star$ : data from WikiMatrix.v1 (Schwenk et al., 2021).
<table><tr><td rowspan="3">ID</td><td colspan="7">BLEU</td><td colspan="7">COMET</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td></tr><tr><td colspan="15">English Template Without Line Break</td></tr><tr><td>A</td><td>38.00</td><td>23.10</td><td>23.30</td><td>12.10</td><td>31.50</td><td>27.90</td><td>25.98</td><td>70.83</td><td>41.95</td><td>4.34</td><td>15.92</td><td>35.68</td><td>63.98</td><td>38.78</td></tr><tr><td>B</td><td>8.30</td><td>9.00</td><td>2.80</td><td>2.40</td><td>6.60</td><td>8.20</td><td>6.22</td><td>-45.75</td><td>-70.27</td><td>-140.43</td><td>-119.82</td><td>-112.38</td><td>-43.10</td><td>-88.62</td></tr><tr><td>C</td><td>30.60</td><td>2.10</td><td>5.50</td><td>1.10</td><td>1.10</td><td>8.30</td><td>8.12</td><td>29.78</td><td>-142.36</td><td>-117.20</td><td>-117.14</td><td>-120.57</td><td>-58.32</td><td>-87.63</td></tr><tr><td>D</td><td>26.10</td><td>0.00</td><td>5.10</td><td>0.00</td><td>0.20</td><td>0.60</td><td>5.33</td><td>-1.20</td><td>-160.59</td><td>-124.15</td><td>-157.62</td><td>-130.51</td><td>-108.71</td><td>-113.80</td></tr><tr><td>E</td><td>35.90</td><td>18.20</td><td>26.10</td><td>9.60</td><td>16.00</td><td>22.30</td><td>21.35</td><td>68.06</td><td>5.41</td><td>27.53</td><td>-6.46</td><td>-5.58</td><td>35.93</td><td>20.81</td></tr><tr><td>F</td><td>33.50</td><td>5.60</td><td>25.10</td><td>0.80</td><td>0.20</td><td>9.10</td><td>12.38</td><td>61.09</td><td>-62.31</td><td>22.71</td><td>-112.79</td><td>-50.84</td><td>-20.71</td><td>-27.14</td></tr><tr><td colspan="15">English Template With Line Break</td></tr><tr><td>A</td><td>36.60</td><td>21.80</td><td>25.10</td><td>11.40</td><td>26.90</td><td>26.90</td><td>24.78</td><td>67.97</td><td>37.41</td><td>7.24</td><td>9.46</td><td>4.89</td><td>60.08</td><td>31.17</td></tr><tr><td>B</td><td>7.70</td><td>7.70</td><td>5.00</td><td>2.70</td><td>13.20</td><td>10.00</td><td>7.72</td><td>-85.97</td><td>-81.79</td><td>-126.58</td><td>-113.27</td><td>-55.64</td><td>-48.82</td><td>-85.35</td></tr><tr><td>C</td><td>28.00</td><td>4.40</td><td>7.70</td><td>0.70</td><td>13.30</td><td>14.50</td><td>11.43</td><td>36.10</td><td>-99.01</td><td>-118.99</td><td>-133.39</td><td>-74.19</td><td>-23.00</td><td>-68.75</td></tr><tr><td>D</td><td>25.20</td><td>1.60</td><td>4.20</td><td>0.10</td><td>4.90</td><td>5.40</td><td>6.90</td><td>13.96</td><td>-121.58</td><td>-125.36</td><td>-148.29</td><td>-78.78</td><td>-74.91</td><td>-89.16</td></tr><tr><td>E</td><td>35.70</td><td>20.00</td><td>24.40</td><td>3.90</td><td>28.30</td><td>20.30</td><td>22.10</td><td>66.08</td><td>22.21</td><td>15.62</td><td>-55.41</td><td>13.36</td><td>38.30</td><td>16.69</td></tr><tr><td>F</td><td>33.60</td><td>9.30</td><td>23.60</td><td>3.00</td><td>6.70</td><td>17.90</td><td>15.68</td><td>57.46</td><td>-45.84</td><td>14.73</td><td>-69.69</td><td>-30.63</td><td>32.68</td><td>-6.88</td></tr><tr><td colspan="15">German Template Without Line 
Break</td></tr><tr><td>A</td><td>20.00</td><td>15.70</td><td>1.60</td><td>3.10</td><td>0.70</td><td>7.10</td><td>8.03</td><td>23.09</td><td>4.61</td><td>-70.84</td><td>-47.51</td><td>-65.61</td><td>-0.66</td><td>-26.15</td></tr><tr><td>B</td><td>5.60</td><td>2.10</td><td>0.10</td><td>1.60</td><td>0.20</td><td>1.10</td><td>1.78</td><td>-82.99</td><td>-152.26</td><td>-174.72</td><td>-132.06</td><td>-162.79</td><td>-110.99</td><td>-135.97</td></tr><tr><td>C</td><td>4.60</td><td>5.40</td><td>0.30</td><td>3.70</td><td>0.00</td><td>4.10</td><td>3.02</td><td>-57.63</td><td>-108.36</td><td>-120.99</td><td>-125.18</td><td>-135.21</td><td>-90.42</td><td>-106.30</td></tr><tr><td>D</td><td>3.50</td><td>0.10</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.10</td><td>0.62</td><td>-115.55</td><td>-168.13</td><td>-166.07</td><td>-169.21</td><td>-161.27</td><td>-142.57</td><td>-153.80</td></tr><tr><td>E</td><td>17.30</td><td>19.00</td><td>0.20</td><td>8.50</td><td>2.30</td><td>19.60</td><td>11.15</td><td>14.19</td><td>6.47</td><td>-100.92</td><td>-25.14</td><td>-50.42</td><td>9.85</td><td>-24.33</td></tr><tr><td>F</td><td>6.30</td><td>4.80</td><td>0.20</td><td>7.30</td><td>0.10</td><td>11.70</td><td>5.07</td><td>3.88</td><td>-65.86</td><td>-44.76</td><td>-27.91</td><td>-60.31</td><td>-11.22</td><td>-34.36</td></tr><tr><td colspan="15">German Template With Line Break</td></tr><tr><td>A</td><td>25.40</td><td>20.20</td><td>6.40</td><td>3.50</td><td>8.00</td><td>9.20</td><td>12.12</td><td>38.47</td><td>31.45</td><td>-80.14</td><td>-47.22</td><td>-50.26</td><td>8.84</td><td>-16.48</td></tr><tr><td>B</td><td>15.60</td><td>7.80</td><td>2.60</td><td>1.00</td><td>0.50</td><td>0.80</td><td>4.72</td><td>-20.65</td><td>-81.28</td><td>-125.21</td><td>-137.02</td><td>-125.31</td><td>-108.45</td><td>-99.65</td></tr><tr><td>C</td><td>15.40</td><td>5.70</td><td>5.70</td><td>3.00</td><td>6.00</td><td>6.70</td><td>7.08</td><td>-23.46</td><td>-80.15</td><td>-86.27</td><td>-104.10</td><td>-87.18</td><td>-58.23</td><td>-73.23</td></tr><tr><td>D</td><td>2.80</td><td>0.50</td><td>0.00</td><td>0.00</td><td>0.10</td><td>1.10</td><td>0.75</td><td>-95.30</td><td>-154.76</td><td>-140.51</td><td>-155.91</td><td>-137.36</td><td>-100.08</td><td>-130.65</td></tr><tr><td>E</td><td>24.70</td><td>19.50</td><td>10.40</td><td>8.50</td><td>11.10</td><td>17.20</td><td>15.23</td><td>35.12</td><td>3.95</td><td>-62.48</td><td>-18.32</td><td>-27.61</td><td>35.26</td><td>-5.68</td></tr><tr><td>F</td><td>7.60</td><td>17.20</td><td>0.50</td><td>8.60</td><td>3.90</td><td>11.30</td><td>8.18</td><td>13.01</td><td>9.10</td><td>-43.63</td><td>-10.88</td><td>-46.46</td><td>23.54</td><td>-9.22</td></tr><tr><td colspan="15">Chinese Template Without Line 
Break</td></tr><tr><td>A</td><td>37.60</td><td>15.50</td><td>28.30</td><td>2.10</td><td>33.40</td><td>15.10</td><td>22.00</td><td>67.41</td><td>-5.40</td><td>45.24</td><td>-74.78</td><td>53.71</td><td>2.72</td><td>14.82</td></tr><tr><td>B</td><td>23.60</td><td>6.30</td><td>14.50</td><td>0.50</td><td>19.30</td><td>1.90</td><td>11.02</td><td>-6.41</td><td>-90.63</td><td>-12.10</td><td>-159.66</td><td>-9.24</td><td>-121.29</td><td>-66.55</td></tr><tr><td>C</td><td>11.40</td><td>3.20</td><td>14.30</td><td>0.40</td><td>20.80</td><td>5.00</td><td>9.18</td><td>-32.55</td><td>-114.57</td><td>-9.91</td><td>-140.54</td><td>2.89</td><td>-85.58</td><td>-63.38</td></tr><tr><td>D</td><td>17.10</td><td>6.40</td><td>15.90</td><td>0.20</td><td>19.60</td><td>1.90</td><td>10.18</td><td>-34.15</td><td>-101.69</td><td>-24.36</td><td>-166.15</td><td>-9.20</td><td>-125.20</td><td>-76.79</td></tr><tr><td>E</td><td>29.00</td><td>8.00</td><td>27.00</td><td>0.40</td><td>34.90</td><td>16.10</td><td>19.23</td><td>35.55</td><td>-63.09</td><td>37.06</td><td>-119.13</td><td>54.14</td><td>3.80</td><td>-8.61</td></tr><tr><td>F</td><td>31.70</td><td>3.70</td><td>24.80</td><td>0.10</td><td>27.20</td><td>11.80</td><td>16.55</td><td>35.65</td><td>-105.74</td><td>22.97</td><td>-129.71</td><td>5.61</td><td>-34.09</td><td>-34.22</td></tr><tr><td colspan="15">Chinese Template With Line Break</td></tr><tr><td>A</td><td>26.80</td><td>14.70</td><td>24.70</td><td>3.30</td><td>33.80</td><td>22.90</td><td>21.03</td><td>24.46</td><td>-84.74</td><td>24.76</td><td>-64.07</td><td>52.65</td><td>40.45</td><td>-1.08</td></tr><tr><td>B</td><td>23.70</td><td>6.30</td><td>11.90</td><td>0.10</td><td>14.40</td><td>0.60</td><td>9.50</td><td>-11.65</td><td>-102.50</td><td>-63.95</td><td>-161.96</td><td>-46.84</td><td>-128.12</td><td>-85.84</td></tr><tr><td>C</td><td>12.10</td><td>3.00</td><td>13.80</td><td>0.80</td><td>21.20</td><td>9.90</td><td>10.13</td><td>-36.39</td><td>-105.55</td><td>-42.16</td><td>-151.06</td><td>-15.41</td><td>-74.90</td><td>-70.91</td></tr><tr><td>D</td><td>14.10</td><td>3.20</td><td>15.10</td><td>0.20</td><td>20.00</td><td>2.50</td><td>9.18</td><td>-19.15</td><td>-106.69</td><td>-19.34</td><td>-154.73</td><td>-11.51</td><td>-94.82</td><td>-67.71</td></tr><tr><td>E</td><td>28.60</td><td>8.00</td><td>26.50</td><td>0.90</td><td>32.30</td><td>21.40</td><td>19.62</td><td>8.71</td><td>-118.14</td><td>15.34</td><td>-124.30</td><td>21.18</td><td>14.91</td><td>-30.38</td></tr><tr><td>F</td><td>26.90</td><td>3.40</td><td>26.10</td><td>0.20</td><td>25.80</td><td>16.00</td><td>16.40</td><td>11.58</td><td>-120.31</td><td>10.33</td><td>-129.61</td><td>-21.19</td><td>-20.52</td><td>-44.95</td></tr></table>
Table 10: Detailed zero-shot results for prompting with different templates and different template languages on Wiki Ablation sets. Template A in English achieves the overall best performance measured by BLEU and COMET. Avg: average result over different language pairs. Best results in each section are underlined; best results in each column are in bold.
Figure 5: COMET (top) and BLEU (bottom) scores for few-shot prompting as a function of the number of prompt examples $(K = 1,5,10,20)$ on Wiki Ablation sets. For each setup, we randomly sample 100 times from the example pool and show the performance distribution via box plots. The dashed red line denotes the zero-shot baseline; the blue curve and shaded area denote the mean and standard deviation.
Figure 6: Scatter plots between BLEU and LMScore for 1-shot prompting on Wiki De $\leftrightarrow$ En and En $\leftrightarrow$ Zh Ablation sets.

Figure 7: Scatter plots between COMET/BLEU and LMScore for 1-shot prompting on Wiki De $\leftrightarrow$ Zh Ablation sets. (a) COMET vs. LMScore; (b) BLEU vs. LMScore.
<table><tr><td rowspan="3">Method</td><td colspan="6">High-quality Examples</td><td colspan="6">Plusll Low-quality Examples</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td>En ↔ Zh</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td></tr><tr><td colspan="13">Correlation with COMET</td></tr><tr><td>SLength</td><td>0.02</td><td>0.18‡</td><td>0.24‡</td><td>0.12‡</td><td>0.26‡</td><td>0.01</td><td>0.14</td><td>0.09‡</td><td>0.20‡</td><td>0.52‡</td><td>0.44‡</td><td>0.24‡</td></tr><tr><td>TLength</td><td>-0.01</td><td>0.23‡</td><td>0.19‡</td><td>0.27‡</td><td>0.29‡</td><td>0.06</td><td>0.17</td><td>0.06†</td><td>0.35‡</td><td>0.41‡</td><td>0.57‡</td><td>0.25‡</td></tr><tr><td>LMScore</td><td>0.06</td><td>0.23‡</td><td>0.01</td><td>0.20‡</td><td>0.12‡</td><td>0.21‡</td><td>0.14</td><td>0.19‡</td><td>0.38‡</td><td>0.35‡</td><td>0.51‡</td><td>0.16‡</td></tr><tr><td>MTScore</td><td>0.01</td><td>0.05</td><td>0.11‡</td><td>0.12‡</td><td>0.06</td><td>0.28‡</td><td>0.11</td><td>0.13‡</td><td>0.04</td><td>0.30‡</td><td>0.23‡</td><td>0.18‡</td></tr><tr><td>SemScore</td><td>0.11‡</td><td>0.17‡</td><td>0.11‡</td><td>0.15‡</td><td>0.10‡</td><td>0.31‡</td><td>0.16</td><td>0.12‡</td><td>0.24‡</td><td>0.42‡</td><td>0.50‡</td><td>0.17‡</td></tr><tr><td>CaseSemScore-Src</td><td>-0.01</td><td>0.20‡</td><td>0.22‡</td><td>0.08†</td><td>0.18‡</td><td>-0.03</td><td>0.11</td><td>0.08‡</td><td>0.29‡</td><td>0.53‡</td><td>0.49‡</td><td>0.26‡</td></tr><tr><td>CaseSemScore-Tgt</td><td>-0.01</td><td>0.22‡</td><td>0.25‡</td><td>0.14‡</td><td>0.21‡</td><td>0.05</td><td>0.14</td><td>0.09‡</td><td>0.32‡</td><td>0.53‡</td><td>0.53‡</td><td>0.27‡</td></tr><tr><td colspan="13">Correlation with BLEU</td></tr><tr><td>SLength</td><td>0.20‡</td><td>0.27‡</td><td>0.21‡</td><td>0.11‡</td><td>0.33‡</td><td>0.12‡</td><td>0.21</td><td>0.23‡</td><td>0.30‡</td><td>0.51‡</td><td>0.35‡</td><td>0.29‡</td></tr><tr><td>TLength</td><td>0.15‡</td><td>0.32‡</td><td>0.16‡</td><td>0.22‡</td><td>0.40‡</td><td>0.12‡</td><td>0.23</td><td>0.15‡</td><td>0.38‡</td><td>0.41‡</td><td>0.47‡</td><td>0.33‡</td></tr><tr><td>LMScore</td><td>0.14‡</td><td>0.17‡</td><td>0.10‡</td><td>0.24‡</td><td>0.27‡</td><td>0.26‡</td><td>0.20</td><td>0.23‡</td><td>0.30‡</td><td>0.39‡</td><td>0.46‡</td><td>0.27‡</td></tr><tr><td>MTScore</td><td>0.03</td><td>-0.05</td><td>0.04</td><td>0.09†</td><td>0.03</td><td>0.12‡</td><td>0.04</td><td>0.11‡</td><td>-0.04</td><td>0.26‡</td><td>0.19‡</td><td>0.17‡</td></tr><tr><td>SemScore</td><td>0.13‡</td><td>0.11‡</td><td>0.15‡</td><td>0.20‡</td><td>0.25‡</td><td>0.29‡</td><td>0.19</td><td>0.13‡</td><td>0.20‡</td><td>0.45‡</td><td>0.45‡</td><td>0.28‡</td></tr><tr><td>CaseSemScore-Src</td><td>0.16‡</td><td>0.15‡</td><td>0.18‡</td><td>0.03</td><td>0.28‡</td><td>0.03</td><td>0.14</td><td>0.20‡</td><td>0.29‡</td><td>0.51‡</td><td>0.36‡</td><td>0.31‡</td></tr><tr><td>CaseSemScore-Tgt</td><td>0.14‡</td><td>0.17‡</td><td>0.16‡</td><td>0.05</td><td>0.24‡</td><td>0.09†</td><td>0.14</td><td>0.18‡</td><td>0.30‡</td><td>0.49‡</td><td>0.39‡</td><td>0.29‡</td></tr></table>
Table 11: Detailed Spearman's $\rho$ between demonstration features and their prompting performance (COMET and BLEU) for 1-shot prompting on Wiki Ablation sets. We randomly sample 600 demonstrations from each pool to calculate the correlation. High-quality examples are from the default selection pool, while Low-quality examples are from WikiMatrix.v1. $\dagger / \ddagger$ : statistically significant at $p < 0.05 / 0.01$ . Gray cells indicate insignificance; Red cells indicate $\rho > 0.5$ .
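For reference, each cell of Table 11 can in principle be reproduced as below: pair one feature value per sampled demonstration (e.g., its LMScore) with the BLEU or COMET it yields under 1-shot prompting, then compute Spearman's ρ. `scipy.stats.spearmanr` also returns the p-value behind the significance markers; assembling the 600 samples is assumed.

```python
from scipy.stats import spearmanr

def feature_metric_correlation(feature_values, metric_values):
    """feature_values[i] and metric_values[i] belong to the same sampled
    demonstration; returns (rho, p) as reported per cell in Table 11."""
    rho, p = spearmanr(feature_values, metric_values)
    return rho, p
```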
<table><tr><td rowspan="4">En→Zh</td><td>Source</td><td>Coordinates: 19°43' 10" S 63°18' 00" E / 19.71944°S 63.30000°E / -19.71944; 63.30000</td></tr><tr><td>Target</td><td>坐标: 19°43' 10" S 63°18' 00" E / 19.71944°S 63.30000°E / -19.71944; 63.30000</td></tr><tr><td>Source</td><td>SAO 40012 is HD 277559.</td></tr><tr><td>Target</td><td>SAO 40012是HD 277559。</td></tr><tr><td rowspan="4">En→De</td><td>Source</td><td>2002 and 2004.</td></tr><tr><td>Target</td><td>2002 und 2004.</td></tr><tr><td>Source</td><td>Brinton, Lauren and Leslie Arnovick.</td></tr><tr><td>Target</td><td>Brinton, Lauren und Leslie Arnovick.</td></tr></table>
Table 12: Top-ranked parallel examples according to SemScore on WikiMatrix.v1 En-De and En-Zh. Despite showing high semantic similarity, these examples are not very informative; we thus dropped them during selection.
Figure 8: Results for few-shot prompting with monolingual data on Wiki Ablation sets for De $\leftrightarrow$ Zh. (a) COMET; (b) BLEU.
<table><tr><td rowspan="3">Method</td><td colspan="7">BLEU</td><td colspan="6">COMET</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td></tr><tr><td>Zero-Shot</td><td>37.80</td><td>20.50</td><td>21.70</td><td>9.60</td><td>28.60</td><td>26.30</td><td>24.08</td><td>68.30</td><td>29.96</td><td>2.80</td><td>10.05</td><td>29.17</td><td>63.25</td></tr><tr><td colspan="14">1-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>37.67</td><td>21.23</td><td>28.70</td><td>9.07</td><td>34.87</td><td>26.30</td><td>26.31</td><td>68.77</td><td>35.56</td><td>47.23</td><td>11.75</td><td>60.69</td><td>65.75</td></tr><tr><td>SemScore</td><td>38.40</td><td>21.37</td><td>29.17</td><td>9.47</td><td>35.50</td><td>26.50</td><td>26.73</td><td>69.04</td><td>36.06</td><td>48.79</td><td>14.63</td><td>60.54</td><td>66.98</td></tr><tr><td>LMScore</td><td>37.80</td><td>21.43</td><td>28.13</td><td>9.40</td><td>35.40</td><td>26.73</td><td>26.48</td><td>68.55</td><td>35.49</td><td>43.54</td><td>13.14</td><td>59.84</td><td>66.98</td></tr><tr><td>TLength</td><td>37.00</td><td>21.80</td><td>28.57</td><td>9.47</td><td>35.90</td><td>26.53</td><td>26.54</td><td>67.79</td><td>37.00</td><td>45.66</td><td>13.63</td><td>61.87</td><td>66.45</td></tr><tr><td colspan="14">5-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>39.03</td><td>22.00</td><td>29.37</td><td>10.07</td><td>37.07</td><td>27.20</td><td>27.46</td><td>70.30</td><td>36.46</td><td>51.77</td><td>16.74</td><td>63.77</td><td>67.62</td></tr><tr><td>SemScore</td><td>38.13</td><td>21.93</td><td>30.50</td><td>10.20</td><td>36.87</td><td>26.50</td><td>27.36</td><td>70.12</td><td>38.40</td><td>52.29</td><td>16.88</td><td>64.40</td><td>67.85</td></tr><tr><td>LMScore</td><td>38.87</td><td>22.03</td><td>30.20</td><td>9.97</td><td>35.83</td><td>26.13</td><td>27.17</td><td>69.74</td><td>37.01</td><td>51.01</td><td>16.63</td><td>61.74</td><td>67.74</td></tr><tr><td>TLength</td><td>38.57</td><td>22.00</td><td>29.50</td><td>10.00</td><td>35.90</td><td>26.53</td><td>27.08</td><td>68.94</td><td>37.16</td><td>50.80</td><td>15.80</td><td>63.01</td><td>67.29</td></tr><tr><td colspan="14">1-shot Translation (Low-quality Pool)</td></tr><tr><td>Random</td><td>36.73</td><td>20.53</td><td>22.23</td><td>8.23</td><td>34.63</td><td>26.13</td><td>24.75</td><td>66.82</td><td>34.15</td><td>10.11</td><td>-1.94</td><td>57.97</td><td>66.08</td></tr><tr><td>Ours</td><td>37.90</td><td>21.27</td><td>20.50</td><td>9.37</td><td>34.47</td><td>26.17</td><td>24.94</td><td>68.46</td><td>33.78</td><td>0.19</td><td>12.07</td><td>58.05</td><td>66.75</td></tr></table>
Table 13: Detailed test results for zero-shot and few-shot prompting on Wiki Full sets with different selection strategies. Ours: the proposed combined strategy; Random: random sampling; SemScore, LMScore and TLength denote selecting top-ranked examples based on the corresponding feature values. We select 3 demonstrations for each setup and report the average. Avg: average result over language pairs. Underlined results denote the best in each section, while Bold results are the overall best.
<table><tr><td rowspan="3">Method</td><td colspan="5">BLEU</td><td colspan="5">COMET</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td><td colspan="2">De ↔ En</td><td colspan="2">En ↔ Zh</td><td rowspan="2">Avg</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td></tr><tr><td>Zero-Shot</td><td>28.30</td><td>15.70</td><td>20.70</td><td>16.80</td><td>20.38</td><td>46.01</td><td>13.32</td><td>4.63</td><td>7.92</td><td>17.97</td></tr><tr><td colspan="11">1-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>25.63</td><td>16.37</td><td>26.03</td><td>17.03</td><td>21.27</td><td>45.90</td><td>16.89</td><td>40.88</td><td>19.14</td><td>30.70</td></tr><tr><td>SemScore</td><td>26.90</td><td>16.03</td><td>26.30</td><td>18.07</td><td>21.82</td><td>46.39</td><td>15.13</td><td>41.13</td><td>22.49</td><td>31.28</td></tr><tr><td>LMScore</td><td>27.53</td><td>15.70</td><td>25.43</td><td>17.70</td><td>21.59</td><td>47.47</td><td>17.53</td><td>38.95</td><td>19.29</td><td>30.81</td></tr><tr><td>TLength</td><td>25.60</td><td>16.33</td><td>25.80</td><td>17.43</td><td>21.29</td><td>43.47</td><td>18.24</td><td>42.17</td><td>18.82</td><td>30.68</td></tr><tr><td colspan="11">5-Shot Translation (high-quality pool)</td></tr><tr><td>Random</td><td>26.40</td><td>17.10</td><td>26.23</td><td>17.53</td><td>21.82</td><td>48.36</td><td>20.19</td><td>43.97</td><td>22.95</td><td>33.87</td></tr><tr><td>SemScore</td><td>27.30</td><td>16.57</td><td>26.93</td><td>18.67</td><td>22.37</td><td>49.33</td><td>18.83</td><td>43.49</td><td>25.54</td><td>34.30</td></tr><tr><td>LMScore</td><td>25.90</td><td>16.87</td><td>26.47</td><td>18.93</td><td>22.04</td><td>47.77</td><td>20.83</td><td>44.76</td><td>27.41</td><td>35.19</td></tr><tr><td>TLength</td><td>25.80</td><td>17.03</td><td>26.55</td><td>17.63</td><td>21.75</td><td>47.34</td><td>20.78</td><td>45.17</td><td>23.85</td><td>34.29</td></tr><tr><td colspan="11">1-shot Translation (Low-quality Pool)</td></tr><tr><td>Random</td><td>27.33</td><td>15.53</td><td>25.30</td><td>20.07</td><td>22.06</td><td>45.29</td><td>14.21</td><td>36.83</td><td>26.49</td><td>30.70</td></tr><tr><td>Ours</td><td>27.63</td><td>15.97</td><td>25.23</td><td>20.10</td><td>22.23</td><td>47.16</td><td>15.01</td><td>34.48</td><td>26.82</td><td>30.87</td></tr></table>
Table 14: Detailed test results on WMT Full sets.
<table><tr><td rowspan="3" colspan="2">Method</td><td colspan="6">BLEU</td><td colspan="6">COMET</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td></tr><tr><td rowspan="6">Prompt Language</td><td>De→En</td><td>-</td><td>0.06</td><td>0.08</td><td>0.12†</td><td>0.13†</td><td>0.13†</td><td>-</td><td>-0.02</td><td>0.09</td><td>0.12†</td><td>-0.01</td><td>0.21‡</td></tr><tr><td>En→De</td><td>0.07</td><td>-</td><td>0.14‡</td><td>0.19‡</td><td>0.17‡</td><td>0.11†</td><td>0.01</td><td>-</td><td>0.07</td><td>0.21‡</td><td>0.14‡</td><td>0.17‡</td></tr><tr><td>De→Zh</td><td>-0.08</td><td>0.06</td><td>-</td><td>0.14‡</td><td>0.24‡</td><td>-0.05</td><td>0.02</td><td>0.15‡</td><td>-</td><td>0.08</td><td>0.40‡</td><td>0.02</td></tr><tr><td>Zh→De</td><td>0.00</td><td>0.26‡</td><td>0.26‡</td><td>-</td><td>0.05</td><td>0.01</td><td>-0.03</td><td>0.21‡</td><td>0.22‡</td><td>-</td><td>0.13†</td><td>0.15‡</td></tr><tr><td>En→Zh</td><td>0.01</td><td>-0.01</td><td>0.24‡</td><td>0.25‡</td><td>-</td><td>0.19‡</td><td>0.04</td><td>-0.01</td><td>0.22‡</td><td>0.21‡</td><td>-</td><td>0.03</td></tr><tr><td>Zh→En</td><td>0.15‡</td><td>-0.16‡</td><td>0.14‡</td><td>0.34‡</td><td>0.15‡</td><td>-</td><td>0.25‡</td><td>0.09</td><td>0.14‡</td><td>0.21‡</td><td>0.03</td><td>-</td></tr></table>
Table 15: Detailed Spearman's $\rho$ for cross-lingual transfer under 1-shot prompting on Wiki Ablation sets. Gray cells indicate insignificance.
Figure 9: BLEU scores for few-shot prompting with monolingual data on Wiki Ablation sets.
<table><tr><td rowspan="3" colspan="2">Method</td><td colspan="6">BLEU</td><td colspan="6">COMET</td></tr><tr><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td><td colspan="2">De ↔ En</td><td colspan="2">De ↔ Zh</td><td colspan="2">En ↔ Zh</td></tr><tr><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td><td>→</td><td>←</td></tr><tr><td rowspan="6">Prompt Language</td><td>De→En</td><td>-</td><td>-0.32</td><td>5.02</td><td>-0.86</td><td>1.29</td><td>0.00</td><td>-</td><td>-1.08</td><td>35.04</td><td>2.71</td><td>7.00</td><td>-0.01</td></tr><tr><td>En→De</td><td>-0.69</td><td>-</td><td>3.88</td><td>-0.69</td><td>1.21</td><td>-0.41</td><td>-0.46</td><td>-</td><td>26.01</td><td>1.56</td><td>6.31</td><td>-2.40</td></tr><tr><td>De→Zh</td><td>-0.63</td><td>-0.48</td><td>-</td><td>-0.65</td><td>4.38</td><td>0.04</td><td>0.92</td><td>-3.68</td><td>-</td><td>4.16</td><td>23.51</td><td>-0.34</td></tr><tr><td>Zh→De</td><td>-0.66</td><td>-0.86</td><td>6.84</td><td>-</td><td>3.23</td><td>0.19</td><td>0.71</td><td>-6.15</td><td>43.67</td><td>-</td><td>17.54</td><td>0.51</td></tr><tr><td>En→Zh</td><td>-1.54</td><td>-1.17</td><td>6.23</td><td>-1.44</td><td>-</td><td>-1.50</td><td>-6.00</td><td>-4.47</td><td>41.77</td><td>-1.79</td><td>-</td><td>-2.20</td></tr><tr><td>Zh→En</td><td>-1.12</td><td>-1.00</td><td>1.78</td><td>-1.11</td><td>4.81</td><td>-</td><td>-2.63</td><td>-3.85</td><td>15.25</td><td>3.90</td><td>25.29</td><td>-</td></tr></table>
Table 16: Detailed translation results (relative against the zero-shot baseline) for cross-lingual transfer under 1-shot prompting on Wiki Ablation sets. Blue cells indicate positive gains.
<table><tr><td colspan="2">Transfer from Wiki to ⇒</td><td>WMT</td><td>IT</td><td>Medical</td></tr><tr><td rowspan="2">Correlation</td><td>En→De</td><td>0.05</td><td>0.11</td><td>0.15†</td></tr><tr><td>De→En</td><td>-0.25‡</td><td>0.19‡</td><td>0.07</td></tr><tr><td rowspan="2">Δ Quality</td><td>En→De</td><td>-0.45</td><td>+0.88</td><td>-0.21</td></tr><tr><td>De→En</td><td>-0.43</td><td>+1.00</td><td>+0.77</td></tr></table>
Table 17: Spearman's $\rho$ and relative performance (in BLEU) for cross-domain transfer under 1-shot prompting.
<table><tr><td rowspan="2">Setting</td><td colspan="2">0-shot</td><td colspan="2">1-shot</td></tr><tr><td>De→Zh</td><td>Zh→De</td><td>De→Zh</td><td>Zh→De</td></tr><tr><td>Direct</td><td>21.70</td><td>9.60</td><td>28.70</td><td>9.07</td></tr><tr><td>Pivoting</td><td>24.40</td><td>11.50</td><td>29.47</td><td>11.47</td></tr></table>
Table 18: BLEU scores for direct vs. pivoting translation for De $\leftrightarrow$ Zh on Wiki Full sets.
<table><tr><td rowspan="2">Method</td><td colspan="4">BLEU</td><td colspan="4">COMET</td></tr><tr><td>IT</td><td>Law</td><td>Medical</td><td>Avg</td><td>IT</td><td>Law</td><td>Medical</td><td>Avg</td></tr><tr><td>Zero-Shot</td><td>32.4</td><td>28.5</td><td>31.3</td><td>30.7</td><td>12.39</td><td>32.85</td><td>33.99</td><td>26.41</td></tr><tr><td colspan="9">1-shot Translation (Low-quality Pool)</td></tr><tr><td>Random</td><td>33.70</td><td>27.33</td><td>30.80</td><td>30.61</td><td>29.12</td><td>30.22</td><td>34.08</td><td>31.14</td></tr><tr><td>Ours</td><td>32.93</td><td>27.60</td><td>33.23</td><td>31.26</td><td>29.95</td><td>29.60</td><td>41.37</td><td>33.64</td></tr><tr><td colspan="9">Cross-domain Transfer</td></tr><tr><td>Wiki⇒Multi-Domain</td><td>32.90</td><td>26.73</td><td>31.87</td><td>30.50</td><td>25.08</td><td>33.27</td><td>37.85</td><td>32.07</td></tr><tr><td>WMT⇒Multi-Domain</td><td>30.87</td><td>25.37</td><td>31.43</td><td>29.22</td><td>12.98</td><td>30.34</td><td>34.80</td><td>26.04</td></tr></table>
Table 19: Cross-domain transfer results on Multi-Domain Full sets under 1-shot prompting. We adopt the SemScore-based strategy for example selection using the default Wiki/WMT Full candidate pool. Results are averaged over 3 different demonstrations.
2301.07xxx/2301.07069/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:830a36c860a09db45f2cb6f3d871425689d30647117a3a1d0320bce759e2a8cf
size 1983249

2301.07xxx/2301.07069/layout.json
ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_model.json
ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07093/1fff58a4-716b-4c60-9000-c7a7e90020b4_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60f5b51588b0b166a907aefb9641e6a81c16f9afbfbe2510eb92745f8f3e8551
size 15697142

2301.07xxx/2301.07093/full.md
ADDED
@@ -0,0 +1,774 @@
# GLIGEN: Open-Set Grounded Text-to-Image Generation
Yuheng Li$^{1§}$, Haotian Liu$^{1§}$, Qingyang Wu$^{2}$, Fangzhou Mu$^{1}$, Jianwei Yang$^{3}$, Jianfeng Gao$^{3}$, Chunyuan Li$^{3¶}$, Yong Jae Lee$^{1¶}$
<sup>1</sup>University of Wisconsin-Madison <sup>2</sup>Columbia University <sup>3</sup>Microsoft
https://gligen.github.io/
Figure 1. GLIGEN enables versatile grounding capabilities for a frozen text-to-image generation model, by feeding different grounding conditions. GLIGEN supports (a) text entity + box, (b) image entity + box, (c) image style and text + box, (d) keypoints, (e) depth map, (f) edge map, (g) normal map, and (h) semantic map.

(a) Caption: "A woman sitting in a restaurant with a pizza in front of her." Grounded text: table, pizza, person, wall, car, paper, chair, window, bottle, cup

(b) Caption: "A dog / bird / helmet / backpack is on the grass" Grounded image: red inset

(c) Caption: "Elon Musk and Emma Watson on a movie poster" Grounded text: Elon Musk, Emma Watson; Grounded style image: blue inset

(d) Caption: "A baby girl / monkey / Homer Simpson is scratching her/its head" Grounded keypoints: plotted dots on the left image

(e) Caption: "A vibrant colorful bird sitting on tree branch" Grounded depth map: the left image

(f) Caption: "A young boy with white powder on his face looks away" Grounded HED map: the left image

(h) Caption: "A living room filled with lots of furniture and plants" Grounded semantic map: the left image
# Abstract
Large-scale text-to-image diffusion models have made amazing advances. However, the status quo is to use text input alone, which can impede controllability. In this work, we propose GLIGEN, Grounded-Language-to-Image Generation, a novel approach that builds upon and extends the functionality of existing pre-trained text-to-image diffusion models by enabling them to also be conditioned on grounding inputs. To preserve the vast concept knowledge of the pre-trained model, we freeze all of its weights and inject the grounding information into new trainable layers via a gated mechanism. Our model achieves open-world grounded text2img generation with caption and bounding box condition inputs, and the grounding ability generalizes well to novel spatial configurations and concepts. GLIGEN's zero-shot performance on COCO and LVIS outperforms existing supervised layout-to-image baselines by a large margin.
# 1. Introduction
Image generation research has witnessed huge advances in recent years. Over the past couple of years, GANs [14] were the state-of-the-art, with their latent space and conditional inputs being well-studied for controllable manipulation [48, 60] and generation [27, 29, 47, 82]. Text conditional autoregressive [52, 74] and diffusion [51, 56] models have demonstrated astonishing image quality and concept coverage, due to their more stable learning objectives and large-scale training on web image-text paired data. These models have gained attention even among the general public due to their practical use cases (e.g., art design and creation).
|
| 102 |
+
|
| 103 |
+
Despite exciting progress, existing large-scale text-to-image generation models cannot be conditioned on other input modalities apart from text, and thus lack the ability to precisely localize concepts, use reference images, or other conditional inputs to control the generation process. The current input, i.e., natural language alone, restricts the way that information can be expressed. For example, it is difficult to describe the precise location of an object using text, whereas bounding boxes / keypoints can easily achieve this, as shown in Figure 1. While conditional diffusion models [10,53,55] and GANs [26,37,48,71] that take in input modalities other than text for inpainting, layout2img generation, etc., do exist, they rarely combine those inputs for controllable text2img generation.
|
| 104 |
+
|
| 105 |
+
Moreover, prior generative models—regardless of the generative model family—are usually independently trained on each task-specific dataset. In contrast, in the recognition field, the long-standing paradigm has been to build recognition models [32, 42, 84] by starting from a foundation model pretrained on large-scale image data [4, 16, 17] or image-text pairs [33, 50, 75]. Since diffusion models have been trained on billions of image-text pairs [53], a natural question is: Can we build upon existing pretrained diffusion models and endow them with new conditional input modalities? In this way, analogous to the recognition literature, we may be able to achieve better performance on other generation tasks due to the vast concept knowledge that the pretrained models have, while acquiring more controllability over existing text-to-image generation models.
|
| 106 |
+
|
| 107 |
+
With the above aims, we propose a method for providing new grounding conditional inputs to pretrained text-to-image diffusion models. As shown in Figure 1, we still retain the text caption as input, but also enable other input modalities such as bounding boxes for grounding concepts, grounding reference images, grounding part keypoints, etc. The key challenge is preserving the original vast concept knowledge in the pretrained model while learning to inject the new grounding information. To prevent knowledge forgetting, we propose to freeze the original model weights and add new trainable gated Transformer layers [67] that take in the new grounding input (e.g., bounding box). During training, we gradually fuse the new grounding information into the pretrained model using a gated mechanism [1]. This design enables flexibility in the sampling process during generation for improved quality and controllability; for example, we show that using the full model (all layers) in the first half of the sampling steps and only using the original layers (without the gated Transformer layers) in the latter half can lead to generation results that accurately reflect the grounding conditions while also having high image quality.
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
In our experiments, we primarily study grounded text2img generation with bounding boxes, inspired by the recent scaling success of learning grounded language-image understanding models with boxes in GLIP [34]. To enable our model to ground open-world vocabulary concepts [32,34,76,79], we use the same pre-trained text encoder (for encoding the caption) to encode each phrase associated with each grounded entity (i.e., one phrase per bounding box) and feed the encoded tokens into the newly inserted layers with their encoded location information. Due to the shared text space, we find that our model can generalize to unseen objects even when only trained on the COCO [41] dataset. Its generalization on LVIS [15] outperforms a strong fully-supervised baseline by a large margin. To further improve our model's grounding ability, we unify the object detection and grounding data formats for training, following GLIP [34]. With larger training data, our model's generalization is consistently improved.
|
| 112 |
+
|
| 113 |
+
Contributions. 1) We propose a new text2img generation method that endows new grounding controllability over existing text2img diffusion models. 2) By preserving the pretrained weights and learning to gradually integrate the new localization layers, our model achieves open-world grounded text2img generation with bounding box inputs, i.e., synthesis of novel localized concepts unobserved in training. 3) Our model's zero-shot performance on layout2img tasks significantly outperforms the prior state-of-the-art, demonstrating the power of building upon large pretrained generative models for downstream tasks.
|
| 114 |
+
|
| 115 |
+
# 2. Related Work
|
| 116 |
+
|
| 117 |
+
Large scale text-to-image generation models. State-of-the-art models in this space are either autoregressive [13, 52, 69, 74] or diffusion [45, 51, 53, 56, 81]. Among autoregressive models, DALL-E [52] is one of the breakthrough works that demonstrates zero-shot abilities, while Parti [74] demonstrates the feasibility of scaling up autoregressive models. Diffusion models have also shown very promising results. DALL-E 2 [51] generates images from the CLIP [50] image space, while Imagen [56] finds the benefit of using pretrained language models. The concurrent Muse [6] demonstrates that masked modeling can achieve SoTA-level generation performance with higher inference speed. However, all of these models usually only take a caption as the input, which can make it difficult to convey other information such as the precise location of an object. Make-A-Scene [13] also incorporates semantic maps into its text-to-image generation, by training an encoder to tokenize semantic masks to condition the generation. However, it can only operate in a closed set (of 158 categories), whereas our grounded entities can be open-world. A concurrent work, eDiff-I [3], shows that by changing the attention map, one can generate objects that roughly follow a semantic map input. However, we believe our box interface is simpler, and more importantly, our method allows other conditioning inputs such as keypoints, edge maps, and reference images, which are hard to manipulate through attention.
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
Image generation from layouts. Given bounding boxes labeled with object categories, the task is to generate a corresponding image [24, 39, 61-63, 72, 78], which is the reverse task of object detection. Layout2Im [78] formulated the problem and combined a VAE object encoder, an LSTM [22] object fuser, and an image decoder to generate the image, using global and object-level adversarial losses [14] to enforce realism and layout correspondence. LostGAN [61, 62] generates a mask representation which is used to normalize features, taking inspiration from StyleGAN [28]. LAMA [39] improves the intermediate mask quality for better image quality. Transformer [66]-based methods [24, 72] have also been explored. Critically, existing layout2image methods are closed-set, i.e., they can only generate the limited localized visual concepts observed in the training set, such as the 80 categories in COCO. In contrast, our method represents the first work for open-set grounded image generation. A concurrent work, ReCo [73], also demonstrates open-set abilities by building upon a pretrained Stable Diffusion model [53]. However, it finetunes the original model weights, which has the potential to lead to knowledge forgetting. Furthermore, it only demonstrates box grounding results, whereas we show results on more modalities, as shown in Figure 1.
|
| 122 |
+
|
| 123 |
+
Other conditional image generation. For GANs, various types of conditioning information have been explored, e.g., text [65, 70, 80], boxes [61, 62, 78], semantic masks [36, 47], and images [8, 38, 83]. For diffusion models, LDM [53] proposes a unified approach for conditional generation by injecting the condition via cross-attention layers. Palette [55] performs image-to-image tasks using diffusion models. These models are usually trained from scratch independently. In our work, we investigate how to build upon existing models pretrained on large-scale web data, to enable new open-set grounded image generation capabilities in a cost-effective manner.
|
| 124 |
+
|
| 125 |
+
# 3. Preliminaries on Latent Diffusion Models
|
| 126 |
+
|
| 127 |
+
Diffusion-based methods are one of the most effective model families for text2image tasks, among which the latent diffusion model (LDM) [53] and its successor Stable Diffusion are the most powerful models publicly available to the research community. To reduce the computational costs of vanilla diffusion model training, LDM proceeds in two stages. The first stage learns a bidirectional mapping network to obtain the latent representation $\mathbf{z}$ of the image $\mathbf{x}$. The second stage trains a diffusion model on the latent $\mathbf{z}$. Since the first-stage model produces a fixed bidirectional mapping between $\mathbf{x}$ and $\mathbf{z}$, from here on we focus on the latent generation space of LDM for simplicity.
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
Training Objective. Starting from noise $z_{T}$ , the model gradually produces less noisy samples $z_{T-1}, z_{T-2}, \dots, z_{0}$ , conditioned on caption $c$ at every time step $t$ . To learn such a model $f_{\theta}$ parameterized by $\theta$ , for each step, the LDM training objective solves the denoising problem on latent representations $z$ of the image $x$ :
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\min_{\theta} \mathcal{L}_{\mathrm{LDM}} = \mathbb{E}_{\boldsymbol{z}, \epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), t}\left[ \| \epsilon - f_{\theta}(\boldsymbol{z}_t, t, \boldsymbol{c}) \|_2^2 \right], \tag{1}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
where $t$ is uniformly sampled from time steps $\{1,\dots,T\}$, $\mathbf{z}_t$ is the step-$t$ noisy variant of input $\mathbf{z}$, and $f_{\theta}(*,t,\mathbf{c})$ is the $(t,\mathbf{c})$-conditioned denoising autoencoder.
|
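To make the objective in Eq. (1) concrete, the following is a minimal sketch of one training step, assuming PyTorch; `model`, `caption_emb`, and the noise schedule `alphas_cumprod` are stand-ins for the actual pretrained components and are not defined by the paper.

```python
import torch
import torch.nn.functional as F

def ldm_training_step(model, z0, caption_emb, alphas_cumprod):
    """One denoising training step of Eq. (1) on clean latents z0."""
    B = z0.shape[0]
    T = alphas_cumprod.shape[0]
    t = torch.randint(0, T, (B,), device=z0.device)        # t ~ Uniform{1, ..., T}
    eps = torch.randn_like(z0)                             # eps ~ N(0, I)
    a_bar = alphas_cumprod.to(z0.device)[t].view(B, 1, 1, 1)
    z_t = a_bar.sqrt() * z0 + (1.0 - a_bar).sqrt() * eps   # noisy latent z_t
    eps_pred = model(z_t, t, caption_emb)                  # f_theta(z_t, t, c)
    return F.mse_loss(eps_pred, eps)                       # || eps - f_theta(...) ||^2
```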
| 138 |
+
|
| 139 |
+
Network Architecture. The core of the network architecture is how to encode the conditions, based on which a cleaner version of $\mathbf{z}$ is produced. (i) Denoising Autoencoder. $f_{\theta}(*,t,c)$ is implemented via a UNet [54]. It takes in a noisy latent $\mathbf{z}$, as well as information from the time step $t$ and condition $c$, and consists of a series of ResNet [19] and Transformer [67] blocks. (ii) Condition Encoding. In the original LDM, a BERT-like [9] network is trained from scratch to encode each caption into a sequence of text embeddings, $f_{\mathrm{text}}(c)$, which is fed into (1) to replace $c$; in Stable Diffusion, the caption feature is instead encoded via a fixed CLIP [50] text encoder. Time $t$ is first mapped to a time embedding $\phi(t)$ and then injected into the UNet. The caption feature is used in a cross-attention layer within each Transformer block. The model learns to predict the noise, following (1).
|
| 140 |
+
|
| 141 |
+
With large-scale training, the model $f_{\theta}(*, t, c)$ is well trained to denoise $z$ based on the caption information only. Though impressive language-to-image generation results have been shown with LDM by pretraining on internet-scale data, it remains challenging to synthesize images where additional grounding input can be instructed, and is thus the focus of our paper.
|
| 142 |
+
|
| 143 |
+
# 4. Open-set Grounded Image Generation
|
| 144 |
+
|
| 145 |
+
# 4.1. Grounding Instruction Input
|
| 146 |
+
|
| 147 |
+
For grounded text-to-image generation, there are a variety of ways to ground the generation process via an additional condition. We denote the semantic information of the grounding entity as $e$, which can be described either through text or an example image, and we denote as $l$ the grounding spatial configuration, described with, e.g., a bounding box, a set of keypoints, or an edge map. Note that in certain cases, both semantic and spatial information can be represented with $l$ alone (e.g., an edge map), in which case a single map can represent what objects may be present in the image and where. We define the instruction to a grounded text-to-image model as a composition of the caption and grounded entities:
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
Figure 2. Illustration of the grounding token construction process for the bounding-box-with-text case.
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
Instruction: $\pmb{y} = (\pmb{c}, \pmb{e})$, with (2)
|
| 155 |
+
|
| 156 |
+
Caption: $\pmb {c} = [c_1,\dots ,c_L]$ (3)
|
| 157 |
+
|
| 158 |
+
Grounding: $\pmb {e} = [(e_1,\pmb {l}_1),\dots ,(e_N,\pmb {l}_N)]$ (4)
|
| 159 |
+
|
| 160 |
+
where $L$ is the caption length, and $N$ is the number of entities to ground. In this work, we primarily study using bounding boxes as the grounding spatial configuration $l$, because of their wide availability and ease of annotation for users. For the grounded entity $e$, we mainly focus on using text as its representation, for simplicity. We process both the caption and grounding entities as input tokens to the diffusion model, as described in detail below.
|
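For illustration only (this is not the authors' code), the instruction in Eqs. (2)-(4) can be held in a simple data structure: a caption plus $N$ (entity, location) pairs, with each box written as normalized $[\alpha_{\min}, \beta_{\min}, \alpha_{\max}, \beta_{\max}]$ coordinates.

```python
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class GroundingInstruction:
    caption: str                             # c = [c_1, ..., c_L]
    entities: List[Tuple[str, List[float]]]  # e = [(e_1, l_1), ..., (e_N, l_N)]

# Example instruction y = (c, e) with two grounded entities.
y = GroundingInstruction(
    caption="A dog and a bird on the grass",
    entities=[
        ("a dog", [0.05, 0.40, 0.45, 0.95]),   # [x_min, y_min, x_max, y_max]
        ("a bird", [0.55, 0.10, 0.90, 0.50]),
    ],
)
```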
| 161 |
+
|
| 162 |
+
Caption Tokens. The caption $c$ is processed in the same way as in LDM. Specifically, we obtain the caption feature sequence (yellow tokens in Figure 2) using $h^c = [h_1^c, \dots, h_L^c] = f_{\text{text}}(c)$ , where $h_\ell^c$ is the contextualized text feature for the $\ell$ -th word in the caption.
|
| 163 |
+
|
| 164 |
+
Grounding Tokens. For each grounded text entity denoted with a bounding box, we represent the location information as $l = [\alpha_{\min}, \beta_{\min}, \alpha_{\max}, \beta_{\max}]$ with its top-left and bottom-right coordinates. For the text entity $e$ , we use the same pretrained text encoder to obtain its text feature $f_{\text{text}}(e)$ (light green token in Figure 2), and then fuse it with its bounding box information to produce a grounding token (dark green token in Figure 2):
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
h^{e} = \mathrm{MLP}\left(f_{\text{text}}(e), \mathrm{Fourier}(l)\right) \tag{5}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
where Fourier is the Fourier embedding [44], and $\mathrm{MLP}(\cdot, \cdot)$ is a multi-layer perceptron that first concatenates the two inputs across the feature dimension. The grounding token sequence is represented as $h^e = [h_1^e, \dots, h_N^e]$ .
|
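A minimal sketch of Eq. (5), assuming PyTorch. The Fourier embedding maps the four box coordinates to a 64-dimensional feature and the hidden size of 512 follows the appendix, but the exact MLP depth and activation are illustrative assumptions.

```python
import math
import torch
import torch.nn as nn

def fourier_embed(l, n_freqs=8):
    """Fourier features of l = [a_min, b_min, a_max, b_max]: 4 coords x 2 x n_freqs = 64 dims."""
    freqs = (2.0 ** torch.arange(n_freqs, dtype=torch.float32)) * math.pi
    angles = l[..., None] * freqs                                       # (..., 4, n_freqs)
    return torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(-2)  # (..., 64)

class GroundingTokenizer(nn.Module):
    """h^e = MLP(f_text(e), Fourier(l)) as in Eq. (5)."""
    def __init__(self, text_dim=768, fourier_dim=64, hidden=512):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(text_dim + fourier_dim, hidden), nn.SiLU(),
            nn.Linear(hidden, hidden), nn.SiLU(),
            nn.Linear(hidden, text_dim),  # grounding token matches the text feature dim
        )

    def forward(self, f_text_e, l):
        # Concatenate the two inputs across the feature dimension, then fuse.
        return self.mlp(torch.cat([f_text_e, fourier_embed(l)], dim=-1))
```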
| 171 |
+
|
| 172 |
+
From Closed-set to Open-set. Note that existing layout2img works only deal with a closed-set setting (e.g., COCO categories), as they typically learn a vector embedding $\mathbf{u}$ per entity to replace $f_{\mathrm{text}}(e)$ in (5). For a closed-set setting with $K$ concepts, a dictionary of $K$ embeddings is learned, $\mathbf{U} = [\mathbf{u}_1,\dots,\mathbf{u}_K]$. While this representation works well in the closed-set setting, it has two drawbacks: (1) the conditioning is implemented as a dictionary look-up over $\mathbf{U}$ at evaluation time, so the model can only ground the observed entities in the generated images, lacking the ability to generalize to ground new entities; (2) no word/phrase is ever utilized in the model condition, so the semantic structure [23] of the underlying language instruction is missing. In contrast, in our open-set design, since the noun entities are processed by the same text encoder that is used to encode the caption, we find that even when the localization information is limited to the concepts in the grounding training datasets, our model can still generalize to other concepts, as we will show in our experiments.
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
Extensions to Other Grounding Conditions. Note that the proposed grounding instruction in Eq. (4) is in a general form, though our description thus far has focused on using text as the entity $e$ and a bounding box as $l$ (the main setting of this paper). To demonstrate the flexibility of the GLIGEN framework, we also study additional representative cases that extend the usage scenarios of Eq. (4).
|
| 177 |
+
|
| 178 |
+
- Image Prompt. While language allows users to describe a rich set of entities in an open-vocabulary manner, sometimes more abstract and fine-grained concepts can be better characterized by example images. To this end, one may describe entity $e$ using an image, instead of language. We use an image encoder to obtain feature $f_{\mathrm{image}}(e)$ which is used in place of $f_{\mathrm{text}}(e)$ in Eq (5) when $e$ is an image.
|
| 179 |
+
- Keypoints. As a simple way to parameterize an entity's spatial configuration, bounding boxes ease the user-machine interface, but they specify only the height and width of the object layout. One may consider richer spatial configurations such as keypoints for GLIGEN, by parameterizing $l$ in Eq. (4) with a set of keypoint coordinates. As with boxes, the Fourier embedding [44] can be applied to each keypoint location $l = [x, y]$.
|
| 180 |
+
- Spatially-aligned conditions. To enable more fine-grained controllability, spatially-aligned condition maps can be used, such as edge maps, depth maps, normal maps, and semantic maps. In these cases, the semantic information $e$ is already contained within each spatial coordinate $l$ of the condition map. A network (e.g., conv layers) can be used to encode $l$ into $h \times w$ grounding tokens. We also notice that additionally feeding $l$ into the first conv layer of the UNet can accelerate training: specifically, the input to the UNet is $\mathrm{CONCAT}(f_l(l), z_t)$, where $f_l$ is a simple downsampling network that reduces $l$ to the same spatial resolution as $\mathbf{z}_t$ (see the sketch after Figure 3 below). In this case, the first conv layer of the UNet needs to be trainable.
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
Figure 3. For a pretrained text2img model, the text features are fed into each cross-attention layer. A new gated self-attention layer is inserted to take in the new conditional information.
|
| 184 |
+
|
| 185 |
+
|
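As referenced in the bullet above, here is a minimal sketch, assuming PyTorch, of forming the UNet input $\mathrm{CONCAT}(f_l(l), z_t)$ for a spatially-aligned condition map; the structure of the downsampling network $f_l$ and all sizes are illustrative assumptions.

```python
import torch
import torch.nn as nn

class ConditionDownsampler(nn.Module):
    """f_l: reduce the condition map l to the latent's spatial resolution."""
    def __init__(self, in_ch=1, out_ch=4, factor=8):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.AvgPool2d(factor),   # e.g., a 512x512 map -> 64x64 latent grid
        )

    def forward(self, l):
        return self.net(l)

f_l = ConditionDownsampler()
z_t = torch.randn(1, 4, 64, 64)               # noisy latent z_t
l = torch.randn(1, 1, 512, 512)               # condition map (e.g., depth or edges)
unet_input = torch.cat([f_l(l), z_t], dim=1)  # the UNet's first conv must be trainable
```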
| 186 |
+
|
| 187 |
+
Figure 1 shows generated examples for these other grounding conditions. Please refer to the supp for more details.
|
| 188 |
+
|
| 189 |
+
# 4.2. Continual Learning for Grounded Generation
|
| 190 |
+
|
| 191 |
+
Our goal is to endow new spatial grounding capabilities to existing large language-to-image generation models. Large diffusion models have been pre-trained on web-scale image-text data to gain the knowledge required for synthesizing realistic images based on diverse and complex language instructions. Due to the high pre-training cost and excellent performance, it is important to retain this knowledge in the model weights while expanding the new capability. Hence, we lock the original model weights and gradually adapt the model by tuning new modules.
|
| 192 |
+
|
| 193 |
+
Gated Self-Attention. We denote by $\pmb{v} = [v_{1},\dots ,v_{M}]$ the visual feature tokens of an image. The original Transformer block of LDM consists of two attention layers: self-attention over the visual tokens, followed by cross-attention with the caption tokens. Including the residual connections, the two layers can be written as:
|
| 194 |
+
|
| 195 |
+
$$
|
| 196 |
+
\boldsymbol{v} = \boldsymbol{v} + \operatorname{SelfAttn}(\boldsymbol{v}) \tag{6}
|
| 197 |
+
$$
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\boldsymbol{v} = \boldsymbol{v} + \operatorname{CrossAttn}(\boldsymbol{v}, \boldsymbol{h}^{c}) \tag{7}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
We freeze these two attention layers and add a new gated self-attention layer to enable the spatial grounding ability; see Figure 3. Specifically, the attention is performed over the concatenation of visual and grounding tokens $[\boldsymbol{v}, \boldsymbol{h}^{e}]$:
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
$$
|
| 208 |
+
\boldsymbol{v} = \boldsymbol{v} + \beta \cdot \tanh(\gamma) \cdot \operatorname{TS}(\operatorname{SelfAttn}([\boldsymbol{v}, \boldsymbol{h}^{e}])) \tag{8}
|
| 209 |
+
$$
|
| 210 |
+
|
| 211 |
+
where $\mathrm{TS}(\cdot)$ is a token selection operation that considers visual tokens only, and $\gamma$ is a learnable scalar which is initialized as 0. $\beta$ is set as 1 during the entire training process and is only varied for scheduled sampling during inference (introduced below) for improved quality and controllability. Note that (8) is injected in between (6) and (7). Intuitively, the gated self-attention in (8) allows visual features to leverage conditional information, and the resulting grounded features are treated as a residual, whose gate is initially set to 0 (due to $\gamma$ being initialized as 0). This also enables more stable training. Note that a similar idea is used in Flamingo [1]; however, it uses gated cross-attention, which leads to worse performance in our ablation study.
|
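A minimal sketch of the gated self-attention layer in Eq. (8), assuming PyTorch: attention runs over the concatenation $[v, h^e]$, $\mathrm{TS}(\cdot)$ keeps only the visual-token outputs, and the residual is scaled by $\beta \cdot \tanh(\gamma)$ with $\gamma$ initialized to 0, so the layer starts as an identity mapping.

```python
import torch
import torch.nn as nn

class GatedSelfAttention(nn.Module):
    def __init__(self, dim, n_heads=8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, n_heads, batch_first=True)
        self.gamma = nn.Parameter(torch.zeros(1))  # gate starts closed

    def forward(self, v, h_e, beta=1.0):
        """v: (B, M, dim) visual tokens; h_e: (B, N, dim) grounding tokens."""
        x = torch.cat([v, h_e], dim=1)             # [v, h^e]
        out, _ = self.attn(x, x, x)                # SelfAttn([v, h^e])
        out = out[:, : v.shape[1]]                 # TS(.): keep visual tokens only
        return v + beta * torch.tanh(self.gamma) * out
```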
| 212 |
+
|
| 213 |
+
Learning Procedure. We adapt the pre-trained model such that grounding information can be injected while all the original components remain intact. Denoting all the new parameters as $\theta^{\prime}$, including the gated self-attention layers in Eq. (8) and the MLP in Eq. (5), we use the original denoising objective as in (1) for continual learning of the model, based on the grounding instruction input $y$:
|
| 214 |
+
|
| 215 |
+
$$
|
| 216 |
+
\min_{\theta'} \mathcal{L}_{\text{Grounding}} = \mathbb{E}_{\boldsymbol{z}, \boldsymbol{\epsilon} \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), t}\left[ \| \boldsymbol{\epsilon} - f_{\{\theta, \theta'\}}(\boldsymbol{z}_t, t, \boldsymbol{y}) \|_2^2 \right]. \tag{9}
|
| 217 |
+
$$
|
| 218 |
+
|
| 219 |
+
Why should the model try to use the new grounding information? Intuitively, predicting the noise that was added to a training image in the reverse diffusion process would be easier if the model could leverage external knowledge (e.g., each object's location). Thus, in this way, the model learns to use the additional information while retaining the pre-trained concept knowledge.
|
| 220 |
+
|
| 221 |
+
|
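A minimal sketch, assuming PyTorch, of the continual-learning setup behind Eq. (9): the original parameters $\theta$ are frozen, and only the new parameters $\theta'$ are passed to the optimizer. The module-name filters below are hypothetical; the real layer names depend on the implementation.

```python
import torch

def build_grounding_optimizer(model, lr=1e-4):
    """Freeze theta; optimize only theta' (gated self-attention + grounding MLP)."""
    new_params = []
    for name, p in model.named_parameters():
        if "gated_attn" in name or "grounding_mlp" in name:  # theta' (names are assumptions)
            new_params.append(p)
        else:
            p.requires_grad_(False)                          # theta stays intact
    return torch.optim.Adam(new_params, lr=lr)
```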
| 222 |
+
|
| 223 |
+
Scheduled Sampling in Inference. The standard inference scheme of GLIGEN is to set $\beta = 1$ in (8), so that the entire diffusion process is influenced by the grounding tokens. This constant-$\beta$ sampling scheme provides good overall performance in terms of both generation and grounding, but sometimes generates lower-quality images compared with the original text2img models (e.g., Stable Diffusion is finetuned on images with high aesthetic scores). To strike a better trade-off between generation and grounding for GLIGEN, we propose a scheduled sampling scheme. Since we freeze the original model weights and add new layers to inject the grounding information during training, there is flexibility at inference time to schedule the diffusion process to either use both the grounding and language tokens or use only the language tokens of the original model at any time, by setting different $\beta$ values in (8). Specifically, we consider a two-stage inference procedure, divided by $\tau \in [0,1]$. For a diffusion process with $T$ steps, one can set $\beta$ to 1 for the first $\tau * T$ steps, and set $\beta$ to 0 for the remaining $(1 - \tau) * T$ steps:
|
| 224 |
+
|
| 225 |
+
$$
|
| 226 |
+
\beta = \left\{ \begin{array}{ll} 1, & t \leq \tau * T \quad \# \text{ Grounded inference stage} \\ 0, & t > \tau * T \quad \# \text{ Standard inference stage} \end{array} \right. \tag{10}
|
| 227 |
+
$$
|
| 228 |
+
|
| 229 |
+
The major benefit of scheduled sampling is improved visual quality, as the rough concept location and outline are decided in the early stages, followed by fine-grained details in later stages. It also allows us to extend a model trained in one domain (human keypoints) to other domains (monkeys, cartoon characters), as shown in Figure 1.
|
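A minimal sketch of the two-stage schedule in Eq. (10); `denoise_step` is a stand-in for one reverse-diffusion update that applies Eq. (8) with the given $\beta$.

```python
def scheduled_sampling(z, denoise_step, T=50, tau=0.2):
    """Grounded inference for the first tau*T steps, standard inference afterwards."""
    for step in range(1, T + 1):                # step 1 starts from pure noise z_T
        beta = 1.0 if step <= tau * T else 0.0  # Eq. (10)
        z = denoise_step(z, step, beta)
    return z
```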
| 230 |
+
|
| 231 |
+
# 5. Experiments
|
| 232 |
+
|
| 233 |
+
We evaluate our model's box-grounded text2img generation in both the closed-set and open-set settings, and show extensions to other grounding modalities. We conduct our main quantitative experiments by building upon an LDM pretrained on LAION [57], unless stated otherwise.
|
| 234 |
+
|
| 235 |
+
# 5.1. Closed-set Grounded Text2Img Generation
|
| 236 |
+
|
| 237 |
+
We first evaluate the generation quality and grounding accuracy of our model in a closed-set setting. For this, we train and evaluate on the COCO2014 [41] dataset, which is a standard benchmark used in the text2img literature [51, 56, 65, 70, 82], and evaluate how the different types of grounding instructions impact our model's performance.
|
| 238 |
+
|
| 239 |
+
Grounding instructions. We use the following grounding instructions to train our model: 1) COCO2014D: Detection Data. There are no caption annotations, so we use a null caption input [21]. Detection annotations are used as noun entities.
|
| 240 |
+
|
| 241 |
+
<table><tr><td rowspan="2">Model</td><td colspan="2">Generation: FID (↓)</td><td rowspan="2">Grounding: YOLO (↑) AP/AP50/AP75</td></tr><tr><td>Fine-tuned</td><td>Zero-shot</td></tr><tr><td>CogView [11]</td><td>-</td><td>27.10</td><td>-</td></tr><tr><td>KNN-Diffusion [2]</td><td>-</td><td>16.66</td><td>-</td></tr><tr><td>DALL-E 2 [51]</td><td>-</td><td>10.39</td><td>-</td></tr><tr><td>Imagen [56]</td><td>-</td><td>7.27</td><td>-</td></tr><tr><td>Re-Imagen [7]</td><td>5.25</td><td>6.88</td><td></td></tr><tr><td>Parti [74]</td><td>3.20</td><td>7.23</td><td>-</td></tr><tr><td>LAFITE [82]</td><td>8.12</td><td>26.94</td><td>-</td></tr><tr><td>LAFITE2 [80]</td><td>4.28</td><td>8.42</td><td>-</td></tr><tr><td>Make-a-Scene [13]</td><td>7.55</td><td>11.84</td><td>-</td></tr><tr><td>NÜWA [69]</td><td>12.90</td><td>-</td><td>-</td></tr><tr><td>Frido [12]</td><td>11.24</td><td>-</td><td>-</td></tr><tr><td>XMC-GAN [77]</td><td>9.33</td><td>-</td><td>-</td></tr><tr><td>AttnGAN [70]</td><td>35.49</td><td>-</td><td>-</td></tr><tr><td>DF-GAN [65]</td><td>21.42</td><td>-</td><td>-</td></tr><tr><td>Obj-GAN [35]</td><td>20.75</td><td>-</td><td>-</td></tr><tr><td>LDM [53]</td><td>-</td><td>12.63</td><td>-</td></tr><tr><td>LDM*</td><td>5.91</td><td>11.73</td><td>0.6 / 2.0 / 0.3</td></tr><tr><td>GLIGEN (COCO2014CD)</td><td>5.82</td><td>-</td><td>21.7 / 39.0 / 21.7</td></tr><tr><td>GLIGEN (COCO2014D)</td><td>5.61</td><td>-</td><td>24.0 / 42.2 / 24.1</td></tr><tr><td>GLIGEN (COCO2014G)</td><td>6.38</td><td>-</td><td>11.2 / 21.2 / 10.7</td></tr></table>
|
| 242 |
+
|
| 243 |
+
Table 1. Evaluation of image quality and correspondence to layout on the COCO2014 val-set. All numbers are taken from the corresponding papers; LDM* is our COCO fine-tuned LDM baseline. Here, GLIGEN is built upon LDM.
|
| 244 |
+
|
| 245 |
+
2) COCO2014CD: Detection + Caption Data. Both caption and detection annotations are used. Note that the noun entities may not always exist in the caption. 3) COCO2014G: Grounding Data. Given the caption annotations, we use GLIP [34], which detects the caption's noun entities in the image, to obtain pseudo box labels. Please refer to the supp for more details about these three types of data.
|
| 246 |
+
|
| 247 |
+
Baselines. Baseline models are listed in Table 1. Among them, we also finetune an LDM [53] pretrained on LAION 400M [57] on COCO2014 with its caption annotations, which we denote as LDM*.
|
| 248 |
+
|
| 249 |
+
The text2img baselines, as they cannot be conditioned on box inputs, are evaluated on COCO2014C: Caption Data.
|
| 250 |
+
|
| 251 |
+
Evaluation metrics. We use the captions and/or box annotations from 30K randomly sampled images to generate 30K images for evaluation. We use FID [20] to evaluate image quality. To evaluate grounding accuracy (i.e., the correspondence between the input bounding boxes and the generated entities), we use the YOLO score [40]. Specifically, we use a pretrained YOLO-v4 [5] to detect bounding boxes on the generated images and compare them with the ground-truth boxes using average precision (AP). Since prior text2img methods do not support taking box annotations as input, it is not fair to compare with them on this metric; thus, we only report numbers for the fine-tuned LDM as a reference.
|
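As a rough illustration only (not the paper's evaluation code), the idea behind the YOLO score is to run a pretrained detector on the generated images and match its boxes to the input layout by IoU; the snippet below computes a simplified IoU-matching rate with torchvision, whereas the paper reports full COCO-style AP over detection scores and classes.

```python
import torch
from torchvision.ops import box_iou

def gt_box_match_rate(pred_boxes, gt_boxes, iou_thresh=0.5):
    """pred_boxes: (N, 4), gt_boxes: (M, 4), both in xyxy format."""
    if len(pred_boxes) == 0 or len(gt_boxes) == 0:
        return 0.0
    ious = box_iou(pred_boxes, gt_boxes)            # (N, M) pairwise IoU
    matched = ious.max(dim=0).values >= iou_thresh  # best IoU per GT box
    return matched.float().mean().item()            # fraction of GT boxes matched
```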
| 252 |
+
|
| 253 |
+
Results. Table 1 shows the results. First, we see that the image synthesis quality of our approach, as measured by FID, is better than most of the state-of-the-art baselines, due to the rich visual knowledge learned in the pretraining stage. Next, we find that all three grounding instructions lead to FID comparable to that of the LDM* baseline, which is finetuned on COCO2014 with caption annotations.
|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
Figure 4. Our model can generalize to open-world concepts even when only trained using localization annotation from COCO.
|
| 259 |
+
|
| 260 |
+
Our model trained using detection annotation instructions (COCO2014D) has the overall best performance. However, when we evaluate this model on COCO2014CD instructions, we find that it performs worse (FID: 8.2); its ability to understand real captions may be limited, as it is only trained with the null caption. For the model trained with GLIP grounding instructions (COCO2014G), we actually evaluate using the COCO2014CD instructions, since computing the YOLO score requires ground-truth detection annotations. Its slightly worse FID may be attributed to learning from GLIP pseudo-labels. The same reason can explain its low YOLO score (i.e., the model did not see any ground-truth detection annotations during training).
|
| 261 |
+
|
| 262 |
+
Overall, this experiment shows that: 1) Our model can successfully take in boxes as an additional condition while maintaining image generation quality. 2) All grounding instruction types are useful, which suggests that combining their data together can lead to complementary benefits.
|
| 263 |
+
|
| 264 |
+
Comparison to Layout2Img generation methods. Thus far, we have seen that our model correctly learns to use the grounding condition. But how accurate is it compared to methods that are specifically designed for layout2img generation? To answer this, we train our model on COCO2017D, which only has detection annotations. We use the 2017 splits (instead of 2014 as before), as they are the standard benchmark in the layout2img literature. In this experiment, we use the exact same annotations as all layout2img baselines.
|
| 265 |
+
|
| 266 |
+
Table 2 shows that we achieve state-of-the-art performance for both image quality and grounding accuracy. We believe the core reason is that previous methods train their models from scratch, whereas we build upon a large-scale pretrained generative model with rich visual semantics. Qualitative comparisons are in the supp. We also scale up our training data (discussed later) and pretrain a model on it; Figure 5 (left) shows this model's zero-shot and finetuned results.
|
| 267 |
+
|
| 268 |
+
<table><tr><td>Model</td><td>FID (↓)</td><td>YOLO score (AP/AP50/AP75) (↑)</td></tr><tr><td>LostGAN-V2 [62]</td><td>42.55</td><td>9.1 / 15.3 / 9.8</td></tr><tr><td>OCGAN [64]</td><td>41.65</td><td>-</td></tr><tr><td>HCSS [25]</td><td>33.68</td><td>-</td></tr><tr><td>LAMA [40]</td><td>31.12</td><td>13.40 / 19.70 / 14.90</td></tr><tr><td>TwFA [71]</td><td>22.15</td><td>- / 28.20 / 20.12</td></tr><tr><td>GLIGEN-LDM</td><td>21.04</td><td>22.4 / 36.5 / 24.1</td></tr></table>
|
| 269 |
+
|
| 270 |
+
Table 2. Comparison of image quality and correspondence to layout with baselines on the COCO2017 val-set.
|
| 271 |
+
|
| 272 |
+
<table><tr><td>Model</td><td>Training data</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>LAMA [40]</td><td>LVIS</td><td>2.0</td><td>0.9</td><td>1.3</td><td>3.2</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014CD</td><td>6.4</td><td>5.8</td><td>5.8</td><td>7.4</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014D</td><td>4.4</td><td>2.3</td><td>3.3</td><td>6.5</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014G</td><td>6.0</td><td>4.4</td><td>6.1</td><td>6.6</td></tr><tr><td>GLIGEN-LDM</td><td>GoldG,O365</td><td>10.6</td><td>5.8</td><td>9.6</td><td>13.8</td></tr><tr><td>GLIGEN-LDM</td><td>GoldG,O365,SBU,CC3M</td><td>11.1</td><td>9.0</td><td>9.8</td><td>13.4</td></tr><tr><td>GLIGEN-Stable</td><td>GoldG,O365,SBU,CC3M</td><td>10.8</td><td>8.8</td><td>9.9</td><td>12.6</td></tr><tr><td>Upper-bound</td><td>-</td><td>25.2</td><td>19.0</td><td>22.2</td><td>31.2</td></tr></table>
|
| 273 |
+
|
| 274 |
+
Table 3. GLIP-score on LVIS validation set. Upper-bound is provided by running GLIP on real images scaled to $256 \times 256$ .
|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
Figure 5. Performance comparison measured by image generation and grounding quality on COCO2017 (left) and LVIS (right) datasets. GLIGEN is built upon LDM, and continually pre-trained on the joint data of GoldG, O365, SBU, and CC3M. GLIGEN (Reference) is pre-trained on COCO/LVIS only. The circle size indicates the model size.
|
| 278 |
+
|
| 279 |
+
# 5.2. Open-set Grounded Text2Img Generation
|
| 280 |
+
|
| 281 |
+
COCO-training model. We first take GLIGEN trained only with the grounding annotations of COCO (COCO2014CD), and evaluate whether it can generate grounded entities beyond the COCO categories. Figure 4 shows qualitative results, where GLIGEN can ground new concepts such as "blue jay", "croissant" or ground object attributes such as "brown wooden table", beyond the training categories. We hypothesize this is because the gated self-attention of GLIGEN learns to re-position the visual features corresponding to the grounding entities in the caption for the ensuing cross-attention layer, and gains generalization ability due to the shared text spaces in these two layers.
|
| 282 |
+
|
| 283 |
+
We also quantitatively evaluate our model's zero-shot generation performance on LVIS [15], which contains 1203 long-tail object categories. We use GLIP to predict bounding boxes from the generated images and calculate AP; we thus name it the GLIP score. We compare to a state-of-the-art model designed for the layout2img task, LAMA [40].
|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
Figure 6. Grounded text2image generation. The baseline lacks grounding ability and can miss objects (e.g., "umbrella") in a sentence with multiple objects, likely due to the CLIP text space; it also struggles to generate spatially counterfactual concepts.
|
| 287 |
+
|
| 288 |
+
We train LAMA using the official code on the LVIS training set (in a fully-supervised setting), whereas we directly evaluate our model in a zero-shot task-transfer manner, by running inference on the LVIS val set without seeing any LVIS labels. Table 3 (first 4 rows) shows the results. Surprisingly, even though our model is only trained on COCO annotations, it outperforms the supervised baseline by a large margin. This is because the baseline, which is trained from scratch, struggles to learn from limited annotations (many of the rare classes in LVIS have fewer than five training samples). In contrast, our model can take advantage of the pretrained model's vast concept knowledge.
|
| 289 |
+
|
| 290 |
+
Scaling up the training data. We next study our model's open-set capability with much larger training data. Specifically, we follow GLIP [34] and train on Object365 [58] and GoldG [34], which combines two grounding datasets: Flickr [49] and VG [31]. We also use CC3M [59] and SBU [46] with grounding pseudo-labels generated by GLIP.
|
| 291 |
+
|
| 292 |
+
Table 3 shows the data scaling results. As we scale up the training data, our model's zero-shot performance increases, especially for rare concepts. We also finetune the model pretrained on our largest dataset on LVIS and demonstrate its performance in Figure 5 (right).
|
| 293 |
+
|
| 294 |
+
To demonstrate the generality of our method, we also train our model based on the Stable Diffusion checkpoint using the largest data. We show some qualitative examples in Figure 6 using this model. Our model gains grounding ability compared to vanilla Stable Diffusion. We notice that the Stable Diffusion model may overlook certain objects ("umbrella" in the second example) due to its use of the CLIP text encoder, which tends to focus on global scene properties and may ignore object-level details [3]. It also struggles to generate spatially counterfactual concepts. By explicitly injecting entity information through grounding tokens, our model improves grounding in two ways: the referred objects are more likely to appear in the generated images, and the objects reside in the specified spatial locations.
|
| 295 |
+
|
| 296 |
+
# 5.3. Beyond Text Modality Grounding
|
| 297 |
+
|
| 298 |
+
Image grounded generation. One can also use a reference image to represent a grounded entity as discussed previously. Fig. 1 (b) shows qualitative results, which demonstrate that the visual feature can complement details that are hard to describe by language.
|
| 299 |
+
|
| 300 |
+
Text and image grounded generation. Besides using either text or image to represent a grounded entity, one can also keep both representations in one model for more creative generation. Fig. 1 (c) shows text grounded generation with style / tone transfer. For the style reference image, we find that grounding it to an image corner or its edge is sufficient. Since the model needs to generate a harmonious style for the entire image, we hypothesize the self-attention layers may broadcast this information to all pixels, thus leading to consistent style for the entire image.
|
| 301 |
+
|
| 302 |
+
Keypoints grounded generation. We also demonstrate GLIGEN using keypoints for articulated object control, as shown in Fig. 1 (d). Note that this model is only trained with human keypoint annotations, but it can generalize to other humanoid objects due to the scheduled sampling technique we proposed. We also quantitatively study this grounding condition in the supp.
|
| 303 |
+
|
| 304 |
+
Spatially-aligned condition map grounded generation. Fig. 1 (e-h) demonstrates results for depth map, edge map, normal map, and semantic map grounded generation. These types of conditions allow users more fine-grained control over generation. See supp for more qualitative results.
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
# 5.4. Scheduled Sampling
|
| 309 |
+
|
| 310 |
+
As stated in Eq. (8) and Eq. (10), we can schedule inference time sampling by setting $\beta$ to 1 (use extra grounding information) or 0 (reduce to the original pretrained diffusion model). This can make our model exploit different knowledge at different stages.
|
| 311 |
+
|
| 312 |
+
Fig. 7 qualitatively shows the benefits of our scheduled sampling, with $\tau$ set to 0.2. The images in the same row share the same noise and conditional input. The first row shows that scheduled sampling can be used to improve image quality, as the original Stable Diffusion model is trained on high-quality images. The second row shows a generation example from our model trained with COCO human keypoint annotations. Since this model is trained purely with human keypoints, the final result is biased towards generating a human even if a different object (e.g., a robot) is specified in the caption. However, by using scheduled sampling, we can extend this model to generate other objects with a human-like shape.
|
| 313 |
+
|
| 314 |
+
# 6. Conclusion
|
| 315 |
+
|
| 316 |
+
We proposed GLIGEN for expanding pretrained text2img diffusion models with grounding ability, and demonstrated open-world generalization using bounding boxes as the grounding condition. Our method is simple and effective, and can be easily extended to other conditions such as keypoints, reference images, and spatially-aligned conditions (e.g., edge maps, depth maps, etc.). The versatility of GLIGEN makes it a promising direction for advancing the field of text-to-image synthesis and expanding the capabilities of pretrained models in various applications.
|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
Caption:"a cute low poly Shiba Inu" Grounded text:Shiba Inu
|
| 320 |
+
|
| 321 |
+

|
| 322 |
+
|
| 323 |
+

|
| 324 |
+
|
| 325 |
+

|
| 326 |
+
Caption: "a robot is sitting on a bench" Grounded keypoints: plotted dots on the left figure
|
| 327 |
+
Figure 7. Scheduled sampling. It can improve visual quality or extend a model trained in one domain (e.g., humans) to others.
|
| 328 |
+
|
| 329 |
+

|
| 330 |
+
|
| 331 |
+

|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
Acknowledgement. This work was supported in part by NSF CAREER IIS2150012, NASA 80NSSC21K0295, and Institute of Information & communications Technology Planning & Evaluation (IITP) grants funded by the Korea government (MSIT) (No. 2022-0-00871, Development of AI Autonomy and Knowledge Enhancement for AI Agent Collaboration) and (No. RS-2022-00187238, Development of Large Korean Language Model Technology for Efficient Pre-training), and an Adobe Data Science Research Award.
|
| 336 |
+
|
| 337 |
+
# References
|
| 338 |
+
|
| 339 |
+
[1] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katie Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Karen Simonyan. Flamingo: a visual language model for few-shot learning. ArXiv, abs/2204.14198, 2022. 2, 5, 14
|
| 340 |
+
[2] Oron Ashual, Shelly Sheynin, Adam Polyak, Uriel Singer, Oran Gafni, Eliya Nachmani, and Yaniv Taigman. KNN-Diffusion: Image generation via large-scale retrieval. arXiv preprint arXiv:2204.02849, 2022. 6
|
| 341 |
+
[3] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Jiaming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, Tero Karras, and Ming-Yu Liu. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. ArXiv, abs/2211.01324, 2022. 3, 8
|
| 342 |
+
|
| 343 |
+
[4] Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021. 2
|
| 344 |
+
[5] Alexey Bochkovskiy, Chien-Yao Wang, and Hong-Yuan Mark Liao. Yolov4: Optimal speed and accuracy of object detection. ArXiv, abs/2004.10934, 2020. 6
|
| 345 |
+
[6] Huiwen Chang, Han Zhang, Jarred Barber, AJ Maschinot, Jose Lezama, Lu Jiang, Ming-Hsuan Yang, Kevin Murphy, William T Freeman, Michael Rubinstein, et al. Muse: Text-to-image generation via masked generative transformers. arXiv preprint arXiv:2301.00704, 2023. 2
|
| 346 |
+
[7] Wenhu Chen, Hexiang Hu, Chitwan Saharia, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv preprint arXiv:2209.14491, 2022. 6
|
| 347 |
+
[8] Yunjey Choi, Min-Je Choi, Mun Su Kim, Jung-Woo Ha, Sunghun Kim, and Jaegul Choo. Stargan: Unified generative adversarial networks for multi-domain image-to-image translation. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8789-8797, 2018. 3
|
| 348 |
+
[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171–4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. 3
|
| 349 |
+
[10] Prafulla Dhariwal and Alex Nichol. Diffusion models beat gans on image synthesis. ArXiv, abs/2105.05233, 2021. 2
|
| 350 |
+
[11] Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, and Jie Tang. Cogview: Mastering text-to-image generation via transformers, 2021. 6
|
| 351 |
+
[12] Wanshu Fan, Yen-Chun Chen, Dongdong Chen, Yu Cheng, Lu Yuan, and Yu-Chiang Frank Wang. Frido: Feature pyramid diffusion for complex scene image synthesis. ArXiv, abs/2208.13753, 2022. 6
|
| 352 |
+
[13] Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman. Make-a-scene: Scene-based text-to-image generation with human priors. ArXiv, abs/2203.13131, 2022. 2, 3, 6
|
| 353 |
+
[14] Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron C. Courville, and Yoshua Bengio. Generative adversarial nets. In NIPS, 2014. 2, 3
|
| 354 |
+
[15] Agrim Gupta, Piotr Dollár, and Ross B. Girshick. Lvis: A dataset for large vocabulary instance segmentation. CVPR, pages 5351-5359, 2019. 2, 7, 16
|
| 355 |
+
[16] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2
|
| 356 |
+
[17] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020. 2
|
| 357 |
+
|
| 358 |
+
[18] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross B. Girshick. Mask R-CNN. 2017 IEEE International Conference on Computer Vision (ICCV), pages 2980-2988, 2017. 15
|
| 359 |
+
[19] Kaiming He, X. Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CVPR, pages 770-778, 2016. 3
|
| 360 |
+
[20] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NIPS, 2017. 6
|
| 361 |
+
[21] Jonathan Ho. Classifier-free diffusion guidance. ArXiv, abs/2207.12598, 2022. 6, 14
|
| 362 |
+
[22] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9:1735-1780, 1997. 3
|
| 363 |
+
[23] Ray S Jackendoff. Semantic structures, volume 18. MIT press, 1992. 4
|
| 364 |
+
[24] Manuel Jahn, Robin Rombach, and Björn Ommer. High-resolution complex scene synthesis with transformers. ArXiv, abs/2105.06458, 2021. 3
|
| 365 |
+
[25] Manuel Jahn, Robin Rombach, and Björn Ommer. High-resolution complex scene synthesis with transformers. ArXiv, abs/2105.06458, 2021. 7, 16
|
| 366 |
+
[26] Justin Johnson, Agrim Gupta, and Li Fei-Fei. Image generation from scene graphs. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1219–1228, 2018. 2
|
| 367 |
+
[27] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. CVPR, pages 4396-4405, 2019. 2
|
| 368 |
+
[28] Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. CVPR, pages 4396-4405, 2019. 3
|
| 369 |
+
[29] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8107-8116, 2020. 2
|
| 370 |
+
[30] Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. CoRR, abs/1412.6980, 2015. 14
|
| 371 |
+
[31] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123:32-73, 2016. 8
|
| 372 |
+
[32] Chunyuan Li, Haotian Liu, Liunian Harold Li, Pengchuan Zhang, Jyoti Aneja, Jianwei Yang, Ping Jin, Houdong Hu, Zicheng Liu, Yong Jae Lee, and Jianfeng Gao. ELEVATER: A benchmark and toolkit for evaluating language-augmented visual models. In NeurIPS Track on Datasets and Benchmarks, 2022. 2
|
| 373 |
+
[33] Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. arXiv preprint arXiv:2107.07651, 2021. 2
|
| 374 |
+
|
| 375 |
+
[34] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, Kai-Wei Chang, and Jianfeng Gao. Grounded language-image pre-training. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2022, New Orleans, LA, USA, June 18-24, 2022, pages 10955-10965. IEEE, 2022. 2, 6, 8
|
| 376 |
+
[35] Wenbo Li, Pengchuan Zhang, Lei Zhang, Qiuyuan Huang, Xiaodong He, Siwei Lyu, and Jianfeng Gao. Object-driven text-to-image synthesis via adversarial training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12174-12182, 2019. 6
|
| 377 |
+
[36] Yuheng Li, Yijun Li, Jingwan Lu, Eli Shechtman, Yong Jae Lee, and Krishna Kumar Singh. Collaging class-specific gans for semantic image synthesis. ICCV, pages 14398-14407, 2021. 3
|
| 378 |
+
[37] Yuheng Li, Yijun Li, Jingwan Lu, Eli Shechtman, Yong Jae Lee, and Krishna Kumar Singh. Contrastive learning for diverse disentangled foreground generation. ArXiv, abs/2211.02707, 2022. 2
|
| 379 |
+
[38] Yuheng Li, Krishna Kumar Singh, Utkarsh Ojha, and Yong Jae Lee. Mixnmatch: Multifactor disentanglement and encoding for conditional image generation. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8036-8045, 2020. 3
|
| 380 |
+
[39] Z. Li, Jingyu Wu, Immanuel Koh, Yongchuan Tang, and Lingyun Sun. Image synthesis from layout with locality-aware mask adaption. ICCV, pages 13799-13808, 2021. 3
|
| 381 |
+
[40] Z. Li, Jingyu Wu, Immanuel Koh, Yongchuan Tang, and Lingyun Sun. Image synthesis from layout with locality-aware mask adaption. ICCV, pages 13799-13808, 2021. 6, 7, 16
|
| 382 |
+
[41] Tsung-Yi Lin, Michael Maire, Serge J. Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 2, 6, 14, 17
|
| 383 |
+
[42] Haotian Liu, Kilho Son, Jianwei Yang, Ce Liu, Jianfeng Gao, Yong Jae Lee, and Chunyuan Li. Learning customized visual models with retrieval-augmented knowledge. CVPR, 2023. 2
|
| 384 |
+
[43] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 13
|
| 385 |
+
[44] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 4, 13
|
| 386 |
+
[45] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. In ICML, 2022. 2, 15
|
| 387 |
+
[46] Vicente Ordonez, Girish Kulkarni, and Tamara L. Berg. Im2text: Describing images using 1 million captioned photographs. In NIPS, 2011. 8
|
| 388 |
+
[47] Taesung Park, Ming-Yu Liu, Ting-Chun Wang, and Jun-Yan Zhu. Semantic image synthesis with spatially-adaptive normalization. CVPR, pages 2332-2341, 2019. 2, 3
|
| 389 |
+
|
| 390 |
+
[48] Deepak Pathak, Philipp Krahenbuhl, Jeff Donahue, Trevor Darrell, and Alexei A. Efros. Context encoders: Feature learning by inpainting. CVPR, pages 2536-2544, 2016. 2
|
| 391 |
+
[49] Bryan A. Plummer, Liwei Wang, Christopher M. Cervantes, Juan C. Caicedo, J. Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. International Journal of Computer Vision, 123:74–93, 2015. 8
|
| 392 |
+
[50] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 2, 3
|
| 393 |
+
[51] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. ArXiv, abs/2204.06125, 2022. 2, 6
|
| 394 |
+
[52] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 8821-8831. PMLR, 18-24 Jul 2021. 2
|
| 395 |
+
[53] Robin Rombach, A. Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. CVPR, pages 10674-10685, 2022. 2, 3, 6, 13, 14, 15
|
| 396 |
+
[54] O. Ronneberger, P. Fischer, and T. Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention (MICCAI), volume 9351 of LNCS, pages 234–241. Springer, 2015. (available on arXiv:1505.04597 [cs.CV]). 3, 13
|
| 397 |
+
[55] Chitwan Saharia, William Chan, Huiwen Chang, Chris A. Lee, Jonathan Ho, Tim Salimans, David J. Fleet, and Mohammad Norouzi. Palette: Image-to-image diffusion models. ACM SIGGRAPH 2022 Conference Proceedings, 2022. 2, 3
|
| 398 |
+
[56] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L. Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, Seyedeh Sara Mahdavi, Raphael Gontijo Lopes, Tim Salimans, Jonathan Ho, David J. Fleet, and Mohammad Norouzi. Photorealistic text-to-image diffusion models with deep language understanding. ArXiv, abs/2205.11487, 2022. 2, 6
|
| 399 |
+
[57] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. LAION-400M: open dataset of clip-filtered 400 million image-text pairs. CoRR, abs/2111.02114, 2021. 6
|
| 400 |
+
[58] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. ICCV, pages 8429-8438, 2019. 8
|
| 401 |
+
[59] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In ACL, 2018. 8
|
| 402 |
+
|
| 403 |
+
[60] Yujun Shen, Jinjin Gu, Xiaoou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 9240-9249, 2020. 2
|
| 404 |
+
[61] Wei Sun and Tianfu Wu. Image synthesis from reconfigurable layout and style. ICCV, pages 10530-10539, 2019. 3
|
| 405 |
+
[62] Wei Sun and Tianfu Wu. Learning layout and style reconfigurable gans for controllable image synthesis. TPAMI, 44:5070-5087, 2022. 3, 7, 16
|
| 406 |
+
[63] Tristan Sylvain, Pengchuan Zhang, Yoshua Bengio, R. Devon Hjelm, and Shikhar Sharma. Object-centric image generation from layouts. ArXiv, abs/2003.07449, 2021. 3
|
| 407 |
+
[64] Tristan Sylvain, Pengchuan Zhang, Yoshua Bengio, R. Devon Hjelm, and Shikhar Sharma. Object-centric image generation from layouts. ArXiv, abs/2003.07449, 2021. 7, 16
|
| 408 |
+
[65] Ming Tao, Hao Tang, Songsong Wu, N. Sebe, Fei Wu, and Xiaoyuan Jing. Df-gan: Deep fusion generative adversarial networks for text-to-image synthesis. ArXiv, abs/2008.05865, 2020. 3, 6
|
| 409 |
+
[66] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. 3
|
| 410 |
+
[67] Ashish Vaswani, Noam M. Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. ArXiv, abs/1706.03762, 2017. 2, 3
|
| 411 |
+
[68] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8798-8807, 2018. 15, 16
|
| 412 |
+
[69] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. Nüwa: Visual synthesis pretraining for neural visual world creation. In European Conference on Computer Vision, 2022. 2, 6
|
| 413 |
+
[70] Tao Xu, Pengchuan Zhang, Qiuyuan Huang, Han Zhang, Zhe Gan, Xiaolei Huang, and Xiaodong He. Attngan: Finegrained text to image generation with attentional generative adversarial networks. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1316-1324, 2018. 3, 6
|
| 414 |
+
[71] Zuopeng Yang, Daqing Liu, Chaoyue Wang, J. Yang, and Dacheng Tao. Modeling image composition for complex scene generation. CVPR, pages 7754-7763, 2022. 2, 7, 15, 16
|
| 415 |
+
[72] Zuopeng Yang, Daqing Liu, Chaoyue Wang, J. Yang, and Dacheng Tao. Modeling image composition for complex scene generation. CVPR, pages 7754-7763, 2022. 3
|
| 416 |
+
[73] Zhengyuan Yang, Jianfeng Wang, Zhe Gan, Linjie Li, Kevin Lin, Chenfei Wu, Nan Duan, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. Reco: Region-controlled text-to-image generation. ArXiv, abs/2211.15518, 2022. 3
|
| 417 |
+
[74] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, Benton C. Hutchinson, Wei Han, Zarana Parekh, Xin Li, Han Zhang, Jason Baldridge, and Yonghui Wu. Scaling autoregressive models for content-rich text-to-image generation. ArXiv, abs/2206.10789, 2022. 2, 6
|
| 420 |
+
[75] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 2
|
| 421 |
+
[76] Alireza Zareian, Kevin Dela Rosa, Derek Hao Hu, and Shih-Fu Chang. Open-vocabulary object detection using captions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14393-14402, 2021. 2
|
| 422 |
+
[77] Han Zhang, Jing Yu Koh, Jason Baldridge, Honglak Lee, and Yinfei Yang. Cross-modal contrastive learning for text-to-image generation, 2021. 6
|
| 423 |
+
[78] Bo Zhao, Lili Meng, Weidong Yin, and Leonid Sigal. Image generation from layout. CVPR, pages 8576-8585, 2019. 3
|
| 424 |
+
[79] Yiwu Zhong, Jianwei Yang, Pengchuan Zhang, Chunyuan Li, Noel Codella, Liunian Harold Li, Luowei Zhou, Xiyang Dai, Lu Yuan, Yin Li, et al. RegionCLIP: Region-based language-image pretraining. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16793-16803, 2022. 2
|
| 425 |
+
[80] Yufan Zhou, Chunyuan Li, Changyou Chen, Jianfeng Gao, and Jinhui Xu. Lafite2: Few-shot text-to-image generation. arXiv preprint arXiv:2210.14124, 2022. 3, 6
|
| 426 |
+
[81] Yufan Zhou, Bingchen Liu, Yizhe Zhu, Xiao Yang, Changyou Chen, and Jinhui Xu. Shifted diffusion for text-to-image generation. arXiv preprint arXiv:2211.15388, 2022. 2
|
| 427 |
+
[82] Yufan Zhou, Ruiyi Zhang, Changyou Chen, Chunyuan Li, Chris Tensmeyer, Tong Yu, Jiuxiang Gu, Jinhui Xu, and Tong Sun. Towards language-free training for text-to-image generation. CVPR, 2022. 2, 6
|
| 428 |
+
[83] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Computer Vision (ICCV), 2017 IEEE International Conference on, 2017. 3
|
| 429 |
+
[84] Xueyan Zou*, Zi-Yi Dou*, Jianwei Yang*, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Jianfeng Wang, Lu Yuan, Nanyun Peng, Lijuan Wang, Yong Jae Lee, and Jianfeng Gao. Generalized decoding for pixel, image and language. arXiv, 2022. 2
|
| 430 |
+
|
| 431 |
+
# Appendix
|
| 432 |
+
|
| 433 |
+
In this supplemental material, we provide more implementation and training details, and then present more results and discussions.
|
| 434 |
+
|
| 435 |
+
# A. Implementation and training details
|
| 436 |
+
|
| 437 |
+
We use the Stable Diffusion model [53] as an example to illustrate our implementation details.
|
| 438 |
+
|
| 439 |
+
Box Grounding Tokens with Text. Each grounded text is first fed into the text encoder to obtain its text embedding (e.g., the 768-dimensional CLIP text embedding in Stable Diffusion). Since Stable Diffusion uses the features of all 77 text tokens output by the transformer backbone, we take the "EOS" token feature at this layer as our grounded text embedding. In CLIP training, this "EOS" token feature is the one passed through a linear transform (one FC layer) and compared against the visual feature, so it should summarize the entire input text description. We also tried directly using the final CLIP text embedding (after the linear projection), but empirically observed slower convergence, probably because its space is not aligned with that of the caption embeddings. Following NeRF [44], we encode the bounding box coordinates with a Fourier embedding of output dimension 64. As stated in Eq 5 of the main paper, we concatenate these two features and feed them into a multi-layer perceptron. The MLP consists of three hidden layers with hidden dimension 512, and the output grounding token dimension is set equal to the text embedding dimension (e.g., 768 for Stable Diffusion). We set the maximum number of grounding tokens to 30 in the bounding box case.
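To make the construction concrete, here is a minimal PyTorch sketch of this box tokenizer, assuming the dimensions above; `BoxGroundingTokenizer`, `fourier_embed`, and the `SiLU` activations are illustrative choices, not the released implementation.

```python
# Sketch of building a box grounding token from a grounded text embedding
# and Fourier-encoded box coordinates. Names/activations are assumptions.
import math
import torch
import torch.nn as nn

def fourier_embed(x: torch.Tensor, num_freqs: int = 8) -> torch.Tensor:
    """NeRF-style Fourier features: (..., D) -> (..., D * 2 * num_freqs)."""
    freqs = (2.0 ** torch.arange(num_freqs, device=x.device)) * math.pi
    angles = x.unsqueeze(-1) * freqs                 # (..., D, num_freqs)
    return torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(-2)

class BoxGroundingTokenizer(nn.Module):
    def __init__(self, text_dim: int = 768, fourier_dim: int = 64):
        super().__init__()
        # Three hidden layers of width 512; output matches the text dim.
        self.mlp = nn.Sequential(
            nn.Linear(text_dim + fourier_dim, 512), nn.SiLU(),
            nn.Linear(512, 512), nn.SiLU(),
            nn.Linear(512, 512), nn.SiLU(),
            nn.Linear(512, text_dim),
        )

    def forward(self, text_emb: torch.Tensor, boxes: torch.Tensor) -> torch.Tensor:
        # text_emb: (B, N, 768) "EOS" features; boxes: (B, N, 4) in [0, 1].
        box_emb = fourier_embed(boxes, num_freqs=8)  # (B, N, 64): 4 * 2 * 8
        return self.mlp(torch.cat([text_emb, box_emb], dim=-1))
```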
|
| 440 |
+
|
| 441 |
+
Box Grounding Tokens with Image. We obtain the grounding token for an image in a similar way, using the CLIP image encoder (ViT-L/14 for Stable Diffusion) to get an image embedding. Denote the CLIP training objective as maximizing $(\mathbf{P}_t\pmb {h}_t)^\top (\mathbf{P}_i\pmb {h}_i)$ (normalization omitted), where $h_t$ is the "EOS" token embedding from the text encoder, $h_i$ is the "CLS" token embedding from the image encoder, and $\mathbf{P}_t$ and $\mathbf{P}_i$ are the linear projections for the text and image embeddings, respectively. Since $h_t$ defines the text feature space used for the grounded text features, to ease training we project the image features into this space via $\mathbf{P}_t^\top \mathbf{P}_i\pmb {h}_i$, and normalize the result to 28.7, the average norm of $h_t$ that we found empirically. We also set the maximum number of grounding tokens to 30; thus there are 60 tokens in total if one keeps both the image and the text as representations for a grounded entity.
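A hedged sketch of this projection and renormalization; the function name and the row-vector orientation of the CLIP projection matrices are our assumptions and may differ from the actual code.

```python
# Project a CLIP image embedding into the text feature space via
# P_t^T P_i h_i, then rescale to the empirical average norm of h_t (28.7).
import torch

def image_to_text_space(h_i: torch.Tensor,
                        P_i: torch.Tensor,   # CLIP image projection (d_i, d_joint)
                        P_t: torch.Tensor,   # CLIP text projection  (d_t, d_joint)
                        target_norm: float = 28.7) -> torch.Tensor:
    # h_i: (B, d_i) "CLS" embedding from the CLIP image encoder.
    h = h_i @ P_i @ P_t.T                    # (B, d_t): implements P_t^T P_i h_i
    return h / h.norm(dim=-1, keepdim=True) * target_norm
```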
|
| 442 |
+
|
| 443 |
+
Keypoint Grounding Tokens. The grounding token for keypoint annotations is processed in the same way, except that we also learn $N$ person token embedding vectors
|
| 444 |
+
|
| 445 |
+

|
| 446 |
+
Figure 8. Additional grounding input is fed into the UNet input for spatially aligned conditions.
|
| 447 |
+
|
| 448 |
+
$\{p_1, \ldots, p_N\}$ to semantically link keypoints belonging to the same person. This handles the situation in which the image to generate contains multiple people, so that the model knows which keypoint corresponds to which person. Each keypoint semantic embedding $k_e$ is a learnable vector; the dimension of each person token is set to the keypoint embedding dimension. The grounding token is calculated by:
|
| 449 |
+
|
| 450 |
+
$$
|
| 451 |
+
\boldsymbol{h}^{e} = \operatorname{MLP}\left(\boldsymbol{k}_{e} + \boldsymbol{p}_{j}, \operatorname{Fourier}(\boldsymbol{l})\right) \tag{11}
|
| 452 |
+
$$
|
| 453 |
+
|
| 454 |
+
where $l$ is the $(x, y)$ location of each keypoint and $p_j$ is the person token for the $j$-th person. In practice, we set $N$ to 10, the maximum number of persons allowed in each generated image. Thus, we have 170 tokens for the COCO dataset (i.e., $10 \times 17$, with 17 keypoint annotations per person).
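A sketch of Eq. 11 in PyTorch; the module names and hidden width are assumptions, and the Fourier helper repeats the one from the box-token sketch above.

```python
# Keypoint grounding token: learned keypoint-semantic embedding k_e plus
# learned per-person token p_j, concatenated with Fourier-embedded (x, y).
import math
import torch
import torch.nn as nn

def fourier_embed(x, num_freqs=8):
    # Same NeRF-style helper as in the box-token sketch.
    freqs = (2.0 ** torch.arange(num_freqs, device=x.device)) * math.pi
    ang = x.unsqueeze(-1) * freqs
    return torch.cat([ang.sin(), ang.cos()], dim=-1).flatten(-2)

class KeypointTokenizer(nn.Module):
    def __init__(self, num_kp: int = 17, num_persons: int = 10,
                 emb_dim: int = 768, fourier_dim: int = 32):
        super().__init__()
        self.kp_emb = nn.Embedding(num_kp, emb_dim)            # k_e
        self.person_emb = nn.Embedding(num_persons, emb_dim)   # p_j
        self.mlp = nn.Sequential(                               # width is assumed
            nn.Linear(emb_dim + fourier_dim, 512), nn.SiLU(),
            nn.Linear(512, emb_dim),
        )

    def forward(self, kp_id, person_id, xy):
        # kp_id, person_id: (B, N) long indices; xy: (B, N, 2) in [0, 1].
        sem = self.kp_emb(kp_id) + self.person_emb(person_id)  # k_e + p_j
        loc = fourier_embed(xy, num_freqs=8)                   # (B, N, 32)
        return self.mlp(torch.cat([sem, loc], dim=-1))          # h^e
```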
|
| 455 |
+
|
| 456 |
+
Tokens for Spatially Aligned Conditions. This type of condition includes edge maps, depth maps, semantic maps, normal maps, etc.; each can be represented as a $C \times H \times W$ tensor. We resize the spatial size to $256 \times 256$ and use ConvNeXt-Tiny [43] as the backbone to output a feature map of spatial size $8 \times 8$, which is then flattened into 64 grounding tokens. We notice that training converges faster if we also feed the grounding condition $l$ into the UNet input. As shown in Figure 8, the input in this case is $\text{CONCAT}(f_l(l), z_t)$, where $f_l$ is a simple downsampling network that reduces $l$ to the same spatial dimensions as $z_t$, the noisy latent code at time step $t$ ($64 \times 64$ for Stable Diffusion). In this case, the first conv layer of the UNet needs to be trainable.
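A minimal sketch of this tokenizer and of the UNet-input concatenation in Figure 8. The ConvNeXt-Tiny backbone follows torchvision; the downsampler `f_l` (including its 4 output channels) is our own placeholder.

```python
# Tokenize a spatially aligned condition (e.g., a depth or edge map) into
# 64 grounding tokens, and concatenate a downsampled copy with z_t.
import torch
import torch.nn as nn
from torchvision.models import convnext_tiny

class SpatialConditionTokenizer(nn.Module):
    def __init__(self, token_dim: int = 768):
        super().__init__()
        backbone = convnext_tiny(weights=None)
        self.features = backbone.features        # (B, 3, 256, 256) -> (B, 768, 8, 8)
        self.proj = nn.Linear(768, token_dim)

    def forward(self, cond: torch.Tensor) -> torch.Tensor:
        f = self.features(cond)                  # (B, 768, 8, 8)
        tokens = f.flatten(2).transpose(1, 2)    # (B, 64, 768)
        return self.proj(tokens)                 # 64 grounding tokens

# f_l: reduce l to the latent's spatial size (channel count is assumed);
# the UNet's first conv must then accept the extra channels and be trainable.
f_l = nn.Sequential(nn.Conv2d(3, 4, 3, stride=2, padding=1),
                    nn.AdaptiveAvgPool2d(64))

def unet_input(l: torch.Tensor, z_t: torch.Tensor) -> torch.Tensor:
    return torch.cat([f_l(l), z_t], dim=1)       # CONCAT(f_l(l), z_t)
```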
|
| 457 |
+
|
| 458 |
+
Gated Self-Attention Layers. Our inserted self-attention layer is the same as the original self-attention layer in each Transformer block of the diffusion model, except that we add one linear projection layer that converts the grounding tokens to the same dimension as the visual tokens. For example, in the first layer of the down branch of the UNet [54], the projection layer converts grounding tokens of dimension 768 to 320 (the image feature dimension at this
|
| 459 |
+
|
| 460 |
+

|
| 461 |
+
Figure 9. Three different types of grounding data for box grounding.
|
| 462 |
+
|
| 463 |
+
layer), and the visual tokens are concatenated with the grounding tokens as the input to the gated attention layer.
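Putting the pieces together, here is a sketch of one gated self-attention layer. The $\tanh(\gamma)$ gate follows Eq 8 of the main paper; initializing $\gamma$ to zero and the normalization placement are our assumptions of common choices.

```python
# Gated self-attention: grounding tokens are projected to the visual
# dimension, concatenated with the visual tokens, passed through joint
# self-attention, and only the visual part is kept, scaled by tanh(gamma).
import torch
import torch.nn as nn

class GatedSelfAttention(nn.Module):
    def __init__(self, visual_dim: int, ground_dim: int, n_heads: int = 8):
        super().__init__()
        self.proj = nn.Linear(ground_dim, visual_dim)   # e.g., 768 -> 320
        self.norm = nn.LayerNorm(visual_dim)
        self.attn = nn.MultiheadAttention(visual_dim, n_heads, batch_first=True)
        self.gamma = nn.Parameter(torch.zeros(1))       # gate starts closed (assumed)

    def forward(self, x: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
        # x: (B, Nv, visual_dim) visual tokens; g: (B, Ng, ground_dim).
        h = torch.cat([self.norm(x), self.proj(g)], dim=1)
        out, _ = self.attn(h, h, h)                     # joint self-attention
        return x + torch.tanh(self.gamma) * out[:, : x.shape[1]]  # visual part only
```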
|
| 464 |
+
|
| 465 |
+
Training Details. For all COCO-related experiments (Sec 5.1 in the main paper), we train LDM with batch size 64 on 16 V100 GPUs for 100k iterations. In the scaled-up training data experiment (Sec 5.2 of the main paper), we train LDM for 400k iterations, but the Stable Diffusion model for 500k iterations with batch size 32. For all training, we use a learning rate of 5e-5 with Adam [30], with warm-up for the first 10k iterations. We randomly drop the caption and grounding tokens with $10\%$ probability for classifier-free guidance [21].
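For illustration, the random condition dropping could look as follows; `null_caption` and `null_grounding` are hypothetical placeholders for whatever null conditions the model uses.

```python
# Sketch of the 10% random dropping of the caption and grounding tokens
# used for classifier-free guidance training.
import torch

def drop_conditions(caption_tokens, grounding_tokens,
                    null_caption, null_grounding, p: float = 0.1):
    # Independently replace each condition with its null version with prob p.
    if torch.rand(()) < p:
        caption_tokens = null_caption       # e.g., the encoded empty string
    if torch.rand(()) < p:
        grounding_tokens = null_grounding   # e.g., zeroed grounding tokens
    return caption_tokens, grounding_tokens
```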
|
| 466 |
+
|
| 467 |
+
Data Details. In Sec 5.1 of the main paper, we study three different types of data for box grounding. The training data requires both the text $c$ and the grounding entities $e$ as the full condition. In practice, we can relax this requirement by considering more flexible inputs, i.e., the three types of data shown in Figure 9. (i) Grounding data. Each image is associated with a caption describing the whole image; noun entities are extracted from the caption and labeled with bounding boxes. Since the noun entities are taken directly from the natural-language caption, they cover a much richer vocabulary, which benefits open-world-vocabulary grounded generation. (ii) Detection data. Noun entities are pre-defined closed-set categories (e.g., the 80 object classes in COCO [41]). In this case, we use a null caption token, as introduced in classifier-free guidance [21], for the caption. Detection data is of far larger quantity (millions of images) than grounding data (thousands), and can therefore greatly increase the overall training data. (iii) Detection and caption data. Noun entities are the same as in the detection data, and the image is additionally described with a text caption. In this case, the noun entities may not exactly match those in the caption. For example, in Figure 9, the caption only gives a high-level description of the living room without mentioning the objects in the scene, whereas the detection annotation provides more fine-grained object-level details.
|
| 468 |
+
|
| 469 |
+
# B. Ablation Study
|
| 470 |
+
|
| 471 |
+
Ablation on gated self-attention. As shown in Figure 3 and Eq 8 of the main paper, our approach uses gated self-attention to absorb the grounding instruction.
|
| 472 |
+
|
| 473 |
+

|
| 474 |
+
Figure 10. Inpainting results. Existing text2img diffusion models may generate objects that do not tightly fit the masked box, or may miss an object if the same object already exists in the image.
|
| 475 |
+
|
| 476 |
+
<table><tr><td>Object size</td><td>1%-3%</td><td>5%-10%</td><td>30%-50%</td></tr><tr><td>LDM [53]</td><td>25.9</td><td>23.4</td><td>14.6</td></tr><tr><td>GLIGEN-LDM</td><td>29.7</td><td>30.9</td><td>25.6</td></tr><tr><td>Upper-bound</td><td>41.7</td><td>43.4</td><td>45.0</td></tr></table>
|
| 477 |
+
|
| 478 |
+
Table 4. Inpainting results (YOLO AP) for different object sizes.
|
| 479 |
+
|
| 480 |
+
We can also consider gated cross-attention [1], where the query is the visual feature and the keys and values are produced from the grounding condition. We ablate this design on COCO2014CD data using LDM. Compared with Table 1 of the main paper, we find that it yields a similar FID (5.8) but a worse YOLO AP (16.6, versus 21.7 for self-attention in the table). This shows the necessity of information sharing among the visual tokens, which exists in self-attention but not in cross-attention.
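For reference, a sketch of the ablated gated cross-attention variant; the names mirror the self-attention sketch above and are equally illustrative.

```python
# Gated cross-attention: queries come from the visual tokens, keys/values
# from the grounding tokens, so visual tokens do not attend to each other.
import torch
import torch.nn as nn

class GatedCrossAttention(nn.Module):
    def __init__(self, visual_dim: int, ground_dim: int, n_heads: int = 8):
        super().__init__()
        self.proj = nn.Linear(ground_dim, visual_dim)
        self.norm = nn.LayerNorm(visual_dim)
        self.attn = nn.MultiheadAttention(visual_dim, n_heads, batch_first=True)
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
        # x: (B, Nv, visual_dim) visual queries; g: (B, Ng, ground_dim).
        kv = self.proj(g)
        out, _ = self.attn(self.norm(x), kv, kv)
        return x + torch.tanh(self.gamma) * out
```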
|
| 481 |
+
|
| 482 |
+
Ablation on null caption. We use the null caption when we only have detection annotations (COCO2014D). An alternative scheme is to simply combine all noun entities into a sentence; e.g., if there are two cats and a dog in an image, the pseudo-caption can be "cat, cat, dog". In this case, the FID worsens from 5.61 (null caption; see Table 1 of the main paper) to 7.40. This is likely because the pretrained text encoder never encountered this type of unnatural caption during LDM training. A solution would be to finetune the text encoder or design a better prompt, but this is not the focus of our work.
|
| 483 |
+
|
| 484 |
+
Ablation on Fourier embedding. In Eq 5, we replace the Fourier embedding with an MLP embedding and run an experiment using the COCO2014CD data format (Table 1). The image quality (FID) is similar (Fourier/MLP: 5.82/5.80), but the layout correspondence (YOLO AP) is much worse (Fourier/MLP: 21.7/3.2).
|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
Figure 11. Layout2img comparison. Our model generates better-quality images, especially when built upon Stable Diffusion. Baseline images are copied from TwFA [71].
|
| 488 |
+
|
| 489 |
+
# C. Grounded inpainting
|
| 490 |
+
|
| 491 |
+
# C.1. Text Grounded Inpainting
|
| 492 |
+
|
| 493 |
+
Like other diffusion models, GLIGEN can also perform inpainting by replacing the known region with a sample from $q(z_{t}|z_{0})$ after each sampling step, where $z_{0}$ is the latent representation of the image [53]. One can ground text descriptions to the missing regions, as shown in Figure 10. In this setting, however, one may wonder: can we simply use a vanilla text-to-image diffusion model, such as Stable Diffusion or DALL-E 2, to fill the missing region by providing the object name as the caption? What are the benefits of the extra grounding inputs in such cases? To answer this, we conduct the following experiment on the COCO dataset: for each image, we randomly mask one object and let the model inpaint the missing region. We choose the missing object at three different size ratios with respect to the image: small (1%-3%), medium (5%-10%), and large (30%-50%), with 5000 images for each case.
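A minimal sketch of one inpainting step under these assumptions; `denoise_step` and `alphas_cumprod` are placeholders for the sampler and noise schedule of choice, and the time indexing is kept loose.

```python
# After each denoising step, overwrite the known region of the latent with
# a fresh sample from q(z_t | z_0); only the masked region is generated.
import torch

def inpaint_step(z_t, t, z_0, mask, alphas_cumprod, denoise_step):
    # mask: 1 where the region is known, 0 where it must be generated.
    z_t = denoise_step(z_t, t)                          # one reverse-diffusion step
    a = alphas_cumprod[t]                               # cumulative alpha at step t
    noise = torch.randn_like(z_0)
    z_known = a.sqrt() * z_0 + (1 - a).sqrt() * noise   # sample from q(z_t | z_0)
    return mask * z_known + (1 - mask) * z_t
```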
|
| 494 |
+
|
| 495 |
+
Table 4 demonstrates that our inpainted objects occupy the missing region (box) more tightly than the baselines'. Fig. 10 provides examples to visually compare the inpainting results (we use Stable Diffusion for better quality). The first row shows that the baselines' generated objects do not follow the provided box.
|
| 496 |
+
|
| 497 |
+

|
| 498 |
+
Figure 12. Keypoint results. Our model generates higher-quality images conditioned on keypoints, and it allows using the caption to specify details such as the scene or a person's gender.
|
| 499 |
+
|
| 500 |
+
The second row shows that when the missing category is already present in the image, the baselines may ignore the caption. This is understandable, as the baselines are trained to generate a whole image following the caption. Our method may be more favorable for editing applications, where a user might want to generate an object that fully fits the missing region, or add an instance of a class that already exists in the image.
|
| 501 |
+
|
| 502 |
+
# C.2. Image Grounded Inpainting
|
| 503 |
+
|
| 504 |
+
Just as one can ground text to missing regions for inpainting, one can also ground reference images to missing regions. Figure 13 shows inpainting results grounded on reference images. To remove boundary artifacts, we follow GLIDE [45] and modify the first conv layer by adding 5 extra channels (4 for $z_0$ and 1 for the inpainting mask), making them trainable along with the newly added layers.
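A sketch of widening the first conv in this way; zero-initializing the new channels is our assumption of a common choice, not necessarily what GLIDE or our code does.

```python
# Widen the UNet's first conv by 5 channels (4 for the masked latent z_0
# and 1 for the inpainting mask), keeping the pretrained weights for the
# original input channels and zero-initializing the new ones (assumed).
import torch
import torch.nn as nn

def widen_first_conv(old: nn.Conv2d, extra: int = 5) -> nn.Conv2d:
    new = nn.Conv2d(old.in_channels + extra, old.out_channels,
                    old.kernel_size, old.stride, old.padding)
    with torch.no_grad():
        new.weight.zero_()
        new.weight[:, : old.in_channels] = old.weight  # copy pretrained weights
        if old.bias is not None:
            new.bias.copy_(old.bias)
    return new
```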
|
| 505 |
+
|
| 506 |
+
# D. Study for Keypoints Grounding
|
| 507 |
+
|
| 508 |
+
Although we have thus far demonstrated results with bounding boxes, our approach is flexible in the grounding conditions it can use for generation. To demonstrate this, we next evaluate our model with another type of grounding condition: human keypoints. We use the COCO2017 dataset and compare with pix2pixHD [68], a classic image-to-image translation model. Since pix2pixHD does not take captions as input, we train two variants of our model: one uses COCO captions, the other does not. In the latter case, a null caption is used as input to the cross-attention layers for a fair comparison.
|
| 509 |
+
|
| 510 |
+
Fig. 12 shows the qualitative comparison. Clearly, our method generates much better image quality. For our model trained with captions, we can also specify other details such as the scene ("A person is skiing down a snowy hill") or a person's gender ("A woman is holding a baby"). These two inputs complement each other and enrich a user's control over image creation. We measure keypoint correspondence (similar to the YOLO score for boxes) by running a Mask R-CNN [18] keypoint detector on the generated images.
|
| 511 |
+
|
| 512 |
+
<table><tr><td>Model</td><td>Pre-training data</td><td>Training data</td><td>FID</td><td>AP</td><td>\( AP_r \)</td><td>\( AP_c \)</td><td>\( AP_f \)</td></tr><tr><td>LAMA [40]</td><td>-</td><td>LVIS</td><td>151.96</td><td>2.0</td><td>0.9</td><td>1.3</td><td>3.2</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014CD</td><td>-</td><td>22.17</td><td>6.4</td><td>5.8</td><td>5.8</td><td>7.4</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014D</td><td>-</td><td>31.31</td><td>4.4</td><td>2.3</td><td>3.3</td><td>6.5</td></tr><tr><td>GLIGEN-LDM</td><td>COCO2014G</td><td>-</td><td>13.48</td><td>6.0</td><td>4.4</td><td>6.1</td><td>6.6</td></tr><tr><td>GLIGEN-LDM</td><td>GoldG,O365</td><td>-</td><td>8.45</td><td>10.6</td><td>5.8</td><td>9.6</td><td>13.8</td></tr><tr><td>GLIGEN-LDM</td><td>GoldG,O365,SBU,CC3M</td><td>-</td><td>10.28</td><td>11.1</td><td>9.0</td><td>9.8</td><td>13.4</td></tr><tr><td>GLIGEN-LDM</td><td>GoldG,O365,SBU,CC3M</td><td>LVIS</td><td>6.25</td><td>14.9</td><td>10.1</td><td>12.8</td><td>19.3</td></tr><tr><td>Upper-bound</td><td>-</td><td>-</td><td>-</td><td>25.2</td><td>19.0</td><td>22.2</td><td>31.2</td></tr></table>
|
| 513 |
+
|
| 514 |
+

|
| 515 |
+
|
| 516 |
+

|
| 517 |
+
|
| 518 |
+

|
| 519 |
+
|
| 520 |
+

|
| 521 |
+
|
| 522 |
+

|
| 523 |
+
|
| 524 |
+

|
| 525 |
+
Figure 13. Image-grounded inpainting. One can use reference images to ground the holes to be filled in.
|
| 526 |
+
|
| 527 |
+

|
| 528 |
+
|
| 529 |
+

|
| 530 |
+
|
| 531 |
+

|
| 532 |
+
|
| 533 |
+

|
| 534 |
+
|
| 535 |
+
Table 5. GLIP score on the LVIS validation set. The upper bound is obtained by running GLIP on real images scaled to $256 \times 256$.
|
| 536 |
+
|
| 537 |
+
<table><tr><td>Model</td><td>FID</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td></tr><tr><td>pix2pixHD [68]</td><td>142.4</td><td>15.8</td><td>33.7</td><td>13.0</td></tr><tr><td>GLIGEN (w/o caption)</td><td>31.02</td><td>31.8</td><td>53.5</td><td>31.0</td></tr><tr><td>GLIGEN (w caption)</td><td>27.34</td><td>31.5</td><td>52.9</td><td>31.0</td></tr><tr><td>Upper-bound</td><td>-</td><td>62.4</td><td>75.0</td><td>65.9</td></tr></table>
|
| 538 |
+
|
| 539 |
+
Both of our model variants produce similar results; see Table 6.
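As an illustration of this metric, one could run torchvision's off-the-shelf keypoint detector on the generated images; the AP numbers themselves would then come from the standard COCO evaluation tools.

```python
# Sketch of measuring keypoint correspondence with torchvision's
# Mask R-CNN keypoint detector run on generated images.
import torch
from torchvision.models.detection import keypointrcnn_resnet50_fpn

detector = keypointrcnn_resnet50_fpn(weights="DEFAULT").eval()

@torch.no_grad()
def detect_keypoints(images):
    # images: list of (3, H, W) float tensors in [0, 1].
    outputs = detector(images)
    # Each output holds 'keypoints' (N, 17, 3) and 'scores' (N,); these
    # predictions are then matched against the conditioning keypoints
    # (e.g., via COCOeval) to report AP / AP50 / AP75.
    return [(o["keypoints"], o["scores"]) for o in outputs]
```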
|
| 540 |
+
|
| 541 |
+
# E. Additional quantitative results
|
| 542 |
+
|
| 543 |
+
In this section, we show more studies with our model pretrained on our largest data (GoldG, O365, CC3M, SBU). We reported this model's zero-shot performance on LVIS [15] in Table 3 of the main paper. Here we finetune this model on LVIS and report its GLIP score in Table 5. Clearly, finetuning yields much more accurate generation results, surpassing the supervised baseline LAMA [40] by a large margin.
|
| 544 |
+
|
| 545 |
+
Similarly, we also test this model's zero-shot performance on the COCO2017 val set; the finetuning results are in Table 7. The results show the benefit of pretraining, which largely improves layout correspondence.
|
| 546 |
+
|
| 547 |
+
Table 6. Conditioning on human keypoints, evaluated on the COCO2017 validation set. The upper bound is calculated on real images scaled to $256 \times 256$.
|
| 548 |
+
|
| 549 |
+
<table><tr><td rowspan="2">Model</td><td rowspan="2">FID</td><td colspan="3">YOLO score</td></tr><tr><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td></tr><tr><td>LostGAN-V2 [62]</td><td>42.55</td><td>9.1</td><td>15.3</td><td>9.8</td></tr><tr><td>OCGAN [64]</td><td>41.65</td><td>-</td><td>-</td><td>-</td></tr><tr><td>HCSS [25]</td><td>33.68</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LAMA [40]</td><td>31.12</td><td>13.40</td><td>19.70</td><td>14.90</td></tr><tr><td>TwFA [71]</td><td>22.15</td><td>-</td><td>28.20</td><td>20.12</td></tr><tr><td>GLIGEN-LDM</td><td>21.04</td><td>22.4</td><td>36.5</td><td>24.1</td></tr></table>
|
| 550 |
+
|
| 551 |
+
After pretraining on GoldG, O365, SBU, CC3M:
|
| 552 |
+
|
| 553 |
+
<table><tr><td>GLIGEN-LDM (zero-shot)</td><td>27.03</td><td>19.1</td><td>30.5</td><td>20.8</td></tr><tr><td>GLIGEN-LDM (finetuned)</td><td>21.58</td><td>30.8</td><td>42.3</td><td>35.3</td></tr></table>
|
| 554 |
+
|
| 555 |
+
Table 7. Image quality and layout correspondence compared with baselines on the COCO2017 val set.
|
| 556 |
+
|
| 557 |
+
# F. Analysis on GLIGEN
|
| 558 |
+
|
| 559 |
+
To better understand GLIGEN, we study the box-grounded model. Specifically, we visualize the attention maps within the gated self-attention layers and examine how the learnable $\gamma$ in Eq 8 changes during training.
|
| 560 |
+
|
| 561 |
+
In the Figure 14, we first show a generation result using two grounding tokens (teddy bear; bird). Next to it, we visualize the attention maps of our added layers between the
|
| 562 |
+
|
| 563 |
+

|
| 564 |
+
Figure 14. Attention maps in one gated self-attention layer. The visualization results are from the sample at the first time step (i.e., Gaussian noise) in the middle layer of the UNet.
|
| 565 |
+
|
| 566 |
+

|
| 567 |
+
Figure 15. How the learnable $\gamma$ in the gated self-attention layer in the middle of the UNet changes during training.
|
| 568 |
+
|
| 569 |
+
visual features and the two grounding tokens, for all 8 heads of one middle layer in the UNet. Even at the first sampling step (when the input is Gaussian noise), the visual features start to attend to the grounding tokens with the correct spatial correspondence. This correspondence fades away in later sampling steps, which is consistent with our scheduled sampling technique, where we find that the rough layout is decided in the early sampling steps.
|
| 570 |
+
|
| 571 |
+
We also find the attention maps of the early layers of the UNet to be less interpretable at all sampling steps. We hypothesize that this is due to the lack of positional embeddings for the visual tokens, whereas position information can leak into later layers through the zero padding of the conv layers. This suggests that adding positional embeddings during diffusion model pretraining (e.g., Stable Diffusion training) could benefit downstream adaptation.
|
| 572 |
+
|
| 573 |
+
Figure 15 shows how the learned $\gamma$ at this layer (Eq 8) changes during training. We empirically find that the model starts to learn the correspondence around 60-70k iterations (around the peak in the plot). We hypothesize that the model first focuses on learning the spatial correspondence, and then dampens the new layers' contribution so that it can focus on image quality and details, since the original weights are frozen.
|
| 574 |
+
|
| 575 |
+
# G. More qualitative results
|
| 576 |
+
|
| 577 |
+
We show qualitative comparisons with layout2img baselines in Figure 11, which complements the results in Sec 5.1
|
| 578 |
+
|
| 579 |
+
of the main paper. The results show that our model has comparable image quality when built upon LDM, but more visual appeal and detail when built upon the Stable Diffusion model.
|
| 580 |
+
|
| 581 |
+
Lastly, we show more grounded text2img results with bounding boxes in Figure 16, and other grounding modalities in Figures 17-22. Note that our keypoint model only uses keypoint annotations from COCO [41], which are not linked to person identity, yet it can successfully combine the knowledge learned in the text2img training stage to control the keypoints of a specific person. Out of curiosity, we also tested whether the keypoint grounding learned on humans can transfer to non-humanoid categories such as cats or lamps for keypoint-grounded generation, but we find that our model struggles in such cases even with scheduled sampling. Compared to bounding boxes, which only specify a coarse location and size of an object and can thus be shared across all object categories, keypoints (i.e., object parts) are not always shareable across categories. Thus, while keypoints enable more fine-grained control than boxes, they are less generalizable.
|
| 582 |
+
|
| 583 |
+

|
| 584 |
+
|
| 585 |
+

|
| 586 |
+
|
| 587 |
+

|
| 588 |
+
|
| 589 |
+

|
| 590 |
+
|
| 591 |
+

|
| 592 |
+
|
| 593 |
+
Caption: "Space view of a planet and its sun"
|
| 594 |
+
|
| 595 |
+
Grounded text: planet, sun
|
| 596 |
+
|
| 597 |
+

|
| 598 |
+
|
| 599 |
+

|
| 600 |
+
|
| 601 |
+

|
| 602 |
+
|
| 603 |
+

|
| 604 |
+
|
| 605 |
+

|
| 606 |
+
|
| 607 |
+
Caption: "a photo of a hybrid between a bee and a rabbit"
|
| 608 |
+
|
| 609 |
+
Grounded text: hybrid between a bee and a rabbit, flower
|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+

|
| 614 |
+
|
| 615 |
+

|
| 616 |
+
|
| 617 |
+

|
| 618 |
+
|
| 619 |
+

|
| 620 |
+
|
| 621 |
+
Caption: "cartoon sketch of a little girl with a smile and balloons, old style, detailed, elegant, intricate"
|
| 622 |
+
|
| 623 |
+
Grounded text: girl with a smile, balloon, balloon, balloon
|
| 624 |
+
|
| 625 |
+

|
| 626 |
+
|
| 627 |
+

|
| 628 |
+
|
| 629 |
+

|
| 630 |
+
|
| 631 |
+

|
| 632 |
+
|
| 633 |
+

|
| 634 |
+
|
| 635 |
+
Caption: "Walter White in GTA v"
|
| 636 |
+
|
| 637 |
+
Grounded text: Walter White, car, bulldog
|
| 638 |
+
|
| 639 |
+

|
| 640 |
+
Figure 16. Bounding box grounded text2image generation. Our model can ground noun entities in the caption for controllable image generation.
|
| 641 |
+
|
| 642 |
+

|
| 643 |
+
|
| 644 |
+

|
| 645 |
+
|
| 646 |
+

|
| 647 |
+
|
| 648 |
+

|
| 649 |
+
|
| 650 |
+
Caption: "two pirate ships on the ocean in mycrafct"
|
| 651 |
+
|
| 652 |
+
Grounded text: a pirate ship, a pirate ship
|
| 653 |
+
|
| 654 |
+

|
| 655 |
+
|
| 656 |
+

|
| 657 |
+
|
| 658 |
+

|
| 659 |
+
|
| 660 |
+

|
| 661 |
+
|
| 662 |
+

|
| 663 |
+
|
| 664 |
+
Caption: "Steve Jobs is working with his laptop"
|
| 665 |
+
|
| 666 |
+
Grounded keypoints: plotted dots on the left
|
| 667 |
+
|
| 668 |
+

|
| 669 |
+
Figure 17. Results for keypoint-grounded generation.
|
| 670 |
+
|
| 671 |
+

|
| 672 |
+
|
| 673 |
+

|
| 674 |
+
|
| 675 |
+

|
| 676 |
+
|
| 677 |
+

|
| 678 |
+
|
| 679 |
+
Caption: "Barack Obama is sitting at a desk"
|
| 680 |
+
|
| 681 |
+
Grounded keypoints: plotted dots on the left
|
| 682 |
+
|
| 683 |
+

|
| 684 |
+
|
| 685 |
+

|
| 686 |
+
|
| 687 |
+

|
| 688 |
+
|
| 689 |
+

|
| 690 |
+
|
| 691 |
+

|
| 692 |
+
|
| 693 |
+
Caption: "a small church is sitting in a garden"
|
| 694 |
+
|
| 695 |
+
Grounded HED map: the left image
|
| 696 |
+
|
| 697 |
+

|
| 698 |
+
Figure 18. Results for HED map grounded generation.
|
| 699 |
+
|
| 700 |
+

|
| 701 |
+
|
| 702 |
+

|
| 703 |
+
|
| 704 |
+

|
| 705 |
+
|
| 706 |
+

|
| 707 |
+
|
| 708 |
+
Caption: "fox wallpaper, digit art, colorful"
|
| 709 |
+
|
| 710 |
+
Grounded HED map: the left image
|
| 711 |
+
|
| 712 |
+

|
| 713 |
+
|
| 714 |
+

|
| 715 |
+
|
| 716 |
+

|
| 717 |
+
|
| 718 |
+

|
| 719 |
+
|
| 720 |
+

|
| 721 |
+
|
| 722 |
+
Caption: "A Humanoid Robot Designed for Companionship"
|
| 723 |
+
|
| 724 |
+
Grounded canny map: the left image
|
| 725 |
+
|
| 726 |
+

|
| 727 |
+
Figure 19. Results for Canny map grounded generation.
|
| 728 |
+
|
| 729 |
+

|
| 730 |
+
|
| 731 |
+

|
| 732 |
+
|
| 733 |
+

|
| 734 |
+
|
| 735 |
+

|
| 736 |
+
|
| 737 |
+
Caption: "a chair and a table"
|
| 738 |
+
|
| 739 |
+
Grounded canny map: the left image
|
| 740 |
+
|
| 741 |
+

|
| 742 |
+
|
| 743 |
+

|
| 744 |
+
|
| 745 |
+

|
| 746 |
+
|
| 747 |
+

|
| 748 |
+
|
| 749 |
+

|
| 750 |
+
|
| 751 |
+
Caption: "a busy street with many people"
|
| 752 |
+
|
| 753 |
+
Grounded depth map: the left image
|
| 754 |
+
|
| 755 |
+

|
| 756 |
+
Figure 20. Results for depth map grounded generation.
|
| 757 |
+
|
| 758 |
+

|
| 759 |
+
|
| 760 |
+

|
| 761 |
+
|
| 762 |
+

|
| 763 |
+
|
| 764 |
+

|
| 765 |
+
|
| 766 |
+
Caption: "a butterfly, ultra details"
|
| 767 |
+
|
| 768 |
+
Grounded normal map: the left image
|
| 769 |
+
|
| 770 |
+

|
| 771 |
+
Figure 21. Results for normal map grounded generation.
|
| 772 |
+
|
| 773 |
+

|
| 774 |
+
Figure 22. Results for semantic map grounded generation.
|
2301.07xxx/2301.07093/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f2c7686b858d8d6343d42e9380128d38fc0676c1b868c0c73293d8653b4fd1ad
|
| 3 |
+
size 2251911
|
2301.07xxx/2301.07093/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.07xxx/2301.07094/d2878099-987f-4b33-b5e3-9a40b28616f0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5693ccd9916ce42059629ebad1c4da8e8191d6b666565bb57d7b922b36e5ca99
|
| 3 |
+
size 2003598
|
2301.07xxx/2301.07094/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.07xxx/2301.07094/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0d343c0fa18d1ba487ad46502889f5b1de6a1f746ec8d667742e6dd6d4e463d3
|
| 3 |
+
size 1122248
|
2301.07xxx/2301.07094/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_content_list.json
ADDED
|
@@ -0,0 +1,1907 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Heterogeneous Multi-Robot Reinforcement Learning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
158,
|
| 8 |
+
101,
|
| 9 |
+
841,
|
| 10 |
+
127
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Matteo Bettini",
|
| 17 |
+
"bbox": [
|
| 18 |
+
171,
|
| 19 |
+
137,
|
| 20 |
+
290,
|
| 21 |
+
152
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "University of Cambridge",
|
| 28 |
+
"bbox": [
|
| 29 |
+
147,
|
| 30 |
+
154,
|
| 31 |
+
313,
|
| 32 |
+
167
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Cambridge, United Kingdom",
|
| 39 |
+
"bbox": [
|
| 40 |
+
133,
|
| 41 |
+
169,
|
| 42 |
+
328,
|
| 43 |
+
184
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "mb2389@cl.cam.ac.uk",
|
| 50 |
+
"bbox": [
|
| 51 |
+
156,
|
| 52 |
+
185,
|
| 53 |
+
307,
|
| 54 |
+
198
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Ajay Shankar",
|
| 61 |
+
"bbox": [
|
| 62 |
+
442,
|
| 63 |
+
137,
|
| 64 |
+
557,
|
| 65 |
+
154
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "University of Cambridge",
|
| 72 |
+
"bbox": [
|
| 73 |
+
416,
|
| 74 |
+
154,
|
| 75 |
+
583,
|
| 76 |
+
167
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Cambridge, United Kingdom",
|
| 83 |
+
"bbox": [
|
| 84 |
+
401,
|
| 85 |
+
169,
|
| 86 |
+
596,
|
| 87 |
+
183
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "as3233@cl.cam.ac.uk",
|
| 94 |
+
"bbox": [
|
| 95 |
+
428,
|
| 96 |
+
185,
|
| 97 |
+
571,
|
| 98 |
+
198
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "Amanda Prorok",
|
| 105 |
+
"bbox": [
|
| 106 |
+
702,
|
| 107 |
+
137,
|
| 108 |
+
833,
|
| 109 |
+
152
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "University of Cambridge",
|
| 116 |
+
"bbox": [
|
| 117 |
+
684,
|
| 118 |
+
154,
|
| 119 |
+
851,
|
| 120 |
+
167
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "Cambridge, United Kingdom",
|
| 127 |
+
"bbox": [
|
| 128 |
+
669,
|
| 129 |
+
169,
|
| 130 |
+
864,
|
| 131 |
+
184
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "asp45@cl.cam.ac.uk",
|
| 138 |
+
"bbox": [
|
| 139 |
+
700,
|
| 140 |
+
185,
|
| 141 |
+
836,
|
| 142 |
+
198
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"text": "ABSTRACT",
|
| 149 |
+
"text_level": 1,
|
| 150 |
+
"bbox": [
|
| 151 |
+
83,
|
| 152 |
+
208,
|
| 153 |
+
184,
|
| 154 |
+
222
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "text",
|
| 160 |
+
"text": "Cooperative multi-robot tasks can benefit from heterogeneity in the robots' physical and behavioral traits. In spite of this, traditional Multi-Agent Reinforcement Learning (MARL) frameworks lack the ability to explicitly accommodate policy heterogeneity, and typically constrain agents to share neural network parameters. This enforced homogeneity limits application in cases where the tasks benefit from heterogeneous behaviors. In this paper, we crystallize the role of heterogeneity in MARL policies. Towards this end, we introduce Heterogeneous Graph Neural Network Proximal Policy Optimization (HetGPPO), a paradigm for training heterogeneous MARL policies that leverages a Graph Neural Network for differentiable inter-agent communication. HetGPPO allows communicating agents to learn heterogeneous behaviors while enabling fully decentralized training in partially observable environments. We complement this with a taxonomical overview that exposes more heterogeneity classes than previously identified. To motivate the need for our model, we present a characterization of techniques that homogeneous models can leverage to emulate heterogeneous behavior, and show how this \"apparent heterogeneity\" is brittle in real-world conditions. Through simulations and real-world experiments, we show that: (i) when homogeneous methods fail due to strong heterogeneous requirements, HetGPPO succeeds, and, (ii) when homogeneous methods are able to learn apparently heterogeneous behaviors, HetGPPO achieves higher resilience to both training and deployment noise.",
|
| 161 |
+
"bbox": [
|
| 162 |
+
81,
|
| 163 |
+
226,
|
| 164 |
+
483,
|
| 165 |
+
574
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "text",
|
| 171 |
+
"text": "KEYWORDS",
|
| 172 |
+
"text_level": 1,
|
| 173 |
+
"bbox": [
|
| 174 |
+
83,
|
| 175 |
+
590,
|
| 176 |
+
191,
|
| 177 |
+
604
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 0
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "Heterogeneity, Multi-agent reinforcement learning, Multi-robot systems",
|
| 184 |
+
"bbox": [
|
| 185 |
+
81,
|
| 186 |
+
608,
|
| 187 |
+
482,
|
| 188 |
+
638
|
| 189 |
+
],
|
| 190 |
+
"page_idx": 0
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"text": "1 INTRODUCTION",
|
| 195 |
+
"text_level": 1,
|
| 196 |
+
"bbox": [
|
| 197 |
+
83,
|
| 198 |
+
669,
|
| 199 |
+
256,
|
| 200 |
+
683
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 0
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "Multi-robot systems deployed to tackle complex cooperative tasks can often benefit from heterogeneous physical and/or behavioral traits to fulfill their mission. Such heterogeneous systems have been leveraged in applications such as disaster response [37], collaborative mapping [8], agriculture [26], and package transport [22]. However, synthesizing optimal decentralized policies for these tasks can be computationally hard, and typically scales exponentially with the number of agents [5]. While faster and scalable solutions exist, such as metaheuristics [9], they lack in optimality. Multi-Agent Reinforcement Learning (MARL) [61] can be used as a scalable approach to find near-optimal solutions to these problems. However, MARL algorithms without inter-agent communication cannot be easily applied to real-world robotic problems, where partial observability of individual agents is pervasive. Communication is key to overcoming this partial observability, and to enable cooperation.",
|
| 207 |
+
"bbox": [
|
| 208 |
+
81,
|
| 209 |
+
686,
|
| 210 |
+
482,
|
| 211 |
+
896
|
| 212 |
+
],
|
| 213 |
+
"page_idx": 0
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"type": "image",
|
| 217 |
+
"img_path": "images/8aa5a12bacdaf6940fd11b7f744bc82006d4ffe7dcb2e66009f2a69b03221552.jpg",
|
| 218 |
+
"image_caption": [
|
| 219 |
+
"Figure 1: Taxonomy of heterogeneous multi-robot/agent systems. Top: the three heterogeneity classes $(\\mathcal{P},\\mathcal{B}_s,\\mathcal{B}_d)$ . Bottom: the five mutually exclusive heterogeneity subclasses. Every heterogeneous system belongs to one of these subclasses."
|
| 220 |
+
],
|
| 221 |
+
"image_footnote": [],
|
| 222 |
+
"bbox": [
|
| 223 |
+
516,
|
| 224 |
+
205,
|
| 225 |
+
913,
|
| 226 |
+
396
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 0
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "Our work deals with heterogeneous multi-robot reinforcement learning, a paradigm located at the boundary of MARL (with inter-agent communication) and heterogeneous multi-robot systems.",
|
| 233 |
+
"bbox": [
|
| 234 |
+
511,
|
| 235 |
+
479,
|
| 236 |
+
913,
|
| 237 |
+
521
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 0
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "text",
|
| 243 |
+
"text": "Most cooperative MARL works constrain agents to share policy neural network parameters to improve training sample efficiency [24, 44, 53]. This causes the agents' models to be identical and, thus, homogeneous. While this is beneficial to speed-up training, it can prevent learning in scenarios that require heterogeneous behavior. A classical method of overcoming this imposed homogeneity is to include a unique integer (e.g., the agent's index) as part of each agent's observations [19, 24]. This allows the agents to share the same policy while exhibiting apparently different behavior. Despite its wide adoption, this solution has many drawbacks [12].",
|
| 244 |
+
"bbox": [
|
| 245 |
+
511,
|
| 246 |
+
521,
|
| 247 |
+
915,
|
| 248 |
+
660
|
| 249 |
+
],
|
| 250 |
+
"page_idx": 0
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"type": "text",
|
| 254 |
+
"text": "We are interested in learning truly heterogeneous decentralized MARL policies. While it is common practice to learn heterogeneous policies when optimizing for different objectives [33], there is a dearth of work in applying this paradigm to scenarios where the objective is shared. Current solutions are few and tailored to specific tasks, and, as such, do not address the broader study and categorization of heterogeneity in MARL. Furthermore, they are limited to noise-free videogame-like MARL benchmarks [30, 45], without considering real-world multi-robot tasks with inter-agent communication. Therefore, we need a framework that enables true heterogeneity among communicating MARL agents and can learn policies that run in a decentralized fashion for (real-world) heterogeneous multi-robot systems.",
|
| 255 |
+
"bbox": [
|
| 256 |
+
511,
|
| 257 |
+
660,
|
| 258 |
+
915,
|
| 259 |
+
840
|
| 260 |
+
],
|
| 261 |
+
"page_idx": 0
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"type": "text",
|
| 265 |
+
"text": "In this work, we introduce Heterogeneous Graph Neural Network Proximal Policy Optimization (HetGPPO), a paradigm for heterogeneous MARL that overcomes the aforementioned issues. HetGPPO is a framework for training heterogeneous MARL policies",
|
| 266 |
+
"bbox": [
|
| 267 |
+
511,
|
| 268 |
+
840,
|
| 269 |
+
913,
|
| 270 |
+
896
|
| 271 |
+
],
|
| 272 |
+
"page_idx": 0
|
| 273 |
+
},
|
| 274 |
+
{
|
| 275 |
+
"type": "aside_text",
|
| 276 |
+
"text": "arXiv:2301.07137v1 [cs.RO] 17 Jan 2023",
|
| 277 |
+
"bbox": [
|
| 278 |
+
22,
|
| 279 |
+
267,
|
| 280 |
+
57,
|
| 281 |
+
707
|
| 282 |
+
],
|
| 283 |
+
"page_idx": 0
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"type": "text",
|
| 287 |
+
"text": "that leverages a Graph Neural Network (GNN) for differentiable inter-agent communication. Our architecture enables learning for heterogeneous agents while being conditioned only on local communication and local observations. This enables to train HetGPPO in a decentralized fashion, in-line with the Decentralized Training Decentralized Execution (DTDE) paradigm [25].",
|
| 288 |
+
"bbox": [
|
| 289 |
+
81,
|
| 290 |
+
106,
|
| 291 |
+
480,
|
| 292 |
+
189
|
| 293 |
+
],
|
| 294 |
+
"page_idx": 1
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"type": "text",
|
| 298 |
+
"text": "We begin by presenting a taxonomy of heterogeneous systems in Sec. 2. The purpose of this taxonomy is to classify such systems according to the source of their heterogeneity. We use this taxonomy in Sec. 3 to categorize related works in the domains of multi-robot systems and MARL. Sec. 4 formulates the MARL problem tackled in this paper. In Sec. 5, we introduce HetGPPO and its homogeneous counterpart GPPO. To motivate the need for policy heterogeneity, we distill and define the techniques that homogeneous models use to emulate heterogeneous behavior (Sec. 6). Through example scenarios, we demonstrate how these technique work and how they can prove brittle in real-world conditions when compared to truly heterogeneous models. Finally, in Sec. 7, we present evaluations of our framework both in simulated and real-world multi-robot cooperative scenarios. These show that: (i) when homogeneous methods fail due to strong heterogeneous requirements, HetGPPO succeeds, and, (ii) when homogeneous methods are able to learn apparently heterogeneous behaviors, HetGPPO achieves higher resilience to both training and deployment noise. Furthermore, our real-robot experiments demonstrate how heterogeneous policies are intrinsically more resilient to real-world conditions.",
|
| 299 |
+
"bbox": [
|
| 300 |
+
81,
|
| 301 |
+
189,
|
| 302 |
+
482,
|
| 303 |
+
465
|
| 304 |
+
],
|
| 305 |
+
"page_idx": 1
|
| 306 |
+
},
|
| 307 |
+
{
|
| 308 |
+
"type": "text",
|
| 309 |
+
"text": "In this paper, we demonstrate the power of heterogeneous MARL applied to real-world multi-robot systems. We claim the following key contributions:",
|
| 310 |
+
"bbox": [
|
| 311 |
+
81,
|
| 312 |
+
467,
|
| 313 |
+
480,
|
| 314 |
+
508
|
| 315 |
+
],
|
| 316 |
+
"page_idx": 1
|
| 317 |
+
},
|
| 318 |
+
{
|
| 319 |
+
"type": "list",
|
| 320 |
+
"sub_type": "text",
|
| 321 |
+
"list_items": [
|
| 322 |
+
"(1) A taxonomy of heterogeneous systems that jointly categorizes research in the multi-robot and multi-agent domains;",
|
| 323 |
+
"(2) A discourse on behavioral typing techniques that homogeneous models rely on to emulate heterogeneous behavior, with empirical evidence for their brittleness in deployment;",
|
| 324 |
+
"(3) HetGPPO, a MARL model able to learn heterogeneous communicating policies in a decentralized fashion; and,",
|
| 325 |
+
"(4) Detailed evaluations of the performance and resilience of heterogeneous policies compared to homogeneous ones in several cooperative multi-robot tasks, both through simulations and real-world experiments."
|
| 326 |
+
],
|
| 327 |
+
"bbox": [
|
| 328 |
+
99,
|
| 329 |
+
513,
|
| 330 |
+
482,
|
| 331 |
+
666
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 1
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"text": "2 TAXONOMY OF HETEROGENEOUS SYSTEMS",
|
| 338 |
+
"text_level": 1,
|
| 339 |
+
"bbox": [
|
| 340 |
+
83,
|
| 341 |
+
681,
|
| 342 |
+
406,
|
| 343 |
+
710
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 1
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "text",
|
| 349 |
+
"text": "In spite of a substantial body of work attempting to stimulate research on heterogeneous systems (see [1] and the references therein), the robotics and learning community still lacks a shared and structured taxonomy of heterogeneous systems. To properly characterize the related works in the heterogeneity (diversity) domain, we introduce a taxonomy of heterogeneous systems, shown in Fig. 1. According to our taxonomy, system heterogeneity is categorized in two classes: Physical $(\\mathcal{P})$ and Behavioral $(\\mathcal{B})$ .",
|
| 350 |
+
"bbox": [
|
| 351 |
+
81,
|
| 352 |
+
715,
|
| 353 |
+
482,
|
| 354 |
+
825
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 1
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "A team is considered physically $(\\mathcal{P})$ heterogeneous when at least one of its components (i.e., agents, robots) differs from the others in terms of hardware or physical constraints. That is, it might have different sensors, actuators, motion constraints, etc. These physical differences might lead to different capabilities. For example, a small",
|
| 361 |
+
"bbox": [
|
| 362 |
+
81,
|
| 363 |
+
825,
|
| 364 |
+
482,
|
| 365 |
+
896
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 1
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "text",
|
| 371 |
+
"text": "drone might be able to fly and move aggressively, but likely has shorter battery life than a big and slow ground robot. This type of heterogeneity can lead to different observation and action spaces in the context of learning, for example when robots are equipped with different sensors or actuators.",
|
| 372 |
+
"bbox": [
|
| 373 |
+
511,
|
| 374 |
+
106,
|
| 375 |
+
913,
|
| 376 |
+
175
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 1
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "text",
|
| 382 |
+
"text": "A team is considered behaviorally $(\\mathcal{B})$ heterogeneous when at least one of its components differs from the others in terms of software or behavioral model. That is, two behaviorally heterogeneous agents can produce distinct policy outputs when observing the same input. For example, two physically identical drones might cooperate to monitor a site: here, one drone can survey from far away and direct the other to areas that need closer inspection. Behavioral heterogeneity is divided in two: Same objective $(\\mathcal{B}_s)$ and Different objective $(\\mathcal{B}_d)$ . In $\\mathcal{B}_s$ heterogeneous systems, agents optimize the same objective function through heterogeneous behavior. In MARL, this means that they share the same (global or local) reward function. $\\mathcal{B}_s$ heterogeneous systems usually represent cooperative settings [11]. However, they could also model adversarial scenarios where agents with the same objective compete for limited resources [7]. In $\\mathcal{B}_d$ heterogeneous systems, agents optimize different objective functions through heterogeneous behavior. In MARL, this means that they have different local reward functions or a global reward deriving from the composition of such local functions. $\\mathcal{B}_d$ heterogeneous systems usually represent non-cooperative or adversarial settings [33]. However, they could also model cooperative scenarios where agents optimize different sub-functions for a higher-order task [12]. For example, in cooperative search and rescue scenarios, one robot might only be tasked to remove debris, while the others are tasked with the search in an uncluttered space.",
|
| 383 |
+
"bbox": [
|
| 384 |
+
511,
|
| 385 |
+
176,
|
| 386 |
+
913,
|
| 387 |
+
507
|
| 388 |
+
],
|
| 389 |
+
"page_idx": 1
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"type": "text",
|
| 393 |
+
"text": "Physical and behavioral heterogeneity are not mutually exclusive. Thus, the three heterogeneity classes introduced $(\\mathcal{P},\\mathcal{B}_s,\\mathcal{B}_d)$ delineate five heterogeneity subclasses that a system can belong to:",
|
| 394 |
+
"bbox": [
|
| 395 |
+
513,
|
| 396 |
+
508,
|
| 397 |
+
913,
|
| 398 |
+
550
|
| 399 |
+
],
|
| 400 |
+
"page_idx": 1
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"type": "list",
|
| 404 |
+
"sub_type": "text",
|
| 405 |
+
"list_items": [
|
| 406 |
+
"- $\\mathcal{P} \\setminus \\mathcal{B}$ : Agents are physically different but share the same behavioral model.",
|
| 407 |
+
"- $\\mathcal{P} \\cap \\mathcal{B}_d$ : Agents are physically different and differ in behavioral models and objectives.",
|
| 408 |
+
"- $\\mathcal{P} \\cap \\mathcal{B}_s$ : Agents are physically different and differ in behavioral models, but share the same objective.",
|
| 409 |
+
"- $\\mathcal{B}_s\\setminus \\mathcal{P}$ : Agents are physically identical and share the same objective but differ in behavioral models.",
|
| 410 |
+
"- $\\mathcal{B}_d\\setminus \\mathcal{P}$ : Agents are physically identical but differ in behavioral models and objectives."
|
| 411 |
+
],
|
| 412 |
+
"bbox": [
|
| 413 |
+
540,
|
| 414 |
+
553,
|
| 415 |
+
913,
|
| 416 |
+
691
|
| 417 |
+
],
|
| 418 |
+
"page_idx": 1
|
| 419 |
+
},
|
| 420 |
+
{
|
| 421 |
+
"type": "text",
|
| 422 |
+
"text": "While this taxonomy is concerned with classifying heterogeneous systems, it does not attempt to measure the degree of heterogeneity. Furthermore, it represents a high-level classification and does not consider dynamic $\\mathcal{P}$ heterogeneity, such as different battery levels or hardware deterioration [50].",
|
| 423 |
+
"bbox": [
|
| 424 |
+
511,
|
| 425 |
+
695,
|
| 426 |
+
913,
|
| 427 |
+
763
|
| 428 |
+
],
|
| 429 |
+
"page_idx": 1
|
| 430 |
+
},
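To make the subclass boundaries concrete, the taxonomy can be written as a small predicate over two team properties. The sketch below is ours, not the paper's: the flag names and the enum are illustrative assumptions.

```python
from enum import Enum
from typing import Optional

class HeterogeneityClass(Enum):
    """The five subclasses delineated by P, B_s, and B_d (Sec. 2)."""
    P_ONLY = "P \\ B"      # physically different, same behavioral model
    P_AND_BD = "P n B_d"   # physically different, different behaviors and objectives
    P_AND_BS = "P n B_s"   # physically different, different behaviors, same objective
    BS_ONLY = "B_s \\ P"   # physically identical, different behaviors, same objective
    BD_ONLY = "B_d \\ P"   # physically identical, different behaviors and objectives

def classify(physically_different: bool, behaviorally_different: bool,
             same_objective: bool) -> Optional[HeterogeneityClass]:
    """Map a team's properties onto a subclass (None = fully homogeneous)."""
    if physically_different and not behaviorally_different:
        return HeterogeneityClass.P_ONLY
    if physically_different:  # and behaviorally different
        return HeterogeneityClass.P_AND_BS if same_objective else HeterogeneityClass.P_AND_BD
    if behaviorally_different:
        return HeterogeneityClass.BS_ONLY if same_objective else HeterogeneityClass.BD_ONLY
    return None

# Physically identical robots with one shared reward but distinct learned
# policies (the setting of most experiments in this paper) fall in B_s \ P:
assert classify(False, True, True) is HeterogeneityClass.BS_ONLY
```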
|
| 431 |
+
{
|
| 432 |
+
"type": "text",
|
| 433 |
+
"text": "3 RELATED WORK",
|
| 434 |
+
"text_level": 1,
|
| 435 |
+
"bbox": [
|
| 436 |
+
514,
|
| 437 |
+
776,
|
| 438 |
+
689,
|
| 439 |
+
789
|
| 440 |
+
],
|
| 441 |
+
"page_idx": 1
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"type": "text",
|
| 445 |
+
"text": "In this section, we review the current state of the art in the area of heterogeneous multi-robot/agent systems. We classify the related works according to our taxonomy in Tab. 1.",
|
| 446 |
+
"bbox": [
|
| 447 |
+
511,
|
| 448 |
+
795,
|
| 449 |
+
913,
|
| 450 |
+
837
|
| 451 |
+
],
|
| 452 |
+
"page_idx": 1
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"type": "text",
|
| 456 |
+
"text": "3.1 Heterogeneity in multi-robot systems",
|
| 457 |
+
"text_level": 1,
|
| 458 |
+
"bbox": [
|
| 459 |
+
513,
|
| 460 |
+
849,
|
| 461 |
+
864,
|
| 462 |
+
864
|
| 463 |
+
],
|
| 464 |
+
"page_idx": 1
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"type": "text",
|
| 468 |
+
"text": "The core literature on heterogeneous robotics has generally focused on developing coordination algorithms that leverage the",
|
| 469 |
+
"bbox": [
|
| 470 |
+
511,
|
| 471 |
+
867,
|
| 472 |
+
913,
|
| 473 |
+
896
|
| 474 |
+
],
|
| 475 |
+
"page_idx": 1
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"type": "table",
|
| 479 |
+
"img_path": "images/f8336f1e9194de44c5d8227a3bf8cbcca150c223b263f524a12a085fbaaf467a.jpg",
|
| 480 |
+
"table_caption": [
|
| 481 |
+
"Table 1: Related work in heterogeneous multi-robot/agent systems classified according to our taxonomy of Sec. 2."
|
| 482 |
+
],
|
| 483 |
+
"table_footnote": [],
|
| 484 |
+
"table_body": "<table><tr><td>Heterogeneity class</td><td>Multi-robot systems</td><td>MARL</td></tr><tr><td>P\\B</td><td>[8]</td><td>[55],[54]</td></tr><tr><td>P∩Bd</td><td>[37]</td><td>[33],[12]</td></tr><tr><td>P∩Bs</td><td>[17],[40],[18],[36],[39],[43],[42],[46],[28],[34],[35],[10],[14]</td><td>[49]</td></tr><tr><td>Bs\\P</td><td>[1],[4],[2],[3],[31],[52],[56]</td><td>[57],[11],[58]</td></tr><tr><td>Bd\\P</td><td>[23],[47]</td><td>[33],[12]</td></tr></table>",
|
| 485 |
+
"bbox": [
|
| 486 |
+
83,
|
| 487 |
+
142,
|
| 488 |
+
480,
|
| 489 |
+
281
|
| 490 |
+
],
|
| 491 |
+
"page_idx": 2
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "text",
|
| 495 |
+
"text": "physical heterogeneity of a team to their advantage. Therefore, these works fall in the $\\mathcal{P} \\cap \\mathcal{B}_s$ class. Such diversity can manifest itself in the form of different sensor ranges [42], diverse sensing capabilities [46], or different maximum speeds [28]. These differences can then be exploited in a variety of problems such as multi-robot coverage [28, 42, 46] and heterogeneous task assignment [40, 43], with resilient formulations that can handle uncertainties in robot capabilities [17] or the environment [18, 36, 39]. Sensor heterogeneity has also received attention in the context of active sampling and mapping [34, 35], where heterogeneous computational resources can impact task execution [10]. Lastly, $\\mathcal{P} \\cap \\mathcal{B}_s$ diversity has also been investigated in more complex problems such as heterogeneous trajectory planning [14].",
|
| 496 |
+
"bbox": [
|
| 497 |
+
81,
|
| 498 |
+
328,
|
| 499 |
+
482,
|
| 500 |
+
507
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 2
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "text",
|
| 506 |
+
"text": "Interestingly, such physical diversity without behavioral diversity $(\\mathcal{P} \\setminus \\mathcal{B})$ can often represent a constraint for the problem. Works in this heterogeneity class try to behaviorally reconcile the physical heterogeneity of robots in order to apply homogeneous solutions to the problem at hand. Heterogeneous multi-robot SLAM is an example application where scans coming from different robots, equipped with diverse sensors, need to be matched in order to build a homogeneous shared map [8].",
|
| 507 |
+
"bbox": [
|
| 508 |
+
81,
|
| 509 |
+
508,
|
| 510 |
+
482,
|
| 511 |
+
619
|
| 512 |
+
],
|
| 513 |
+
"page_idx": 2
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "text",
|
| 517 |
+
"text": "Behavioral heterogeneity for physically identical robots is a less explored but promising research direction [1]. Works in this area mostly tackle cooperative problems, leveraging $\\mathcal{B}_s \\setminus \\mathcal{P}$ heterogeneity. Early research by Balch [2, 3] and Li et al. [31] focuses on learning behavioral specialization for multi-robot teams using RL. Game-theoretic autonomous racing [52, 56] constitutes an adversarial setting of $\\mathcal{B}_s \\setminus \\mathcal{P}$ heterogeneity. Note that game-theoretic controllers do not present heterogeneous behavior when all players use the symmetric Nash equilibrium strategy [38]. However, heterogeneity emerges when some robots in the team use traditional model predictive controllers.",
|
| 518 |
+
"bbox": [
|
| 519 |
+
81,
|
| 520 |
+
619,
|
| 521 |
+
482,
|
| 522 |
+
770
|
| 523 |
+
],
|
| 524 |
+
"page_idx": 2
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "text",
|
| 528 |
+
"text": "Conversely, heterogeneous behavior with different objectives $(\\mathcal{B}_d)$ has also been analyzed for cooperative robotic tasks, for instance, by dividing a global task into sub-tasks for groups of identical robots $(\\mathcal{B}_d \\setminus \\mathcal{P})$ [23, 47]. When the robots additionally have physical differences between sub-groups, these differences can be leveraged to tackle complex multi-robot tasks, such as post-disaster collaborative mapping [37], resulting in $\\mathcal{P} \\cap \\mathcal{B}_d$ heterogeneity.",
|
| 529 |
+
"bbox": [
|
| 530 |
+
81,
|
| 531 |
+
771,
|
| 532 |
+
482,
|
| 533 |
+
867
|
| 534 |
+
],
|
| 535 |
+
"page_idx": 2
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "text",
|
| 539 |
+
"text": "All the works discussed in this subsection focus on a given heterogeneity class and problem, and develop a targeted solution for",
|
| 540 |
+
"bbox": [
|
| 541 |
+
81,
|
| 542 |
+
867,
|
| 543 |
+
482,
|
| 544 |
+
896
|
| 545 |
+
],
|
| 546 |
+
"page_idx": 2
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "text",
|
| 550 |
+
"text": "that setting. To a large extent, the approaches leverage conventional control theoretical methods. Our work, in contrast, proposes a learning-based framework to synthesize communicating multiagent/robot policies, and can be applied to any heterogeneity class.",
|
| 551 |
+
"bbox": [
|
| 552 |
+
513,
|
| 553 |
+
107,
|
| 554 |
+
915,
|
| 555 |
+
162
|
| 556 |
+
],
|
| 557 |
+
"page_idx": 2
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "text",
|
| 561 |
+
"text": "3.2 Heterogeneity in MARL",
|
| 562 |
+
"text_level": 1,
|
| 563 |
+
"bbox": [
|
| 564 |
+
514,
|
| 565 |
+
185,
|
| 566 |
+
754,
|
| 567 |
+
200
|
| 568 |
+
],
|
| 569 |
+
"page_idx": 2
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"type": "text",
|
| 573 |
+
"text": "MARL has recently gained increasing traction as an effective technique to tackle multi-robot problems [61]. Using MARL, it is possible to synthesize efficient decentralized multi-agent controllers for hard coordination problems [5]. Homogeneous policies (that share parameters) for physically identical agents are abundant in MARL [20, 24, 29, 44, 53] and constitute the core of the research literature. In an attempt to emulate heterogeneous behavior, a common practice is to augment each agent's observation space with a unique index that represents the agent's type [19, 24]. In this case, agents share the same homogeneous multimodal policy, conditioned on a unique constant index. We define and discuss in depth the limitations of this approach in Sec. 6. $\\mathcal{P} \\setminus \\mathcal{B}$ heterogeneity in MARL focuses on leveraging the power of parameter sharing and homogeneous training for physically different agents. This is achieved by mapping heterogeneous observation spaces into homogeneous fixed-length encodings [55], or by padding and including the agent index into observations [54].",
|
| 574 |
+
"bbox": [
|
| 575 |
+
511,
|
| 576 |
+
203,
|
| 577 |
+
913,
|
| 578 |
+
438
|
| 579 |
+
],
|
| 580 |
+
"page_idx": 2
|
| 581 |
+
},
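The padding trick referenced above ([54, 55]) is easy to sketch. The snippet below is a minimal illustration under our own naming, not the code of either work: heterogeneous observation vectors are zero-padded to a common length so one shared network can consume them.

```python
import torch

def pad_obs(obs: torch.Tensor, max_obs_dim: int) -> torch.Tensor:
    """Zero-pad a heterogeneous observation to one fixed length so that a
    single shared (parameter-sharing) policy can consume every agent's input."""
    padded = torch.zeros(max_obs_dim)
    padded[: obs.shape[0]] = obs
    return padded

# Robots with 3- and 5-dimensional sensor readings share one 5-dim input space:
assert pad_obs(torch.randn(3), 5).shape == pad_obs(torch.randn(5), 5).shape
```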
|
| 582 |
+
{
|
| 583 |
+
"type": "text",
|
| 584 |
+
"text": "The majority of heterogeneous MARL literature falls in the $\\mathcal{B}$ heterogeneity class. Different behavioral roles for physically identical agents can be learned through various techniques, such as conditioning agents' policies on a latent representation [57], decomposing and clustering action spaces [58], or by an intrinsic reward that maximizes the mutual information between the agent's trajectory and its role [11]. All the aforementioned works consider physically identical agents with the same objective, thus leveraging $\\mathcal{B}_s \\setminus \\mathcal{P}$ heterogeneity. Furthermore, they do not use inter-agent communication, and hence their application to highly partially observable coordination problems is limited. When considering physically different robots, heterogeneous action or observation spaces have to be taken into account. Such $\\mathcal{P} \\cap \\mathcal{B}_s$ heterogeneity with communicating agents can be modeled, for instance, by an ad-hoc GNN layer for each physically different robot type [49]. While this may be suitable for some tasks where robot types are known beforehand, it prevents physically identical agents from learning heterogeneous behavior.",
|
| 585 |
+
"bbox": [
|
| 586 |
+
511,
|
| 587 |
+
439,
|
| 588 |
+
913,
|
| 589 |
+
686
|
| 590 |
+
],
|
| 591 |
+
"page_idx": 2
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"type": "text",
|
| 595 |
+
"text": "Behavioral heterogeneity with different objectives $(\\mathcal{B}_d)$ emerges due to different agent reward functions, as discussed in Sec. 2. MADDPG [33] uses this paradigm in a centralized training approach to learn individual (not shared) actors and critics. They test their approach in mixed cooperative-competitive tasks. In these tasks, both physically identical and physically different agents (i.e., different maximum speeds) are considered. Thus, MADDPG leverages heterogeneity classes $\\mathcal{B}_d \\setminus \\mathcal{P}$ and $\\mathcal{P} \\cap \\mathcal{B}_d$ . The same heterogeneity classes are studied in [12], which proposes to use parameter sharing among sub-groups of agents which are physically identical and share the same reward function. This approach, however, prevents physically identical agents with the same objective to employ different behavioral roles to solve a task.",
|
| 596 |
+
"bbox": [
|
| 597 |
+
511,
|
| 598 |
+
688,
|
| 599 |
+
913,
|
| 600 |
+
867
|
| 601 |
+
],
|
| 602 |
+
"page_idx": 2
|
| 603 |
+
},
|
| 604 |
+
{
|
| 605 |
+
"type": "text",
|
| 606 |
+
"text": "Most works discussed in this section propose solutions to problems that sit exclusively within one given heterogeneity subclass.",
|
| 607 |
+
"bbox": [
|
| 608 |
+
511,
|
| 609 |
+
867,
|
| 610 |
+
913,
|
| 611 |
+
896
|
| 612 |
+
],
|
| 613 |
+
"page_idx": 2
|
| 614 |
+
},
|
| 615 |
+
{
|
| 616 |
+
"type": "text",
|
| 617 |
+
"text": "While a selected few could be applied to multiple classes [11, 33, 57], they leverage centralized training methods and do not consider inter-agent communication. These are two key features needed to make MARL suitable for multi-robot problems.",
|
| 618 |
+
"bbox": [
|
| 619 |
+
81,
|
| 620 |
+
106,
|
| 621 |
+
480,
|
| 622 |
+
161
|
| 623 |
+
],
|
| 624 |
+
"page_idx": 3
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"type": "text",
|
| 628 |
+
"text": "4 PROBLEM FORMULATION",
|
| 629 |
+
"text_level": 1,
|
| 630 |
+
"bbox": [
|
| 631 |
+
83,
|
| 632 |
+
176,
|
| 633 |
+
336,
|
| 634 |
+
191
|
| 635 |
+
],
|
| 636 |
+
"page_idx": 3
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "text",
|
| 640 |
+
"text": "We now formulate the multi-robot MARL problem tackled in this work. To do so, we first introduce the multi-agent extension of a Partially Observable Markov Decision Process (POMDP) [27].",
|
| 641 |
+
"bbox": [
|
| 642 |
+
81,
|
| 643 |
+
195,
|
| 644 |
+
480,
|
| 645 |
+
236
|
| 646 |
+
],
|
| 647 |
+
"page_idx": 3
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "text",
|
| 651 |
+
"text": "Partially Observable Markov Games. A Partially Observable Markov Game (POMG) is defined as a tuple",
|
| 652 |
+
"bbox": [
|
| 653 |
+
81,
|
| 654 |
+
237,
|
| 655 |
+
480,
|
| 656 |
+
263
|
| 657 |
+
],
|
| 658 |
+
"page_idx": 3
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"type": "equation",
|
| 662 |
+
"text": "\n$$\n\\left\\langle \\mathcal {V}, S, \\mathcal {O}, \\{\\sigma_ {i} \\} _ {i \\in \\mathcal {V}}, \\mathcal {A}, \\{\\mathcal {R} _ {i} \\} _ {i \\in \\mathcal {V}}, \\mathcal {T}, \\gamma \\right\\rangle ,\n$$\n",
|
| 663 |
+
"text_format": "latex",
|
| 664 |
+
"bbox": [
|
| 665 |
+
161,
|
| 666 |
+
273,
|
| 667 |
+
403,
|
| 668 |
+
292
|
| 669 |
+
],
|
| 670 |
+
"page_idx": 3
|
| 671 |
+
},
|
| 672 |
+
{
|
| 673 |
+
"type": "text",
|
| 674 |
+
"text": "where $\\mathcal{V} = \\{1, \\dots, n\\}$ denotes the set of agents, $S$ is the state space, shared by all agents, and, $O \\equiv O_1 \\times \\ldots \\times O_n$ and $\\mathcal{A} \\equiv \\mathcal{A}_1 \\times \\ldots \\times \\mathcal{A}_n$ are the observation and action spaces, with $O_i \\subseteq S$ , $\\forall i \\in \\mathcal{V}$ . Further, $\\{\\sigma_i\\}_{i \\in \\mathcal{V}}$ and $\\{\\mathcal{R}_i\\}_{i \\in \\mathcal{V}}$ are the agent observation and reward functions<sup>1</sup>, such that $\\sigma_i : S \\mapsto O_i$ , and, $\\mathcal{R}_i : S \\times \\mathcal{A} \\times S \\mapsto \\mathbb{R}$ . $\\mathcal{T}$ is the stochastic state transition model, defined as $\\mathcal{T} : S \\times \\mathcal{A} \\times S \\mapsto [0, 1]$ . Lastly, $\\gamma$ is the discount factor.",
|
| 675 |
+
"bbox": [
|
| 676 |
+
81,
|
| 677 |
+
299,
|
| 678 |
+
482,
|
| 679 |
+
397
|
| 680 |
+
],
|
| 681 |
+
"page_idx": 3
|
| 682 |
+
},
|
| 683 |
+
{
|
| 684 |
+
"type": "text",
|
| 685 |
+
"text": "We structure the agents in a communication graph $\\mathcal{G} = (\\mathcal{V},\\mathcal{E})$ . Nodes $i\\in \\mathcal{V}$ represent agents and edges $e_{ij}\\in \\mathcal{E}$ represent communication links. The set of edges is dependent of the maximum agent communication range and changes over time. The communication neighborhood of each agent is defined as $\\mathcal{N}_i\\equiv \\{v_j\\mid e_{ij}\\in \\mathcal{E}\\}$ .",
|
| 686 |
+
"bbox": [
|
| 687 |
+
81,
|
| 688 |
+
397,
|
| 689 |
+
482,
|
| 690 |
+
465
|
| 691 |
+
],
|
| 692 |
+
"page_idx": 3
|
| 693 |
+
},
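As a minimal sketch of this time-varying edge set (the helper name is ours, assuming 2D positions and a fixed communication radius):

```python
import torch

def comm_neighborhoods(positions: torch.Tensor, comm_range: float):
    """Build each agent's neighborhood N_i = {j | e_ij in E} from pairwise
    distances; the edge set changes over time as the agents move (Sec. 4)."""
    n = positions.shape[0]
    dist = torch.cdist(positions, positions)  # (n, n) pairwise distances
    return [{j for j in range(n) if j != i and dist[i, j] <= comm_range}
            for i in range(n)]

# Three agents on a line: the middle one hears both, the outer two only it.
pos = torch.tensor([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
print(comm_neighborhoods(pos, comm_range=1.5))  # [{1}, {0, 2}, {1}]
```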
|
| 694 |
+
{
|
| 695 |
+
"type": "text",
|
| 696 |
+
"text": "At each timestep $t$ , each agent $i$ gets an observation $o_i^t = \\sigma_i(s^t) \\in O_i$ that is a portion of the global state $s^t \\in S$ . This is communicated to the neighboring agents $\\mathcal{N}_i^t$ . A stochastic policy $\\pi_i$ uses this information to compute an action $a_i^t \\sim \\pi_i(\\cdot | o_{\\mathcal{N}_i}^t)$ . The agents' actions $\\mathbf{a}^t = (a_1^t, \\dots, a_n^t) \\in \\mathcal{A}$ , along with the current state $s^t$ , are then used in the transition model to obtain the next state $s^{t+1} \\sim \\mathcal{T}(\\cdot | s^t, \\mathbf{a}^t)$ . A reward $r_i^t = \\mathcal{R}_i(s^t, \\mathbf{a}^t, s^{t+1})$ is then fed to agent $i$ .",
|
| 697 |
+
"bbox": [
|
| 698 |
+
81,
|
| 699 |
+
465,
|
| 700 |
+
482,
|
| 701 |
+
570
|
| 702 |
+
],
|
| 703 |
+
"page_idx": 3
|
| 704 |
+
},
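A single timestep of these dynamics can be sketched as follows; `sigma`, `policies`, `transition`, and `rewards` are stand-in callables for illustration, not the paper's implementation.

```python
import torch

def pomg_step(state, agents, sigma, policies, transition, rewards, neighborhoods):
    """One POMG timestep (Sec. 4): observe, communicate, act, transition.
    All callables here are illustrative stand-ins."""
    obs = {i: sigma[i](state) for i in agents}                 # o_i = sigma_i(s)
    actions = {}
    for i in agents:
        # pi_i is conditioned on the communicated neighborhood observations o_{N_i}
        o_ni = torch.cat([obs[i]] + [obs[j] for j in sorted(neighborhoods[i])])
        actions[i] = policies[i](o_ni).sample()                # a_i ~ pi_i(. | o_{N_i})
    next_state = transition(state, actions)                    # s^{t+1} ~ T(. | s^t, a^t)
    return next_state, {i: rewards[i](state, actions, next_state) for i in agents}
```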
|
| 705 |
+
{
|
| 706 |
+
"type": "text",
|
| 707 |
+
"text": "The goal of each agent is to maximize the sum of discounted rewards $v_{i}^{t} = \\sum_{k=0}^{T} \\gamma^{k} r_{i}^{t+k}$ over an episode with horizon $T$ , potentially infinite. $v_{i}^{t}$ is called the return. Each agent has a value function $V_{i}(o_{\\mathcal{N}_{i}}) = \\mathbb{E}_{\\pi_{i}} \\left[ v_{i}^{t} \\Big| o_{\\mathcal{N}_{i}}^{t} = o_{\\mathcal{N}_{i}} \\right]$ , which represents the expected return starting from observations $o_{\\mathcal{N}_{i}}$ and following policy $\\pi_{i}$ . This function estimates the \"goodness\" of an observation. In this work, we use the Proximal Policy Optimization (PPO) actor-critic algorithm [48], which approximates the policy (actor) and the value function (critic) using neural networks and a constrained policy gradient update.",
|
| 708 |
+
"bbox": [
|
| 709 |
+
81,
|
| 710 |
+
570,
|
| 711 |
+
482,
|
| 712 |
+
717
|
| 713 |
+
],
|
| 714 |
+
"page_idx": 3
|
| 715 |
+
},
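The discounted return admits a one-line backward recursion, sketched below with an arbitrary discount factor:

```python
def discounted_return(rewards, gamma: float = 0.99) -> float:
    """v_i^t = sum_{k=0}^{T} gamma^k * r_i^{t+k} over one episode (Sec. 4)."""
    v = 0.0
    for r in reversed(rewards):  # backward accumulation: v_t = r_t + gamma * v_{t+1}
        v = r + gamma * v
    return v

assert abs(discounted_return([1.0, 1.0], gamma=0.5) - 1.5) < 1e-9  # 1 + 0.5 * 1
```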
|
| 716 |
+
{
|
| 717 |
+
"type": "text",
|
| 718 |
+
"text": "Problem. Learn heterogeneous policies $\\pi_i(o_{\\mathcal{N}_i}^t;\\theta_i)$ and critics $V_{i}(o_{\\mathcal{N}_{i}};\\theta_{i})$ conditioned on the neural network parameters $\\theta_{i}$ , different for each agent. The observations $o_{\\mathcal{N}_i}^t$ from the agent's neighborhood $\\mathcal{N}_i$ are obtained through a differentiable communication channel, making learning inherently decentralizable.",
|
| 719 |
+
"bbox": [
|
| 720 |
+
81,
|
| 721 |
+
717,
|
| 722 |
+
482,
|
| 723 |
+
789
|
| 724 |
+
],
|
| 725 |
+
"page_idx": 3
|
| 726 |
+
},
|
| 727 |
+
{
|
| 728 |
+
"type": "text",
|
| 729 |
+
"text": "Our objective is to crystallize the role of heterogeneity in MARL policies. Towards this end, we develop a model that addresses",
|
| 730 |
+
"bbox": [
|
| 731 |
+
81,
|
| 732 |
+
790,
|
| 733 |
+
482,
|
| 734 |
+
818
|
| 735 |
+
],
|
| 736 |
+
"page_idx": 3
|
| 737 |
+
},
|
| 738 |
+
{
|
| 739 |
+
"type": "text",
|
| 740 |
+
"text": "the problem description above, motivating it with an empirically-backed discourse on the shortcomings of homogeneous policies (and the behavioral typing techniques that they rely on).",
|
| 741 |
+
"bbox": [
|
| 742 |
+
513,
|
| 743 |
+
106,
|
| 744 |
+
915,
|
| 745 |
+
148
|
| 746 |
+
],
|
| 747 |
+
"page_idx": 3
|
| 748 |
+
},
|
| 749 |
+
{
|
| 750 |
+
"type": "text",
|
| 751 |
+
"text": "5 HETEROGENEOUS MODEL",
|
| 752 |
+
"text_level": 1,
|
| 753 |
+
"bbox": [
|
| 754 |
+
514,
|
| 755 |
+
160,
|
| 756 |
+
772,
|
| 757 |
+
174
|
| 758 |
+
],
|
| 759 |
+
"page_idx": 3
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "text",
|
| 763 |
+
"text": "We introduce the two MARL models that constitute the methodology leveraged in this work: Graph Neural Network Proximal Policy Optimization (GPPO) and its heterogeneous counterpart, HetGPPO.",
|
| 764 |
+
"bbox": [
|
| 765 |
+
513,
|
| 766 |
+
178,
|
| 767 |
+
915,
|
| 768 |
+
220
|
| 769 |
+
],
|
| 770 |
+
"page_idx": 3
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "text",
|
| 774 |
+
"text": "GPPO builds upon Independent Proximal Policy Optimization (IPPO) [13]. In IPPO, each agent learns a local critic $V_{i}(o_{i})$ and actor $\\pi_{i}(o_{i})$ , conditioned only on its own observations. Conditioning the critic only on local observations and not on the full state introduces non-stationarity during training. This results in other agents being considered as part of the environment and not explicitly modeled in the critic. While this can be problematic, it has the advantage of not requiring global information during training. Furthermore, IPPO has been shown to outperform many fully-observable critic models on state-of-the-art MARL benchmarks [13].",
|
| 775 |
+
"bbox": [
|
| 776 |
+
513,
|
| 777 |
+
220,
|
| 778 |
+
913,
|
| 779 |
+
358
|
| 780 |
+
],
|
| 781 |
+
"page_idx": 3
|
| 782 |
+
},
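For reference, PPO's clipped surrogate objective [48], which both GPPO and HetGPPO inherit, can be sketched as:

```python
import torch

def ppo_clip_loss(log_probs_new, log_probs_old, advantages, clip: float = 0.2):
    """PPO's constrained policy gradient update as a clipped surrogate loss:
    minimize -E[min(ratio * A, clamp(ratio, 1-clip, 1+clip) * A)]."""
    ratio = torch.exp(log_probs_new - log_probs_old)
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip, 1.0 + clip) * advantages
    return -torch.min(unclipped, clipped).mean()
```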
|
| 783 |
+
{
|
| 784 |
+
"type": "text",
|
| 785 |
+
"text": "GPPO overcomes the limitations of IPPO while maintaining its benefits. It uses a GNN communication layer, allowing agents to share information in neighborhoods to coordinate and overcome partial observability. Thanks to this, the GPPO critic $V_{i}(o_{\\mathcal{N}_{i}})$ and actor $\\pi_{i}(o_{\\mathcal{N}_{i}})$ are conditioned on local communication neighborhood observations $o_{\\mathcal{N}_i}$ . This helps overcome non-stationarity, while requiring only local information and communication during training.",
|
| 786 |
+
"bbox": [
|
| 787 |
+
513,
|
| 788 |
+
359,
|
| 789 |
+
913,
|
| 790 |
+
455
|
| 791 |
+
],
|
| 792 |
+
"page_idx": 3
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "text",
|
| 796 |
+
"text": "The GPPO model is illustrated in Fig. 2. At each timestep, each agent $i$ observes the environment, collecting the observations $o_i$ . These observations contain absolute geometrical features, such as the agent position $\\mathbf{p}_i \\in \\mathbb{R}^2$ . The non-absolute features are passed through a Multi Layer Perceptron (MLP) encoder, obtaining an embedding $z_i$ . The absolute position and the agent velocity $\\mathbf{v}_i \\in \\mathbb{R}^2$ are used to compute edge features $e_{ij}$ , which are relative features of agents $i$ and $j$ . In this work, we use the relative position $\\mathbf{p}_{ij} = \\mathbf{p}_i - \\mathbf{p}_j$ and relative velocity $\\mathbf{v}_{ij} = \\mathbf{v}_i - \\mathbf{v}_j$ as edge features $e_{ij} = \\mathbf{p}_{ij}||\\mathbf{v}_{ij}$ , where $||$ indicates the concatenation operation. This process ensures that GNN outputs are invariant to translations in $\\mathbb{R}^2$ (i.e., the same output is obtained if all the team is translated in space), helping the model generalize [21]. The edge features $e_{ij}$ and the agent embedding $z_i$ are then used in the message-passing GNN kernel:",
|
| 797 |
+
"bbox": [
|
| 798 |
+
513,
|
| 799 |
+
455,
|
| 800 |
+
913,
|
| 801 |
+
650
|
| 802 |
+
],
|
| 803 |
+
"page_idx": 3
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "equation",
|
| 807 |
+
"text": "\n$$\nh _ {i} = \\psi_ {\\theta_ {i}} (z _ {i}) + \\bigoplus_ {j \\in \\mathcal {N} _ {i}} \\phi_ {\\theta_ {i}} (z _ {j} | | e _ {i j}).\n$$\n",
|
| 808 |
+
"text_format": "latex",
|
| 809 |
+
"bbox": [
|
| 810 |
+
614,
|
| 811 |
+
660,
|
| 812 |
+
812,
|
| 813 |
+
691
|
| 814 |
+
],
|
| 815 |
+
"page_idx": 3
|
| 816 |
+
},
|
| 817 |
+
{
|
| 818 |
+
"type": "text",
|
| 819 |
+
"text": "Here, $\\psi_{\\theta_i}$ and $\\phi_{\\theta_i}$ are two MLPs, parameterized by the agent parameters $\\theta_i^3$ , and $\\bigoplus$ is an aggregation operator (e.g., sum). The GNN output $h_i$ is then fed to two different MLP decoders, which output the action $a_i \\sim \\pi_i(\\cdot | o_{\\mathcal{N}_i})$ and the value $V_i(o_{\\mathcal{N}_i})$ . Similar to IPPO, GPPO uses parameter sharing to improve sample efficiency. Thus $\\theta_1 = \\ldots = \\theta_n$ . Parameter sharing allows agents to benefit from collective experiences and thereby reduces training time. On the other hand, it enforces centralized training and constraints agents' policies to be identical (i.e., homogeneous).",
|
| 820 |
+
"bbox": [
|
| 821 |
+
513,
|
| 822 |
+
693,
|
| 823 |
+
913,
|
| 824 |
+
818
|
| 825 |
+
],
|
| 826 |
+
"page_idx": 3
|
| 827 |
+
},
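The message-passing kernel above translates almost directly into PyTorch. This is a minimal sketch with sum aggregation; the class name and hidden sizes are our assumptions, not the released implementation.

```python
import torch
import torch.nn as nn

class MessagePassing(nn.Module):
    """h_i = psi(z_i) + sum_{j in N_i} phi(z_j || e_ij)  (Sec. 5).
    A minimal sketch with sum as the aggregation operator."""
    def __init__(self, z_dim: int, e_dim: int, h_dim: int):
        super().__init__()
        self.psi = nn.Sequential(nn.Linear(z_dim, h_dim), nn.Tanh())
        self.phi = nn.Sequential(nn.Linear(z_dim + e_dim, h_dim), nn.Tanh())

    def forward(self, z_i, z_neighbors, e_edges):
        # z_i: (z_dim,); z_neighbors: (k, z_dim); e_edges: (k, e_dim)
        messages = self.phi(torch.cat([z_neighbors, e_edges], dim=-1))
        return self.psi(z_i) + messages.sum(dim=0)

# Per-agent modules (theta_1 != ... != theta_n) give HetGPPO; reusing one
# shared module for every agent recovers GPPO's parameter sharing.
layer = MessagePassing(z_dim=8, e_dim=4, h_dim=16)
h_i = layer(torch.randn(8), torch.randn(3, 8), torch.randn(3, 4))
assert h_i.shape == (16,)
```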
|
| 828 |
+
{
|
| 829 |
+
"type": "text",
|
| 830 |
+
"text": "HetGPPO, removes the parameter sharing constraint of GPPO, thus allowing agent policies to diverge, $\\theta_{1} \\neq \\dots \\neq \\theta_{n}$ . However, the impact of not sharing parameters in the context of GNN communications is profound: the permutation equivariance property",
|
| 831 |
+
"bbox": [
|
| 832 |
+
513,
|
| 833 |
+
818,
|
| 834 |
+
913,
|
| 835 |
+
875
|
| 836 |
+
],
|
| 837 |
+
"page_idx": 3
|
| 838 |
+
},
|
| 839 |
+
{
|
| 840 |
+
"type": "page_footnote",
|
| 841 |
+
"text": "1Note that, while we formulate our problem with local agent reward functions $\\mathcal{R}_i$ (to enable learning of $\\mathcal{B}_d$ heterogeneity), our experiments all present a global reward function $\\mathcal{R} = \\mathcal{R}_1 = \\dots = \\mathcal{R}_n$ which cannot be decomposed into local sub-functions. This reward encodes a global cooperative objective, leading to $\\mathcal{B}_s$ heterogeneity. $2\\gamma^{k}$ indicates $\\gamma$ to the power of $k$ , and not the timestep superscript.",
|
| 842 |
+
"bbox": [
|
| 843 |
+
81,
|
| 844 |
+
840,
|
| 845 |
+
482,
|
| 846 |
+
896
|
| 847 |
+
],
|
| 848 |
+
"page_idx": 3
|
| 849 |
+
},
|
| 850 |
+
{
|
| 851 |
+
"type": "page_footnote",
|
| 852 |
+
"text": "<sup>3</sup>With $\\theta_{i}$ we indicate the parameters for all the neural network layers of agent $i$ .",
|
| 853 |
+
"bbox": [
|
| 854 |
+
514,
|
| 855 |
+
883,
|
| 856 |
+
893,
|
| 857 |
+
896
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 3
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "image",
|
| 863 |
+
"img_path": "images/8fcbd3c60a0272c9b4e74daecfec9143e96bf9873cef638dea176afe2f5f5769.jpg",
|
| 864 |
+
"image_caption": [
|
| 865 |
+
"Figure 2: Architecture of GPPO and HetGPPO: MARL models with communicating agents. Each agent passes its observation through an encoder, then aggregates messages received from its neighbors using a translation-invariant message-passing GNN and updates its hidden state $h_i$ . $h_i$ is then used as input to the policy and value decoders (Dec). HetGPPO is equivalent to GPPO without parameter sharing."
|
| 866 |
+
],
|
| 867 |
+
"image_footnote": [],
|
| 868 |
+
"bbox": [
|
| 869 |
+
86,
|
| 870 |
+
103,
|
| 871 |
+
277,
|
| 872 |
+
276
|
| 873 |
+
],
|
| 874 |
+
"page_idx": 4
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "image",
|
| 878 |
+
"img_path": "images/06c7913ec7fd00abe1a32c40905170a80657e2f8d911095c99a30d885596d273.jpg",
|
| 879 |
+
"image_caption": [
|
| 880 |
+
"Figure 3: Different forms of behavioral typing. Homogeneous policies use typing to differentiate among agents and emulate heterogeneous behavior."
|
| 881 |
+
],
|
| 882 |
+
"image_footnote": [],
|
| 883 |
+
"bbox": [
|
| 884 |
+
285,
|
| 885 |
+
102,
|
| 886 |
+
480,
|
| 887 |
+
273
|
| 888 |
+
],
|
| 889 |
+
"page_idx": 4
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "text",
|
| 893 |
+
"text": "of GNNs [59] does not hold, since the agents now learn different message encoding and interpreting strategies. This results in the GNN having to learn a different team output for all the possible permutations of a given team input, instead of learning only one output. This can lead to decreases in generalization power and sample efficiency. On the other hand, gradients are backpropagated through communication neighborhoods, enabling agents to learn collectively from local interactions.",
|
| 894 |
+
"bbox": [
|
| 895 |
+
81,
|
| 896 |
+
398,
|
| 897 |
+
480,
|
| 898 |
+
511
|
| 899 |
+
],
|
| 900 |
+
"page_idx": 4
|
| 901 |
+
},
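The only structural difference between the two models is whether the per-agent modules are one shared object or independent copies; a sketch (names ours):

```python
import copy
import torch.nn as nn

def make_team(actor_template: nn.Module, n_agents: int, share_params: bool):
    """GPPO: one network object reused by every agent (theta_1 = ... = theta_n).
    HetGPPO: an independent copy per agent, free to diverge during training."""
    if share_params:
        return [actor_template] * n_agents  # same parameters, same policy
    return [copy.deepcopy(actor_template) for _ in range(n_agents)]

template = nn.Sequential(nn.Linear(4, 32), nn.Tanh(), nn.Linear(32, 2))
gppo_team = make_team(template, n_agents=3, share_params=True)
het_team = make_team(template, n_agents=3, share_params=False)
assert gppo_team[0] is gppo_team[1]    # shared parameters
assert het_team[0] is not het_team[1]  # independent parameters
```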
|
| 902 |
+
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"text": "The structure of HetGPPO, shown in Fig. 2, allows for Decentralized Training Decentralized Execution (DTDE). This is thanks to the fact that GPPO critics are not conditioned on global information. While GPPO uses parameter sharing, HetGPPO removes this need, thus enabling training in any environment where just inter-agent communication is possible. We note that, by implementing an ad-hoc mechanism to achieve decentralized parameter sharing (e.g., through distributed optimization [60]), GPPO could be trained in a decentralized fashion as well.",
|
| 905 |
+
"bbox": [
|
| 906 |
+
81,
|
| 907 |
+
511,
|
| 908 |
+
482,
|
| 909 |
+
635
|
| 910 |
+
],
|
| 911 |
+
"page_idx": 4
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "text",
|
| 915 |
+
"text": "We implement HetGPPO and GPPO in PyTorch [41] and employ the RLlib [32] framework for training. The code is available here<sup>4</sup>. Simulations are executed in custom created scenarios using the VMAS simulator [6], available at this link<sup>5</sup>.",
|
| 916 |
+
"bbox": [
|
| 917 |
+
81,
|
| 918 |
+
635,
|
| 919 |
+
482,
|
| 920 |
+
691
|
| 921 |
+
],
|
| 922 |
+
"page_idx": 4
|
| 923 |
+
},
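A minimal VMAS rollout loop might look like the following; the call signatures follow the simulator's public examples, and the exact arguments should be treated as assumptions rather than the paper's training setup.

```python
import vmas

# Build a vectorized scenario and roll out random actions as a smoke test.
env = vmas.make_env(scenario="transport", num_envs=32, continuous_actions=True)
obs = env.reset()
for _ in range(100):
    actions = [env.get_random_action(agent) for agent in env.agents]
    obs, rewards, dones, info = env.step(actions)
```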
|
| 924 |
+
{
|
| 925 |
+
"type": "text",
|
| 926 |
+
"text": "6 BEHAVIORAL TYPING",
|
| 927 |
+
"text_level": 1,
|
| 928 |
+
"bbox": [
|
| 929 |
+
83,
|
| 930 |
+
703,
|
| 931 |
+
303,
|
| 932 |
+
718
|
| 933 |
+
],
|
| 934 |
+
"page_idx": 4
|
| 935 |
+
},
|
| 936 |
+
{
|
| 937 |
+
"type": "text",
|
| 938 |
+
"text": "HetGPPO, introduced above, allows us to learn truly heterogeneous policies. Counter-intuitively, it is also possible to learn apparently heterogeneous behavior with homogeneous models like GPPO. This allows agents to emulate heterogeneous behavior while leveraging the sample efficiency benefits of parameter sharing. A shared model can encompass different behavioral types which are activated by particular combinations of the input observations. For example, if two robots are transporting a package towards a destination, the model can identify if an agent is in the back (further from the goal) and assign it a different behavioral type from that of the agent in",
|
| 939 |
+
"bbox": [
|
| 940 |
+
81,
|
| 941 |
+
720,
|
| 942 |
+
482,
|
| 943 |
+
861
|
| 944 |
+
],
|
| 945 |
+
"page_idx": 4
|
| 946 |
+
},
|
| 947 |
+
{
|
| 948 |
+
"type": "image",
|
| 949 |
+
"img_path": "images/1fa6f1ac549011aa2e550f5d53b431e8f9a7ee8a9589ba978b062a36c42b2fec.jpg",
|
| 950 |
+
"image_caption": [],
|
| 951 |
+
"image_footnote": [],
|
| 952 |
+
"bbox": [
|
| 953 |
+
516,
|
| 954 |
+
102,
|
| 955 |
+
913,
|
| 956 |
+
191
|
| 957 |
+
],
|
| 958 |
+
"page_idx": 4
|
| 959 |
+
},
|
| 960 |
+
{
|
| 961 |
+
"type": "text",
|
| 962 |
+
"text": "the front. The input observations provide the conditions for the model to assign behavioral types to the agents.",
|
| 963 |
+
"bbox": [
|
| 964 |
+
513,
|
| 965 |
+
263,
|
| 966 |
+
911,
|
| 967 |
+
291
|
| 968 |
+
],
|
| 969 |
+
"page_idx": 4
|
| 970 |
+
},
|
| 971 |
+
{
|
| 972 |
+
"type": "text",
|
| 973 |
+
"text": "We refer to this identification process as typing. Fig. 3 depicts a classification of behavioral typing techniques which we describe in the following subsections. Note that behavioral types lie in a continuous behavioral space (and are not part of a discrete set) [2].",
|
| 974 |
+
"bbox": [
|
| 975 |
+
513,
|
| 976 |
+
291,
|
| 977 |
+
913,
|
| 978 |
+
347
|
| 979 |
+
],
|
| 980 |
+
"page_idx": 4
|
| 981 |
+
},
|
| 982 |
+
{
|
| 983 |
+
"type": "text",
|
| 984 |
+
"text": "6.1 Explicit behavioral typing",
|
| 985 |
+
"text_level": 1,
|
| 986 |
+
"bbox": [
|
| 987 |
+
514,
|
| 988 |
+
358,
|
| 989 |
+
772,
|
| 990 |
+
375
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 4
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "The most popular form of behavioral typing consists in feeding the index $i$ of the agent explicitly as part of the observation. This practice has been used extensively in the MARL literature [12, 19, 24, 54]. However, it requires the model to learn a multimodal policy, which switches modes based on this integer index. This can lead to discontinuities in the agents' policy and has been shown to perform sub-optimally [12].",
|
| 997 |
+
"bbox": [
|
| 998 |
+
513,
|
| 999 |
+
376,
|
| 1000 |
+
913,
|
| 1001 |
+
474
|
| 1002 |
+
],
|
| 1003 |
+
"page_idx": 4
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "text",
|
| 1007 |
+
"text": "Definition 6.1 (Explicit behavioral typing). Explicit behavioral typing occurs when a shared decentralized MARL policy is able to type agents based on a constant value concatenated to the input, different for each agent.",
|
| 1008 |
+
"bbox": [
|
| 1009 |
+
513,
|
| 1010 |
+
479,
|
| 1011 |
+
911,
|
| 1012 |
+
537
|
| 1013 |
+
],
|
| 1014 |
+
"page_idx": 4
|
| 1015 |
+
},
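Definition 6.1 is easy to instantiate: a constant per-agent index is concatenated to the observation, and the shared policy is left to condition on it. A sketch with our own helper name:

```python
import torch

def with_explicit_type(obs: torch.Tensor, agent_idx: int, n_agents: int) -> torch.Tensor:
    """Definition 6.1 in code: append a constant one-hot agent index so a
    shared policy can switch behavioral modes on it."""
    one_hot = torch.zeros(n_agents)
    one_hot[agent_idx] = 1.0
    return torch.cat([obs, one_hot])

obs = torch.tensor([0.5, -0.1])
# Same observation, different constant suffix -> the shared multimodal
# policy may produce a different output for each agent.
print(with_explicit_type(obs, 0, 2))  # tensor([ 0.5000, -0.1000,  1.0000,  0.0000])
print(with_explicit_type(obs, 1, 2))  # tensor([ 0.5000, -0.1000,  0.0000,  1.0000])
```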
|
| 1016 |
+
{
|
| 1017 |
+
"type": "text",
|
| 1018 |
+
"text": "When no explicit index is available, a shared policy may still be able to emulate heterogeneous behavior [15]. We refer to this phenomenon as inferred behavioral typing. Inferred typing can occur for both physically heterogeneous and physically identical agents.",
|
| 1019 |
+
"bbox": [
|
| 1020 |
+
513,
|
| 1021 |
+
542,
|
| 1022 |
+
911,
|
| 1023 |
+
613
|
| 1024 |
+
],
|
| 1025 |
+
"page_idx": 4
|
| 1026 |
+
},
|
| 1027 |
+
{
|
| 1028 |
+
"type": "text",
|
| 1029 |
+
"text": "6.2 Inferred behavioral typing for physically heterogeneous agents",
|
| 1030 |
+
"text_level": 1,
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
514,
|
| 1033 |
+
625,
|
| 1034 |
+
890,
|
| 1035 |
+
657
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 4
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "text",
|
| 1041 |
+
"text": "We first present a case study of inferred behavioral typing for agents that are physically heterogeneous.",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
513,
|
| 1044 |
+
659,
|
| 1045 |
+
911,
|
| 1046 |
+
688
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 4
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "text",
|
| 1052 |
+
"text": "Definition 6.2 (Inferred behavioral typing for physically heterogeneous agents). Inferred behavioral typing for physically heterogeneous agents occurs when a shared decentralized MARL policy is able to type $\\mathcal{P}$ heterogeneous agents through their observations.",
|
| 1053 |
+
"bbox": [
|
| 1054 |
+
513,
|
| 1055 |
+
694,
|
| 1056 |
+
913,
|
| 1057 |
+
750
|
| 1058 |
+
],
|
| 1059 |
+
"page_idx": 4
|
| 1060 |
+
},
|
| 1061 |
+
{
|
| 1062 |
+
"type": "text",
|
| 1063 |
+
"text": "Scenario A (Fig. 4). Consider two robots with different masses, $m_{1} > m_{2}$ , located in a 1D workspace at random positions. The robots observe their own position $\\mathbf{p}_i \\in \\mathbb{R}$ and velocity $\\mathbf{v}_i \\in \\mathbb{R}$ and share them via communication. Their action is a force $\\mathbf{f}_i \\in \\mathbb{R}$ . They are rewarded collectively to maximize the maximum speed in the team while minimizing the energy consumed. The optimal policy in this case is, clearly, for the robot with the higher mass to not move at all, while the lighter robot moves at the maximum speed. Evidently these behaviors are heterogeneous, since $\\mathbf{f}_1 \\neq \\mathbf{f}_2$ when both agents receive the same observations.",
|
| 1064 |
+
"bbox": [
|
| 1065 |
+
511,
|
| 1066 |
+
757,
|
| 1067 |
+
913,
|
| 1068 |
+
896
|
| 1069 |
+
],
|
| 1070 |
+
"page_idx": 4
|
| 1071 |
+
},
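A sketch of Scenario A's collective reward; the energy weighting is our assumption, chosen only to illustrate the trade-off the text describes.

```python
import torch

def scenario_a_reward(velocities: torch.Tensor, forces: torch.Tensor,
                      energy_weight: float = 0.1) -> float:
    """Collective reward for Scenario A: maximize the maximum speed in the
    team while minimizing the energy consumed (weighting is illustrative)."""
    max_speed = velocities.abs().max()
    energy = (forces ** 2).sum()
    return (max_speed - energy_weight * energy).item()

# Optimal joint behavior: the heavy robot stays still (zero force), the
# light robot moves fast - heterogeneous actions under one shared reward.
print(scenario_a_reward(torch.tensor([0.0, 1.0]), torch.tensor([0.0, 0.5])))
```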
|
| 1072 |
+
{
|
| 1073 |
+
"type": "page_footnote",
|
| 1074 |
+
"text": "$^{4}$ https://github.com/proroklab/HetGPPO $^{5}$ https://github.com/proroklab/VectorizedMultiAgentSimulator",
|
| 1075 |
+
"bbox": [
|
| 1076 |
+
81,
|
| 1077 |
+
872,
|
| 1078 |
+
380,
|
| 1079 |
+
895
|
| 1080 |
+
],
|
| 1081 |
+
"page_idx": 4
|
| 1082 |
+
},
|
| 1083 |
+
{
|
| 1084 |
+
"type": "image",
|
| 1085 |
+
"img_path": "images/f5bda0c89c09d7bf66e3b58c7af33737436e0d4be255df90f2f776491cb38de5.jpg",
|
| 1086 |
+
"image_caption": [
|
| 1087 |
+
"(a) Heterogeneous"
|
| 1088 |
+
],
|
| 1089 |
+
"image_footnote": [],
|
| 1090 |
+
"bbox": [
|
| 1091 |
+
83,
|
| 1092 |
+
103,
|
| 1093 |
+
294,
|
| 1094 |
+
273
|
| 1095 |
+
],
|
| 1096 |
+
"page_idx": 5
|
| 1097 |
+
},
|
| 1098 |
+
{
|
| 1099 |
+
"type": "image",
|
| 1100 |
+
"img_path": "images/83b08d099505f2cd04c671bb90e8b115b4cb4cb404bd44d178540116e4f897af.jpg",
|
| 1101 |
+
"image_caption": [
|
| 1102 |
+
"(b) Homogeneous"
|
| 1103 |
+
],
|
| 1104 |
+
"image_footnote": [],
|
| 1105 |
+
"bbox": [
|
| 1106 |
+
297,
|
| 1107 |
+
103,
|
| 1108 |
+
496,
|
| 1109 |
+
273
|
| 1110 |
+
],
|
| 1111 |
+
"page_idx": 5
|
| 1112 |
+
},
|
| 1113 |
+
{
|
| 1114 |
+
"type": "image",
|
| 1115 |
+
"img_path": "images/0fb0215e3860d12fa43af89b37f27ea30b8b017c6ffd49f06129119a26c1a293.jpg",
|
| 1116 |
+
"image_caption": [
|
| 1117 |
+
"(c) Heterogeneous with noise"
|
| 1118 |
+
],
|
| 1119 |
+
"image_footnote": [],
|
| 1120 |
+
"bbox": [
|
| 1121 |
+
503,
|
| 1122 |
+
103,
|
| 1123 |
+
705,
|
| 1124 |
+
273
|
| 1125 |
+
],
|
| 1126 |
+
"page_idx": 5
|
| 1127 |
+
},
|
| 1128 |
+
{
|
| 1129 |
+
"type": "image",
|
| 1130 |
+
"img_path": "images/edce5290cfc6fd940289c875a19c39c2e9ea4521fc04483f34bb6f485d436802.jpg",
|
| 1131 |
+
"image_caption": [
|
| 1132 |
+
"(d) Homogenous with noise"
|
| 1133 |
+
],
|
| 1134 |
+
"image_footnote": [],
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
707,
|
| 1137 |
+
103,
|
| 1138 |
+
910,
|
| 1139 |
+
273
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 5
|
| 1142 |
+
},
|
| 1143 |
+
{
|
| 1144 |
+
"type": "text",
|
| 1145 |
+
"text": "We train the agents in this scenario using GPPO and HetGPPO. Fig. 4 shows a graphical representation of the learned policies of each model. In these plots, an arrow represents the team action vector $\\vec{\\mathbf{f}} (\\vec{\\mathbf{v}}) = [\\mathbf{f}_1(\\vec{\\mathbf{v}}),\\mathbf{f}_2(\\vec{\\mathbf{v}})]$ as a function of the observation $\\vec{\\mathbf{v}} = [\\mathbf{v}_1,\\mathbf{v}_2]$ . A vertical arrow at $[0,0]$ indicates that, when the agents are both still, agent 1 wants to stay still while agent 2 wants to increase its velocity. We plot the policy mean action for every observation pair with a gray vector field, and show a rollout of the policy in red. In Fig. 4a, we observe how HetGPPO is able to learn the optimal policy, which is not dependent on any observation. Thanks to physically inferred typing, GPPO (Fig. 4b) is surprisingly also able to learn a policy that grants optimal rollouts. We observe how, due to homogeneity, the GPPO policy is forced to be symmetric about the $\\mathbf{v}_1 = \\mathbf{v}_2$ axis. Thus, when the GPPO agents are spawned at $[0,0]$ , they forcibly take the same action of increasing their velocities. Due to their $\\mathcal{P}$ differences, however, this action produces different velocities, making the rollout quickly diverge from the symmetry and thus producing optimal behavior. The fact that a physical difference (i.e., different agent mass) produces different observations (i.e., different agent speed) for the same action, enables the homogeneous model to learn an optimal policy with apparent heterogeneous behavior - an example of inferred behavioral typing for physically heterogeneous agents.",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
81,
|
| 1148 |
+
398,
|
| 1149 |
+
482,
|
| 1150 |
+
718
|
| 1151 |
+
],
|
| 1152 |
+
"page_idx": 5
|
| 1153 |
+
},
|
| 1154 |
+
{
|
| 1155 |
+
"type": "text",
|
| 1156 |
+
"text": "Physically inferred typing proves to be a brittle solution. When additive uniform observation noise is injected during rollouts, we observe how the HetGPPO rollout (Fig. 4c) is not impacted at all, while the GPPO rollout (Fig. 4d) occasionally falls on the other side of the diagonal, producing the symmetrical opposite of the optimal behavior, and causing the heavy agent to move (horizontal arrows).",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
81,
|
| 1159 |
+
719,
|
| 1160 |
+
482,
|
| 1161 |
+
801
|
| 1162 |
+
],
|
| 1163 |
+
"page_idx": 5
|
| 1164 |
+
},
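The deployment noise used in these rollouts is plain additive uniform noise; a sketch:

```python
import torch

def add_uniform_noise(obs: torch.Tensor, level: float) -> torch.Tensor:
    """Additive uniform observation noise in [-level, +level], as injected
    during the deployment rollouts of Fig. 4 (level 0.3 there)."""
    return obs + (2.0 * torch.rand_like(obs) - 1.0) * level

torch.manual_seed(0)
# Near the v1 = v2 diagonal, such noise can flip which side of the symmetric
# homogeneous policy the team lands on, swapping the agents' roles.
print(add_uniform_noise(torch.zeros(2), level=0.3))
```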
|
| 1165 |
+
{
|
| 1166 |
+
"type": "text",
|
| 1167 |
+
"text": "6.3 Inferred behavioral typing for physically identical agents",
|
| 1168 |
+
"text_level": 1,
|
| 1169 |
+
"bbox": [
|
| 1170 |
+
83,
|
| 1171 |
+
813,
|
| 1172 |
+
460,
|
| 1173 |
+
845
|
| 1174 |
+
],
|
| 1175 |
+
"page_idx": 5
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "text",
|
| 1179 |
+
"text": "We now present a case study of inferred behavioral typing for agents that are physically identical.",
|
| 1180 |
+
"bbox": [
|
| 1181 |
+
81,
|
| 1182 |
+
848,
|
| 1183 |
+
480,
|
| 1184 |
+
876
|
| 1185 |
+
],
|
| 1186 |
+
"page_idx": 5
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "image",
|
| 1190 |
+
"img_path": "images/ad038285d08986c3c5931e077320f1994c29e031fb15b5f57d87678cb49eca33.jpg",
|
| 1191 |
+
"image_caption": [
|
| 1192 |
+
"(a) Scenario"
|
| 1193 |
+
],
|
| 1194 |
+
"image_footnote": [],
|
| 1195 |
+
"bbox": [
|
| 1196 |
+
524,
|
| 1197 |
+
396,
|
| 1198 |
+
578,
|
| 1199 |
+
526
|
| 1200 |
+
],
|
| 1201 |
+
"page_idx": 5
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "image",
|
| 1205 |
+
"img_path": "images/25df29c4ec0abba58e3301daa217ff0b877c66fba939d899a1e75bd690cef0b4.jpg",
|
| 1206 |
+
"image_caption": [
|
| 1207 |
+
"Figure 4: Policies learned for Scenario A represented as vector fields (gray) and rollouts in the environment (red). (a) and (b) are not subject to deployment noise. (c) and (d) are subject to $\\pm 0.3$ uniform noise on the observations. In these plots, an arrow represents the team action vector $\\vec{\\mathbf{f}}(\\vec{\\mathbf{v}}) = [\\mathbf{f}_1(\\vec{\\mathbf{v}}), \\mathbf{f}_2(\\vec{\\mathbf{v}})]$ as a function of the observation $\\vec{\\mathbf{v}} = [\\mathbf{v}_1, \\mathbf{v}_2]$ . The rollouts always start in the origin ( $\\vec{\\mathbf{v}} = [0,0]$ ). We can observe how the vector field representing the homogeneous policies is forced to be invariant to permutations of the two inputs and thus is symmetric along $\\mathbf{v}_1 = \\mathbf{v}_2$ . This causes it to become brittle in the presence of noise (d), which makes the observations fall in the wrong part of the plane where the symmetry enforces a suboptimal policy (horizontal arrows).",
|
| 1208 |
+
"(b) Training performance",
|
| 1209 |
+
"Figure 5: Scenario B. (a): The setup with two robots (bigger circles) on opposite sides of a corridor which need to give way to each other to reach their goals (smaller circles). (b) The training curve for Scenario B, showing that, while the heterogeneous model is able to solve the scenario immediately, homogeneous models need around 300 training iterations to learn inferred behavioral typing for physically identical agents. We plot the mean and standard deviation of 10 different runs. Each iteration is performed over 200 episodes."
|
| 1210 |
+
],
|
| 1211 |
+
"image_footnote": [],
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
598,
|
| 1214 |
+
393,
|
| 1215 |
+
910,
|
| 1216 |
+
542
|
| 1217 |
+
],
|
| 1218 |
+
"page_idx": 5
|
| 1219 |
+
},
|
| 1220 |
+
{
|
| 1221 |
+
"type": "text",
|
| 1222 |
+
"text": "Definition 6.3 (Inferred behavioral typing for physically identical agents). Inferred behavioral typing for physically identical agents occurs when a shared decentralized MARL policy is able to type physically identical agents through their observations.",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
513,
|
| 1225 |
+
717,
|
| 1226 |
+
913,
|
| 1227 |
+
773
|
| 1228 |
+
],
|
| 1229 |
+
"page_idx": 5
|
| 1230 |
+
},
|
| 1231 |
+
{
|
| 1232 |
+
"type": "text",
|
| 1233 |
+
"text": "Scenario B (Fig. 5). Consider now two physically identical robots, initialized at different ends of a narrow corridor, depicted in Fig. 5a. Each robot is positioned in front of the other's goal. The corridor is wide enough to fit only one robot, but contains two robot-sized recesses in the center. The robots observe and communicate their respective 2D positions and velocities, and are tasked with reaching their goals without colliding. Thus, the task can only be solved when one robot gives way to the other.",
|
| 1234 |
+
"bbox": [
|
| 1235 |
+
511,
|
| 1236 |
+
785,
|
| 1237 |
+
913,
|
| 1238 |
+
896
|
| 1239 |
+
],
|
| 1240 |
+
"page_idx": 5
|
| 1241 |
+
},
|
| 1242 |
+
{
|
| 1243 |
+
"type": "page_footnote",
|
| 1244 |
+
"text": "6The PPO policy is stochastic and outputs a distribution over $\\mathbf{f}_i$",
|
| 1245 |
+
"bbox": [
|
| 1246 |
+
81,
|
| 1247 |
+
883,
|
| 1248 |
+
383,
|
| 1249 |
+
895
|
| 1250 |
+
],
|
| 1251 |
+
"page_idx": 5
|
| 1252 |
+
},
|
| 1253 |
+
{
|
| 1254 |
+
"type": "image",
|
| 1255 |
+
"img_path": "images/3bfa2abe2c4257799d2c5e6622a150aa9ab10f08716bc0bd95d85789006a39c3.jpg",
|
| 1256 |
+
"image_caption": [
|
| 1257 |
+
"(a) Scenario A"
|
| 1258 |
+
],
|
| 1259 |
+
"image_footnote": [],
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
86,
|
| 1262 |
+
102,
|
| 1263 |
+
285,
|
| 1264 |
+
246
|
| 1265 |
+
],
|
| 1266 |
+
"page_idx": 6
|
| 1267 |
+
},
|
| 1268 |
+
{
|
| 1269 |
+
"type": "image",
|
| 1270 |
+
"img_path": "images/db953c14f73734f8d30797129475b689aa89c350fad773baade83888bfb84721.jpg",
|
| 1271 |
+
"image_caption": [
|
| 1272 |
+
"(b) Scenario B",
|
| 1273 |
+
"Figure 7: Performance evaluation in the passage scenario with differently sized robots. Here, the homogeneous model is not able to perform inferred behavioral typing for physically heterogeneous agents since $\\mathcal{P}$ heterogeneity does not affect the robots' observations. Thus, only the heterogeneous model is able to solve the task. We plot the mean and standard deviation success rate of 4 runs. Each iteration is performed over 200 episodes of experience."
|
| 1274 |
+
],
|
| 1275 |
+
"image_footnote": [],
|
| 1276 |
+
"bbox": [
|
| 1277 |
+
285,
|
| 1278 |
+
102,
|
| 1279 |
+
480,
|
| 1280 |
+
244
|
| 1281 |
+
],
|
| 1282 |
+
"page_idx": 6
|
| 1283 |
+
},
|
| 1284 |
+
{
|
| 1285 |
+
"type": "text",
|
| 1286 |
+
"text": "Again, we train the agents with both GPPO and HetGPPO in this scenario. By looking at the training reward plot in Fig. 5b, we can see that both models are able to learn the correct behavior (reward $>700$ ). GPPO leverages inferred typing for physically identical agents and is able to assign the \"give way\" role dynamically according to the relative position and velocity of the two robots. However, we observe that learning behavioral typing takes all 300 training iterations, while the heterogeneous model learns the optimal solution with only 20 iterations.",
|
| 1287 |
+
"bbox": [
|
| 1288 |
+
81,
|
| 1289 |
+
386,
|
| 1290 |
+
482,
|
| 1291 |
+
512
|
| 1292 |
+
],
|
| 1293 |
+
"page_idx": 6
|
| 1294 |
+
},
|
| 1295 |
+
{
|
| 1296 |
+
"type": "text",
|
| 1297 |
+
"text": "6.4 Limitations of behavioral typing",
|
| 1298 |
+
"text_level": 1,
|
| 1299 |
+
"bbox": [
|
| 1300 |
+
83,
|
| 1301 |
+
526,
|
| 1302 |
+
393,
|
| 1303 |
+
542
|
| 1304 |
+
],
|
| 1305 |
+
"page_idx": 6
|
| 1306 |
+
},
|
| 1307 |
+
{
|
| 1308 |
+
"type": "text",
|
| 1309 |
+
"text": "Although homogeneous models can use behavioral typing to learn apparently heterogeneous behavior, this does not prove to be a reliable and scalable solution.",
|
| 1310 |
+
"bbox": [
|
| 1311 |
+
81,
|
| 1312 |
+
544,
|
| 1313 |
+
482,
|
| 1314 |
+
585
|
| 1315 |
+
],
|
| 1316 |
+
"page_idx": 6
|
| 1317 |
+
},
|
| 1318 |
+
{
|
| 1319 |
+
"type": "text",
|
| 1320 |
+
"text": "In [12] it is shown that the performance of explicit behavioral typing degrades as a function of the number of types to be learned. Furthermore, the authors empirically show that this performance decrease is not related to the capacity of the shared homogeneous model (i.e., the number of parameters).",
|
| 1321 |
+
"bbox": [
|
| 1322 |
+
81,
|
| 1323 |
+
585,
|
| 1324 |
+
482,
|
| 1325 |
+
655
|
| 1326 |
+
],
|
| 1327 |
+
"page_idx": 6
|
| 1328 |
+
},
|
| 1329 |
+
{
|
| 1330 |
+
"type": "text",
|
| 1331 |
+
"text": "Inferred indexing also proves to be a brittle solution. To characterize this brittleness, we perform an evaluation by injecting observation noise during execution. This is shown in Fig. 6. We report the mean and standard deviation of the normalized reward on 100 runs for 50 noise values between 0 and 2. As we can observe, all models start with the optimal policy with a reward of 1 when the noise is 0. As the noise increases, we observe how homogeneous models either almost immediately lose functionality (like for Scenario B in Fig. 6b), or degrade in performance rapidly (like for Scenario A in Fig. 6a). A heterogeneous policy, in contrast, is able to tolerate higher magnitudes of noise, and, even in the difficult corridor scenario, still manages to complete the task about $20\\%$ of the time at high noise values.",
|
| 1332 |
+
"bbox": [
|
| 1333 |
+
81,
|
| 1334 |
+
655,
|
| 1335 |
+
482,
|
| 1336 |
+
835
|
| 1337 |
+
],
|
| 1338 |
+
"page_idx": 6
|
| 1339 |
+
},
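The evaluation protocol behind Fig. 6 can be sketched as a sweep; `run_episode` is a stand-in for rolling out a trained policy in the simulator and returning its normalized reward.

```python
import statistics

def noise_sweep(run_episode, noise_levels=None, runs: int = 100):
    """Fig. 6 protocol sketch: for each noise level, roll out `runs` episodes
    and report the mean/std of the normalized reward in [0, 1].
    `run_episode(noise) -> float` is an illustrative stand-in."""
    if noise_levels is None:
        noise_levels = [2.0 * k / 49 for k in range(50)]  # 50 values in [0, 2]
    results = []
    for noise in noise_levels:
        rewards = [run_episode(noise) for _ in range(runs)]
        results.append((noise, statistics.mean(rewards), statistics.pstdev(rewards)))
    return results
```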
|
| 1340 |
+
{
|
| 1341 |
+
"type": "text",
|
| 1342 |
+
"text": "7 EXPERIMENTAL EVALUATIONS",
|
| 1343 |
+
"text_level": 1,
|
| 1344 |
+
"bbox": [
|
| 1345 |
+
83,
|
| 1346 |
+
849,
|
| 1347 |
+
382,
|
| 1348 |
+
862
|
| 1349 |
+
],
|
| 1350 |
+
"page_idx": 6
|
| 1351 |
+
},
|
| 1352 |
+
{
|
| 1353 |
+
"type": "text",
|
| 1354 |
+
"text": "We now present some evaluations of the proposed models in simulated and real environments.",
|
| 1355 |
+
"bbox": [
|
| 1356 |
+
81,
|
| 1357 |
+
867,
|
| 1358 |
+
482,
|
| 1359 |
+
895
|
| 1360 |
+
],
|
| 1361 |
+
"page_idx": 6
|
| 1362 |
+
},
|
| 1363 |
+
{
|
| 1364 |
+
"type": "image",
|
| 1365 |
+
"img_path": "images/1a19e343b580904360cc26edec25921948af648ef34f8126324f1b74a76039d0.jpg",
|
| 1366 |
+
"image_caption": [
|
| 1367 |
+
"Figure 6: Performance of homogeneous and heterogeneous models in the presence of deployment noise on the two inferred typing scenarios. Reward is normalized between 0 and 1. Uniform noise is applied to all observations and it is in the same units as the observations. We report the mean and standard deviation of the normalized reward on 100 runs for 50 noise values between 0 and 2."
|
| 1368 |
+
],
|
| 1369 |
+
"image_footnote": [],
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
526,
|
| 1372 |
+
109,
|
| 1373 |
+
648,
|
| 1374 |
+
205
|
| 1375 |
+
],
|
| 1376 |
+
"page_idx": 6
|
| 1377 |
+
},
|
| 1378 |
+
{
|
| 1379 |
+
"type": "image",
|
| 1380 |
+
"img_path": "images/3b1e9a792c3a5e85940238b6f26404cf96a84c0eb8d4c3d508ffb3924b23d9e6.jpg",
|
| 1381 |
+
"image_caption": [
|
| 1382 |
+
"(a) Scenario",
|
| 1383 |
+
"(b) Training performance"
|
| 1384 |
+
],
|
| 1385 |
+
"image_footnote": [],
|
| 1386 |
+
"bbox": [
|
| 1387 |
+
656,
|
| 1388 |
+
104,
|
| 1389 |
+
905,
|
| 1390 |
+
232
|
| 1391 |
+
],
|
| 1392 |
+
"page_idx": 6
|
| 1393 |
+
},
|
| 1394 |
+
{
|
| 1395 |
+
"type": "text",
|
| 1396 |
+
"text": "Performance evaluation. We evaluate HetGPPO on a simulated 2D task which requires heterogeneous behavior. The task is shown in Fig. 7a. Here, two robots of different sizes (blue circles), connected by a rigid linkage through two revolute joints, need to cross a passage while keeping the linkage parallel to it and then match the desired goal position (green circles) on the other side. The passage is comprised of a bigger and a smaller gap, which are spawned in a random position and order on the wall, but always at the same distance between each other. The team is spawned in a random order and position on the lower side with the linkage always perpendicular to the passage. The goal is spawned horizontally in a random position on the upper side. Each robot observes and communicates its velocity, relative position to each gap, and relative position to the goal center. The shaped global reward is composed of two convex terms. Before the passage, the robots are rewarded to keep the linkage parallel to the goal and to bring its center to the center of the passage. After the passage, the robots are rewarded for bringing it to the goal at the desired orientation. Collisions are also penalized.",
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
511,
|
| 1399 |
+
383,
|
| 1400 |
+
913,
|
| 1401 |
+
646
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 6
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "text",
|
| 1407 |
+
"text": "Fig. 7b shows training success rate (i.e., percentage of episodes in each batch that complete the task). The heterogeneous model is able to learn two behaviorally different policies: the bigger robot passes through the bigger gap and the smaller robot through the smaller gap, achieving the optimal solution. On the other hand, the homogeneous model is not able to assign these two behavioral types using inferred behavioral typing for physically heterogeneous agents, since the $\\mathcal{P}$ heterogeneity caused by different robot sizes does not affect the robots' observations. Agents with homogeneous policies never manage to cross the passage, being deterred by unavoidable collisions.",
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
511,
|
| 1410 |
+
646,
|
| 1411 |
+
913,
|
| 1412 |
+
797
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 6
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "text",
|
| 1418 |
+
"text": "Resilience to training noise. As elucidated in Sec. 6, homogeneous models can learn heterogeneous behavior. In this subsection, we evaluate the resilience of this paradigm in the presence of observation noise during training. We consider the task depicted in Fig. 8a. This is the same as in Fig. 7a with the difference that the robots are now physically identical, but the linkage has an asymmetric mass (black circle) that causes a different type of $\\mathcal{P}$ heterogeneity,",
|
| 1419 |
+
"bbox": [
|
| 1420 |
+
511,
|
| 1421 |
+
797,
|
| 1422 |
+
915,
|
| 1423 |
+
896
|
| 1424 |
+
],
|
| 1425 |
+
"page_idx": 6
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "image",
|
| 1429 |
+
"img_path": "images/24cf9f52873ef05a8a4b1e5c3ebc070d19db88e133976df49cc335a5b5ab8437.jpg",
|
| 1430 |
+
"image_caption": [
|
| 1431 |
+
"(a) Scenario"
|
| 1432 |
+
],
|
| 1433 |
+
"image_footnote": [],
|
| 1434 |
+
"bbox": [
|
| 1435 |
+
93,
|
| 1436 |
+
107,
|
| 1437 |
+
217,
|
| 1438 |
+
205
|
| 1439 |
+
],
|
| 1440 |
+
"page_idx": 7
|
| 1441 |
+
},
|
| 1442 |
+
{
|
| 1443 |
+
"type": "image",
|
| 1444 |
+
"img_path": "images/0c777d1ee335c5a7a493485cf19372a9ff78ff738b2130b88213f35fb6db7662.jpg",
|
| 1445 |
+
"image_caption": [
|
| 1446 |
+
"(b) Resilience to training noise"
|
| 1447 |
+
],
|
| 1448 |
+
"image_footnote": [],
|
| 1449 |
+
"bbox": [
|
| 1450 |
+
222,
|
| 1451 |
+
102,
|
| 1452 |
+
480,
|
| 1453 |
+
229
|
| 1454 |
+
],
|
| 1455 |
+
"page_idx": 7
|
| 1456 |
+
},
|
| 1457 |
+
{
|
| 1458 |
+
"type": "text",
|
| 1459 |
+
"text": "reflected in the velocity observations. The passage is a single gap, positioned randomly on the wall. The agents need to cross it while keeping the linkage perpendicular to the wall and avoiding collisions. The team and the goal are spawned in a random position, order, and rotation on opposite sides of the passage.",
|
| 1460 |
+
"bbox": [
|
| 1461 |
+
81,
|
| 1462 |
+
383,
|
| 1463 |
+
482,
|
| 1464 |
+
452
|
| 1465 |
+
],
|
| 1466 |
+
"page_idx": 7
|
| 1467 |
+
},
|
| 1468 |
+
{
|
| 1469 |
+
"type": "text",
|
| 1470 |
+
"text": "In Fig. 8b we report the training success rate for different observation noise values. Thanks to inferred behavioral typing for physically heterogeneous agents we see that both models solve the task optimally when 0 noise is added. As noise increases, the heterogeneous model is able to maintain significantly better performance. For example, with 0.2 observation noise, HetGPPO still achieves more than $80\\%$ success rate, while GPPO is below $40\\%$ .",
|
| 1471 |
+
"bbox": [
|
| 1472 |
+
81,
|
| 1473 |
+
452,
|
| 1474 |
+
482,
|
| 1475 |
+
547
|
| 1476 |
+
],
|
| 1477 |
+
"page_idx": 7
|
| 1478 |
+
},
|
| 1479 |
+
{
|
| 1480 |
+
"type": "text",
|
| 1481 |
+
"text": "Real-world deployment. To demonstrate the resilience of heterogeneous policies, we deploy Scenario B (Sec. 6.3) to a real-world setting. The setup of the task is shown in Fig. 9b and is the same as in simulation. We use two holonomic RoboMaster S1 ground robots [16] (Fig. 9a), each running a customized model-based controller onboard [51]. We perform 10 runs for the trained HetGPPO and GPPO models both in simulation (Fig. 9c) and in the real world (Fig. 9d). As already discussed in Sec. 6.3, both the heterogeneous and the homogeneous models are able to solve the scenario in simulation, with the homogeneous model leveraging inferred behavioral typing for physically identical agents. This is shown in Fig. 9c, where all the runs of both models reach $100\\%$ task completion within 15s. On the other hand, as seen in Fig. 9d, the performance of the homogeneous model is heavily impacted in the real world. This is because, in this symmetric scenario, the homogeneous model cannot type agents based on position only, and has to rely on velocity observations to build the behavioral types. In practice, however, real-world estimated velocities can be noisy due to factors such as control/process delays and variability in the robot's measurement and motion models. Thus, relying on these observations makes the homogeneous (memory-less) model susceptible to erroneously switching the behavioral types dynamically (i.e., failing to distinguish if the robots are currently moving towards or away from the center). This leads to the plotted rollouts, where robots alternate the role of giving way to each other near the passage. Out of 10 runs,",
|
| 1482 |
+
"bbox": [
|
| 1483 |
+
81,
|
| 1484 |
+
549,
|
| 1485 |
+
482,
|
| 1486 |
+
896
|
| 1487 |
+
],
|
| 1488 |
+
"page_idx": 7
|
| 1489 |
+
},
|
| 1490 |
+
{
|
| 1491 |
+
"type": "image",
|
| 1492 |
+
"img_path": "images/d4e2bba452c452dcecc2e65ef6efc2698cd91f9a25e500df419c39ad4a4463cf.jpg",
|
| 1493 |
+
"image_caption": [
|
| 1494 |
+
"(a) Robot"
|
| 1495 |
+
],
|
| 1496 |
+
"image_footnote": [],
|
| 1497 |
+
"bbox": [
|
| 1498 |
+
517,
|
| 1499 |
+
103,
|
| 1500 |
+
619,
|
| 1501 |
+
186
|
| 1502 |
+
],
|
| 1503 |
+
"page_idx": 7
|
| 1504 |
+
},
|
| 1505 |
+
{
|
| 1506 |
+
"type": "image",
|
| 1507 |
+
"img_path": "images/6e71a34e4cb6005e80d3798842030669f3384667a195293120358abff201fab8.jpg",
|
| 1508 |
+
"image_caption": [
|
| 1509 |
+
"(b) Scenario B (real world)"
|
| 1510 |
+
],
|
| 1511 |
+
"image_footnote": [],
|
| 1512 |
+
"bbox": [
|
| 1513 |
+
651,
|
| 1514 |
+
103,
|
| 1515 |
+
911,
|
| 1516 |
+
186
|
| 1517 |
+
],
|
| 1518 |
+
"page_idx": 7
|
| 1519 |
+
},
|
| 1520 |
+
{
|
| 1521 |
+
"type": "image",
|
| 1522 |
+
"img_path": "images/db3783a1defd21fed19e86a3e0d6a3968b0a6b82109faa8dba9e30fa4961d28b.jpg",
|
| 1523 |
+
"image_caption": [
|
| 1524 |
+
"Figure 8: Resilience to uniform observation noise during training in the passage scenario with asymmetric package. Here, the heterogeneous model is able to maintain higher performance as the noise increases. We train the two models for 7 different noise values. For each noise value, we report the mean and standard deviation of the success rate after 1000 training iterations for 5 runs. Each training iteration is performed over 200 episodes of experience.",
|
| 1525 |
+
"(c) Simulation",
|
| 1526 |
+
"Figure 9: Real-world deployment of Scenario B (Fig. 5). We report 10 runs for each model both in simulation and in the real world. We plot task completion (the scaled sum of the negative distances of each robot from its goal) over time. While in simulation both models are able to perform the task, real world imperfections make the homogeneous model dynamically switch between learned behavioral types, leading to the robots switching positions multiple times near the central area. This causes the zigzag behavior in (d) with certain rollouts failing or taking over the maximum allocated time of 60s."
|
| 1527 |
+
],
|
| 1528 |
+
"image_footnote": [],
|
| 1529 |
+
"bbox": [
|
| 1530 |
+
516,
|
| 1531 |
+
205,
|
| 1532 |
+
709,
|
| 1533 |
+
351
|
| 1534 |
+
],
|
| 1535 |
+
"page_idx": 7
|
| 1536 |
+
},
|
| 1537 |
+
{
|
| 1538 |
+
"type": "image",
|
| 1539 |
+
"img_path": "images/9aed662e64999fa33bc977dce045411e55a8c07494983b7be3ebe473921bc6d6.jpg",
|
| 1540 |
+
"image_caption": [
|
| 1541 |
+
"(d) Real world"
|
| 1542 |
+
],
|
| 1543 |
+
"image_footnote": [],
|
| 1544 |
+
"bbox": [
|
| 1545 |
+
718,
|
| 1546 |
+
205,
|
| 1547 |
+
913,
|
| 1548 |
+
351
|
| 1549 |
+
],
|
| 1550 |
+
"page_idx": 7
|
| 1551 |
+
},
|
| 1552 |
+
{
|
| 1553 |
+
"type": "text",
|
| 1554 |
+
"text": "only 5 are completed within 60s. The heterogeneous model, on the other hand, does not rely on behavioral typing and is not impacted by the deployment noises, performing as well as in simulations.",
|
| 1555 |
+
"bbox": [
|
| 1556 |
+
513,
|
| 1557 |
+
577,
|
| 1558 |
+
911,
|
| 1559 |
+
619
|
| 1560 |
+
],
|
| 1561 |
+
"page_idx": 7
|
| 1562 |
+
},
|
| 1563 |
+
{
|
| 1564 |
+
"type": "text",
|
| 1565 |
+
"text": "8 CONCLUSION",
|
| 1566 |
+
"text_level": 1,
|
| 1567 |
+
"bbox": [
|
| 1568 |
+
514,
|
| 1569 |
+
642,
|
| 1570 |
+
661,
|
| 1571 |
+
655
|
| 1572 |
+
],
|
| 1573 |
+
"page_idx": 7
|
| 1574 |
+
},
|
| 1575 |
+
{
|
| 1576 |
+
"type": "text",
|
| 1577 |
+
"text": "In this paper, we introduced a new paradigm for learning heterogeneous policies in MARL. We motivated it with a categorization of techniques that homogeneous models can use to emulate heterogeneous behavior and empirically demonstrated their limits. Finally, we showed the benefits of policy heterogeneity for both performance and resilience on multi-robot tasks in simulation and in the real world. While we do not employ any methods to control the degree of heterogeneity of the agents' policies, we observe that training is already a good heterogeneity regularizer. In other words, if the system has heterogeneous requirements, HetGPPO will be able to learn them, while, if the system benefits from homogeneous policies, HetGPPO will learn the same policy as GPPO (with some loss in sample efficiency). In future work, we are interested in developing mechanisms that measure and actively tune the degree of policy heterogeneity in the team, allowing us to control the trade-offs between sample efficiency (of homogeneous policies) and resilience (of heterogeneous policies).",
|
| 1578 |
+
"bbox": [
|
| 1579 |
+
511,
|
| 1580 |
+
660,
|
| 1581 |
+
913,
|
| 1582 |
+
896
|
| 1583 |
+
],
|
| 1584 |
+
"page_idx": 7
|
| 1585 |
+
},
|
| 1586 |
+
{
|
| 1587 |
+
"type": "text",
|
| 1588 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1589 |
+
"text_level": 1,
|
| 1590 |
+
"bbox": [
|
| 1591 |
+
84,
|
| 1592 |
+
104,
|
| 1593 |
+
279,
|
| 1594 |
+
119
|
| 1595 |
+
],
|
| 1596 |
+
"page_idx": 8
|
| 1597 |
+
},
|
| 1598 |
+
{
|
| 1599 |
+
"type": "text",
|
| 1600 |
+
"text": "This work was supported by ARL DCIST CRA W911NF-17-2-0181, the European Research Council (ERC) Project 949940 (gAIA), and in part by a gift from Arm.",
|
| 1601 |
+
"bbox": [
|
| 1602 |
+
83,
|
| 1603 |
+
125,
|
| 1604 |
+
482,
|
| 1605 |
+
165
|
| 1606 |
+
],
|
| 1607 |
+
"page_idx": 8
|
| 1608 |
+
},
|
| 1609 |
+
{
|
| 1610 |
+
"type": "text",
|
| 1611 |
+
"text": "REFERENCES",
|
| 1612 |
+
"text_level": 1,
|
| 1613 |
+
"bbox": [
|
| 1614 |
+
84,
|
| 1615 |
+
183,
|
| 1616 |
+
200,
|
| 1617 |
+
196
|
| 1618 |
+
],
|
| 1619 |
+
"page_idx": 8
|
| 1620 |
+
},
|
| 1621 |
+
{
|
| 1622 |
+
"type": "list",
|
| 1623 |
+
"sub_type": "ref_text",
|
| 1624 |
+
"list_items": [
|
| 1625 |
+
"[1] Nora Ayanian. 2019. Dart: Diversity-enhanced autonomy in robot teams. The International Journal of Robotics Research 38, 12-13 (2019), 1329-1337.",
|
| 1626 |
+
"[2] Tucker Balch. 2000. Hierarchic social entropy: An information theoretic measure of robot group diversity. Autonomous robots 8, 3 (2000), 209-238.",
|
| 1627 |
+
"[3] Tucker Balch et al. 1997. Learning roles: Behavioral diversity in robot teams. In AAAI Workshop on Multiagent Learning.",
|
| 1628 |
+
"[4] Spring Berman, Adam Halasz, Vijay Kumar, and Stephen Pratt. 2007. Bio-inspired group behaviors for the deployment of a swarm of robots to multiple destinations. In Proceedings 2007 IEEE international conference on robotics and automation. IEEE, 2318-2323.",
|
| 1629 |
+
"[5] Daniel S Bernstein, Robert Givan, Neil Immerman, and Shlomo Zilberstein. 2002. The complexity of decentralized control of Markov decision processes. Mathematics of operations research 27, 4 (2002), 819-840.",
|
| 1630 |
+
"[6] Matteo Bettini, Ryan Kortvelesy, Jan Blumenkamp, and Amanda Prorok. 2022. VMAS: A Vectorized Multi-Agent Simulator for Collective Robot Learning. The 16th International Symposium on Distributed Autonomous Robotic Systems (2022).",
|
| 1631 |
+
"[7] Jan Blumenkamp and Amanda Prorok. 2021. The Emergence of Adversarial Communication in Multi-Agent Reinforcement Learning. In Conference on Robot Learning. PMLR, 1394-1414.",
|
| 1632 |
+
"[8] Elizabeth R Boroson and Nora Ayanian. 2019. 3D keypoint repeatability for heterogeneous multi-robot SLAM. In 2019 International Conference on Robotics and Automation (ICRA). IEEE, 6337-6343.",
|
| 1633 |
+
"[9] Olli Bräysy and Michel Gendreau. 2005. Vehicle routing problem with time windows, Part II: Metaheuristics. Transportation science 39, 1 (2005), 119-139.",
|
| 1634 |
+
"[10] Praneel Chand and Dale A Carnegie. 2013. Mapping and exploration in a hierarchical heterogeneous multi-robot system using limited capability robots. Robotics and autonomous Systems 61, 6 (2013), 565-579.",
|
| 1635 |
+
"[11] Li Chenghao, Tonghan Wang, Chengjie Wu, Qianchuan Zhao, Jun Yang, and Chongjie Zhang. 2021. Celebrating diversity in shared multi-agent reinforcement learning. Advances in Neural Information Processing Systems 34 (2021).",
|
| 1636 |
+
"[12] Filippos Christianos, Georgios Papoudakis, Muhammad A Rahman, and Stefano V Albrecht. 2021. Scaling multi-agent reinforcement learning with selective parameter sharing. In International Conference on Machine Learning. PMLR, 1989-1998.",
|
| 1637 |
+
"[13] Christian Schroeder de Witt, Tarun Gupta, Denys Makoviichuk, Viktor Makoviychuk, Philip HS Torr, Mingfei Sun, and Shimon Whiteson. 2020. Is independent learning all you need in the starcraft multi-agent challenge? arXiv preprint arXiv:2011.09533 (2020).",
|
| 1638 |
+
"[14] Mark Debord, Wolfgang Honig, and Nora Ayanian. 2018. Trajectory planning for heterogeneous robot teams. In 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 7924-7931.",
|
| 1639 |
+
"[15] Ankur Deka and Katia Sycara. 2021. Natural Emergence of Heterogeneous Strategies in Artificially Intelligent Competitive Teams. In Advances in Swarm Intelligence: 12th International Conference. 13-25.",
|
| 1640 |
+
"[16] DJI. Accessed: 2023-01-17. Robomaster S1. https://www.dji.com/robomaster-s1.",
|
| 1641 |
+
"[17] Yousef Emam, Siddharth Mayya, Gennaro Notomista, Addison Bohannon, and Magnus Egerstedt. 2020. Adaptive task allocation for heterogeneous multi-robot teams with evolving and unknown robot capabilities. In 2020 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 7719-7725.",
|
| 1642 |
+
"[18] Yousef Emam, Gennaro Notomista, Paul Glotfelter, and Magnus Egerstedt. 2021. Data-Driven Adaptive Task Allocation for Heterogeneous Multi-Robot Teams Using Robust Control Barrier Functions. In 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 9124–9130.",
|
| 1643 |
+
"[19] Jakob Foerster, Ioannis Alexandros Assael, Nando De Freitas, and Shimon Whiteson. 2016. Learning to communicate with deep multi-agent reinforcement learning. Advances in neural information processing systems 29 (2016).",
|
| 1644 |
+
"[20] Jakob Foerster, Gregory Farquhar, Triantafyllos Afouras, Nantas Nardelli, and Shimon Whiteson. 2018. Counterfactual multi-agent policy gradients. In Proceedings of the AAAI conference on artificial intelligence, Vol. 32.",
|
| 1645 |
+
"[21] Fabian Fuchs, Daniel Worrall, Volker Fischer, and Max Welling. 2020. Se (3)-transformers: 3d roto-translation equivariant attention networks. Advances in Neural Information Processing Systems 33 (2020), 1970–1981.",
|
| 1646 |
+
"[22] Brian P. Gerkey and Maja J. Mataric. 2002. Pusher-watcher: An approach to fault-tolerant tightly-coupled robot coordination. In Proceedings 2002 IEEE International Conference on Robotics and Automation, Vol. 1. IEEE, 464-469.",
|
| 1647 |
+
"[23] Dani Goldberg and Maja J Mataric. 1997. Interference as a tool for designing and evaluating multi-robot controllers. In Aaii/iaai. 637-642.",
|
| 1648 |
+
"[24] Jayesh K Gupta, Maxim Egorov, and Mykel Kochenderfer. 2017. Cooperative multi-agent control using deep reinforcement learning. In International conference on autonomous agents and multiagent systems. Springer, 66-83."
|
| 1649 |
+
],
|
| 1650 |
+
"bbox": [
|
| 1651 |
+
86,
|
| 1652 |
+
199,
|
| 1653 |
+
480,
|
| 1654 |
+
893
|
| 1655 |
+
],
|
| 1656 |
+
"page_idx": 8
|
| 1657 |
+
},
|
| 1658 |
+
{
|
| 1659 |
+
"type": "list",
|
| 1660 |
+
"sub_type": "ref_text",
|
| 1661 |
+
"list_items": [
|
| 1662 |
+
"[25] Natasha Jaques, Angeliki Lazaridou, Edward Hughes, Caglar Gulcehre, Pedro Ortega, DJ Strouse, Joel Z Leibo, and Nando De Freitas. 2019. Social influence as intrinsic motivation for multi-agent deep reinforcement learning. In International Conference on Machine Learning. PMLR, 3040-3049.",
|
| 1663 |
+
"[26] Chanyoung Ju and Hyoung II Son. 2019. Modeling and control of heterogeneous agricultural field robots based on Ramadge-Wonham theory. IEEE Robotics and Automation Letters 5, 1 (2019), 48-55.",
|
| 1664 |
+
"[27] Leslie Pack Kaelbling, Michael L Littman, and Anthony R Cassandra. 1998. Planning and acting in partially observable stochastic domains. Artificial intelligence 101, 1-2 (1998), 99-134.",
|
| 1665 |
+
"[28] Soobum Kim, María Santos, Luis Guerrero-Bonilla, Anthony Yezzi, and Magnus Egerstedt. 2022. Coverage Control of Mobile Robots With Different Maximum Speeds for Time-Sensitive Applications. IEEE Robotics and Automation Letters 7, 2 (2022), 3001-3007.",
|
| 1666 |
+
"[29] Ryan Kortvelesy and Amanda Prorok. 2022. QGNN: Value Function Factorisation with Graph Neural Networks. arXiv preprint arXiv:2205.13005 (2022).",
|
| 1667 |
+
"[30] Karol Kurach, Anton Raichuk, Piotr Stanczyk, Michal Zajac, Olivier Bachem, Lasse Espeholt, Carlos Riquelme, Damien Vincent, Marcin Michalski, Olivier Bousquet, et al. 2020. Google research football: A novel reinforcement learning environment. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 34. 4501-4510.",
|
| 1668 |
+
"[31] Ling Li, Alcherio Martinoli, and Yaser S Abu-Mostafa. 2004. Learning and measuring specialization in collaborative swarm systems. Adaptive Behavior 12, 3-4 (2004), 199-212.",
|
| 1669 |
+
"[32] Eric Liang, Richard Liaw, Robert Nishihara, Philipp Moritz, Roy Fox, Ken Goldberg, Joseph Gonzalez, Michael Jordan, and Ion Stoica. 2018. RLib: Abstractions for distributed reinforcement learning. In International Conference on Machine Learning. PMLR, 3053-3062.",
|
| 1670 |
+
"[33] Ryan Lowe, Yi I Wu, Aviv Tamar, Jean Harb, OpenAI Pieter Abbeel, and Igor Mordatch. 2017. Multi-agent actor-critic for mixed cooperative-competitive environments. Advances in neural information processing systems 30 (2017).",
|
| 1671 |
+
"[34] Matthew Malencia, Sandeep Manjanna, M Ani Hsieh, George Pappas, and Vijay Kumar. 2022. Adaptive Sampling of Latent Phenomena using Heterogeneous Robot Teams (ASLaP-HR). arXiv preprint arXiv:2208.06053 (2022).",
|
| 1672 |
+
"[35] Sandeep Manjanna, Alberto Quattrini Li, Ryan N Smith, Ioannis Rekleitis, and Gregory Dudek. 2018. Heterogeneous multi-robot system for exploration and strategic water sampling. In 2018 IEEE international conference on robotics and automation (ICRA). IEEE, 4873-4880.",
|
| 1673 |
+
"[36] Siddharth Mayya, Diego S D'antonio, David Saldana, and Vijay Kumar. 2021. Resilient task allocation in heterogeneous multi-robot systems. IEEE Robotics and Automation Letters 6, 2 (2021), 1327-1334.",
|
| 1674 |
+
"[37] Nathan Michael, Shaojie Shen, Kartik Mohta, Vijay Kumar, Keiji Nagatani, Yoshito Okada, Seiga Kiribayashi, Kazuki Otake, Kazuya Yoshida, Kazunori Ohno, et al. 2014. Collaborative mapping of an earthquake damaged building via ground and aerial robots. In Field and service robotics. Springer, 33-47.",
|
| 1675 |
+
"[38] John F Nash Jr. 1950. Equilibrium points in n-person games. Proceedings of the national academy of sciences 36, 1 (1950), 48-49.",
|
| 1676 |
+
"[39] Gennaro Notomista, Siddharth Mayya, Yousef Emam, Christopher Kroninger, Addison Bohannon, Seth Hutchinson, and Magnus Egerstedt. 2021. A resilient and energy-aware task allocation framework for heterogeneous multirobot systems. IEEE Transactions on Robotics 38, 1 (2021), 159-179.",
|
| 1677 |
+
"[40] Gennaro Notomista, Siddharth Mayya, Seth Hutchinson, and Magnus Egerstedt. 2019. An optimal task allocation strategy for heterogeneous multi-robot systems. In 2019 18th European Control Conference (ECC), IEEE, 2071-2076.",
|
| 1678 |
+
"[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019).",
|
| 1679 |
+
"[42] Luciano CA Pimenta, Vijay Kumar, Renato C Mesquita, and Guilherme AS Pereira. 2008. Sensing and coverage for a network of heterogeneous robots. In 2008 47th IEEE conference on decision and control. IEEE, 3947-3952.",
|
| 1680 |
+
"[43] Amanda Prorok, M Ani Hsieh, and Vijay Kumar. 2017. The impact of diversity on optimal control policies for heterogeneous robot swarms. IEEE Transactions on Robotics 33, 2 (2017), 346-358.",
|
| 1681 |
+
"[44] Tabish Rashid, Mikayel Samvelyan, Christian Schroeder, Gregory Farquhar, Jakob Foerster, and Shimon Whiteson. 2018. Qmix: Monotonic value function factorisation for deep multi-agent reinforcement learning. In International Conference on Machine Learning, PMLR, 4295-4304.",
|
| 1682 |
+
"[45] Mikayel Samvelyan, Tabish Rashid, Christian Schroeder de Witt, Gregory Farquhar, Nantas Nardelli, Tim GJ Rudner, Chia-Man Hung, Philip HS Torr, Jakob Foerster, and Shimon Whiteson. 2019. The StarCraft Multi-Agent Challenge. In Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems. 2186-2188.",
|
| 1683 |
+
"[46] Maria Santos, Yancy Diaz-Mercado, and Magnus Egerstedt. 2018. Coverage control for multirobot teams with heterogeneous sensing capabilities. IEEE Robotics and Automation Letters 3, 2 (2018), 919-925.",
|
| 1684 |
+
"[47] Miguel Schneider-Fontan and Maja J Mataric. 1998. Territorial multi-robot task division. IEEE Transactions on Robotics and Automation 14, 5 (1998), 815–822."
|
| 1685 |
+
],
|
| 1686 |
+
"bbox": [
|
| 1687 |
+
516,
|
| 1688 |
+
108,
|
| 1689 |
+
911,
|
| 1690 |
+
893
|
| 1691 |
+
],
|
| 1692 |
+
"page_idx": 8
|
| 1693 |
+
},
|
| 1694 |
+
{
|
| 1695 |
+
"type": "list",
|
| 1696 |
+
"sub_type": "ref_text",
|
| 1697 |
+
"list_items": [
|
| 1698 |
+
"[48] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017).",
|
| 1699 |
+
"[49] Esmaeil Seraj, Zheyuan Wang, Rohan Paleja, Daniel Martin, Matthew Sklar, Anirudh Patel, and Matthew Gombolay. 2022. Learning efficient diverse communication for cooperative heterogeneous teaming. In Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems. 1173-1182.",
|
| 1700 |
+
"[50] Beining Shang, Richard Crowder, and Klaus-Peter Zauner. 2014. Swarm behavioral sorting based on robotic hardware variation. In 2014 4th International Conference On Simulation And Modeling Methodologies, Technologies And Applications (SIMULTECH). IEEE, 631-636.",
|
| 1701 |
+
"[51] Ajay Shankar, Sebastian Elbaum, and Carrick Detweiler. 2021. Freyja: A full multirotor system for agile & precise outdoor flights. In 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 217-223.",
|
| 1702 |
+
"[52] Riccardo Spica, Eric Cristofalo, Zijian Wang, Eduardo Montijano, and Mac Schwager. 2020. A real-time game theoretic planner for autonomous two-player drone racing. IEEE Transactions on Robotics 36, 5 (2020), 1389-1403.",
|
| 1703 |
+
"[53] Sainbayar Sukhbaatar, Rob Fergus, et al. 2016. Learning multiagent communication with backpropagation. Advances in neural information processing systems 29 (2016).",
|
| 1704 |
+
"[54] Justin K Terry, Nathaniel Grammel, Ananth Hari, Luis Santos, and Benjamin Black. 2020. Revisiting parameter sharing in multi-agent deep reinforcement learning. arXiv preprint arXiv:2005.13625 (2020).",
|
| 1705 |
+
"[55] Ceyer Wakilpoor, Patrick J Martin, Carrie Rebuhn, and Amanda Vu. 2020. Heterogeneous multi-agent reinforcement learning for unknown environment mapping. arXiv preprint arXiv:2010.02663 (2020).",
|
| 1706 |
+
"[56] Mingyu Wang, Zijian Wang, John Talbot, J Christian Gerdes, and Mac Schwager. 2021. Game-theoretic planning for self-driving cars in multivehicle competitive scenarios. IEEE Transactions on Robotics 37, 4 (2021), 1313-1325.",
|
| 1707 |
+
"[57] Tonghan Wang, Heng Dong, Victor Lesser, and Chongjie Zhang. 2020. ROMA: Multi-Agent Reinforcement Learning with Emergent Roles. In International Conference on Machine Learning. PMLR, 9876-9886.",
|
| 1708 |
+
"[58] T Wang, T Gupta, B Peng, A Mahajan, S Whiteson, and C Zhang. 2021. RODE: learning roles to decompose multi-agent tasks. In Proceedings of the International Conference on Learning Representations.",
|
| 1709 |
+
"[59] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2018. How Powerful are Graph Neural Networks?. In International Conference on Learning Representations.",
|
| 1710 |
+
"[60] Javier Yu, Joseph A Vincent, and Mac Schwager. 2022. DiNNO: Distributed Neural Network Optimization for Multi-Robot Collaborative Learning. IEEE Robotics and Automation Letters 7, 2 (2022), 1896-1903.",
|
| 1711 |
+
"[61] Kaiqing Zhang, Zhuoran Yang, and Tamer Başar. 2021. Multi-agent reinforcement learning: A selective overview of theories and algorithms. Handbook of Reinforcement Learning and Control (2021), 321-384."
|
| 1712 |
+
],
|
| 1713 |
+
"bbox": [
|
| 1714 |
+
84,
|
| 1715 |
+
108,
|
| 1716 |
+
483,
|
| 1717 |
+
561
|
| 1718 |
+
],
|
| 1719 |
+
"page_idx": 9
|
| 1720 |
+
},
|
| 1721 |
+
{
|
| 1722 |
+
"type": "text",
|
| 1723 |
+
"text": "A EXPERIMENTAL SETUP",
|
| 1724 |
+
"text_level": 1,
|
| 1725 |
+
"bbox": [
|
| 1726 |
+
83,
|
| 1727 |
+
579,
|
| 1728 |
+
318,
|
| 1729 |
+
593
|
| 1730 |
+
],
|
| 1731 |
+
"page_idx": 9
|
| 1732 |
+
},
|
| 1733 |
+
{
|
| 1734 |
+
"type": "text",
|
| 1735 |
+
"text": "A.1 Simulation",
|
| 1736 |
+
"text_level": 1,
|
| 1737 |
+
"bbox": [
|
| 1738 |
+
83,
|
| 1739 |
+
599,
|
| 1740 |
+
220,
|
| 1741 |
+
613
|
| 1742 |
+
],
|
| 1743 |
+
"page_idx": 9
|
| 1744 |
+
},
|
| 1745 |
+
{
|
| 1746 |
+
"type": "text",
|
| 1747 |
+
"text": "We attach all the code used for simulations and training. Simulations are performed in the VMAS [6] simulator. All environments are customly created apart from Scenario B which is adapted from one of the scenarios already available in the simulator. The training is performed in RLlib [32] using PyTorch [41] and an implementation of the PPO algorithm for multi-agent training. The general training parameters used are shown in Tab. 2. Small variations of these are done on a per-environment basis and can be seen in the training scripts attached. The GPPO and HetGPPO model implementations and details are available in the code. Training is performed on a NVIDIA GeForce RTX 2080 Ti GPU. Each worker collects experience from the simulator using an Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz.",
|
| 1748 |
+
"bbox": [
|
| 1749 |
+
81,
|
| 1750 |
+
617,
|
| 1751 |
+
482,
|
| 1752 |
+
796
|
| 1753 |
+
],
|
| 1754 |
+
"page_idx": 9
|
| 1755 |
+
},
|
| 1756 |
+
{
|
| 1757 |
+
"type": "text",
|
| 1758 |
+
"text": "A.2 Real-world",
|
| 1759 |
+
"text_level": 1,
|
| 1760 |
+
"bbox": [
|
| 1761 |
+
83,
|
| 1762 |
+
810,
|
| 1763 |
+
222,
|
| 1764 |
+
824
|
| 1765 |
+
],
|
| 1766 |
+
"page_idx": 9
|
| 1767 |
+
},
|
| 1768 |
+
{
|
| 1769 |
+
"type": "text",
|
| 1770 |
+
"text": "Real-world experiments are performed using an Optitrack motion capture system with 12 cameras to provide positional information to the robots. The robots used are holonomic RoboMaster",
|
| 1771 |
+
"bbox": [
|
| 1772 |
+
81,
|
| 1773 |
+
828,
|
| 1774 |
+
482,
|
| 1775 |
+
869
|
| 1776 |
+
],
|
| 1777 |
+
"page_idx": 9
|
| 1778 |
+
},
|
| 1779 |
+
{
|
| 1780 |
+
"type": "table",
|
| 1781 |
+
"img_path": "images/078a5c3e8d9f85e8bf2c355af89ece3eee4f61e88c18e58c01474454332a69ad.jpg",
|
| 1782 |
+
"table_caption": [
|
| 1783 |
+
"Table 2: PPO training parameters."
|
| 1784 |
+
],
|
| 1785 |
+
"table_footnote": [],
|
| 1786 |
+
"table_body": "<table><tr><td colspan=\"2\">Training</td><td colspan=\"2\">PPO</td></tr><tr><td>Batch size</td><td>60000</td><td>ε</td><td>0.2</td></tr><tr><td>Minibatch size</td><td>4096</td><td>γ</td><td>0.99</td></tr><tr><td>SDG Iterations</td><td>40</td><td>λ</td><td>0.9</td></tr><tr><td># Workers</td><td>5</td><td>Entropy coeff</td><td>0</td></tr><tr><td># Envs per worker</td><td>50</td><td>KL coeff</td><td>0.01</td></tr><tr><td>Learning rate</td><td>5e-5</td><td>KL target</td><td>0.01</td></tr></table>",
|
| 1787 |
+
"bbox": [
|
| 1788 |
+
555,
|
| 1789 |
+
132,
|
| 1790 |
+
874,
|
| 1791 |
+
234
|
| 1792 |
+
],
|
| 1793 |
+
"page_idx": 9
|
| 1794 |
+
},
|
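
As a reading aid for Tab. 2 above, the sketch below shows how these values might map onto RLlib's classic PPO config dict. This is not the authors' script: the environment name and stop criterion are placeholders of ours, and the per-environment variations live in the code attached to the paper.

```python
from ray import tune

# A minimal sketch mapping Tab. 2 onto RLlib's classic PPO config dict.
# "vmas_passage" is a hypothetical registered environment name and the
# stop criterion is our assumption; see the paper's attached scripts
# for the real setup.
config = {
    "env": "vmas_passage",
    "framework": "torch",            # the paper trains with PyTorch [41]
    # Training (left column of Tab. 2)
    "train_batch_size": 60000,
    "sgd_minibatch_size": 4096,
    "num_sgd_iter": 40,
    "num_workers": 5,
    "num_envs_per_worker": 50,
    "lr": 5e-5,
    # PPO (right column of Tab. 2)
    "clip_param": 0.2,               # epsilon
    "gamma": 0.99,
    "lambda": 0.9,                   # GAE lambda
    "entropy_coeff": 0.0,
    "kl_coeff": 0.01,
    "kl_target": 0.01,
}

tune.run("PPO", config=config, stop={"training_iteration": 1000})
```
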
| 1795 |
+
{
|
| 1796 |
+
"type": "text",
|
| 1797 |
+
"text": "S1 ground robots<sup>8</sup>, running a customized model-based controller onboard [51].",
|
| 1798 |
+
"bbox": [
|
| 1799 |
+
513,
|
| 1800 |
+
253,
|
| 1801 |
+
911,
|
| 1802 |
+
282
|
| 1803 |
+
],
|
| 1804 |
+
"page_idx": 9
|
| 1805 |
+
},
|
| 1806 |
+
{
|
| 1807 |
+
"type": "text",
|
| 1808 |
+
"text": "B SCENARIO B REWARD STRUCTURE",
|
| 1809 |
+
"text_level": 1,
|
| 1810 |
+
"bbox": [
|
| 1811 |
+
514,
|
| 1812 |
+
292,
|
| 1813 |
+
852,
|
| 1814 |
+
306
|
| 1815 |
+
],
|
| 1816 |
+
"page_idx": 9
|
| 1817 |
+
},
|
| 1818 |
+
{
|
| 1819 |
+
"type": "text",
|
| 1820 |
+
"text": "The reward used to train Scenario B is comprised of two components: a positional reward and a collision reward.",
|
| 1821 |
+
"bbox": [
|
| 1822 |
+
513,
|
| 1823 |
+
311,
|
| 1824 |
+
913,
|
| 1825 |
+
339
|
| 1826 |
+
],
|
| 1827 |
+
"page_idx": 9
|
| 1828 |
+
},
|
| 1829 |
+
{
|
| 1830 |
+
"type": "text",
|
| 1831 |
+
"text": "The positional reward is proportional to the time delta in relative distance of an agent from its goal. In other words, a positive reward is assigned if an agent moves towards its goal and a negative one if it moves away. The agents receive a shared positional reward equal to the sum of their individual positional rewards. When both agents are placed on their goal, they keep receiving an additional final reward. The episode ends after 500 timesteps.",
|
| 1832 |
+
"bbox": [
|
| 1833 |
+
513,
|
| 1834 |
+
340,
|
| 1835 |
+
911,
|
| 1836 |
+
436
|
| 1837 |
+
],
|
| 1838 |
+
"page_idx": 9
|
| 1839 |
+
},
|
| 1840 |
+
{
|
| 1841 |
+
"type": "text",
|
| 1842 |
+
"text": "The collision reward is a constant penalty assigned to each agent in the presence of collisions. When training starts, the only collisions penalized are inter-agent ones. A curriculum is set up throughout training so that, when the agents' positional reward gets high enough to symbolize that they solved the task, collisions at the recesses start being penalized as well. This is done so that the agents are able to first learn to solve the task and can then fine-tune their performance by removing collisions.",
|
| 1843 |
+
"bbox": [
|
| 1844 |
+
513,
|
| 1845 |
+
436,
|
| 1846 |
+
913,
|
| 1847 |
+
547
|
| 1848 |
+
],
|
| 1849 |
+
"page_idx": 9
|
| 1850 |
+
},
|
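
To make Appendix B's reward structure concrete, here is a hedged sketch of the two terms as just described. The function name and the constants (`final_bonus`, `collision_penalty`) are illustrative assumptions, not values from the released code.

```python
import numpy as np

# Illustrative sketch of Appendix B's reward (names and constants are
# our assumptions, not the released code). dist_prev/dist_now hold each
# agent's distance to its own goal at consecutive timesteps.
def scenario_b_reward(dist_prev, dist_now, on_goal,
                      agent_collisions, recess_collisions, task_solved,
                      final_bonus=0.05, collision_penalty=0.5):
    # Positional term: positive when agents move towards their goals,
    # negative when they move away; shared as the sum over agents.
    reward = float(np.sum(np.asarray(dist_prev) - np.asarray(dist_now)))
    # Extra reward keeps flowing while both agents sit on their goals.
    if all(on_goal):
        reward += final_bonus
    # Collision term: constant penalty per collision. Early in training
    # only inter-agent collisions count; the curriculum adds collisions
    # with the recesses once the positional reward indicates the task
    # is solved.
    n = agent_collisions + (recess_collisions if task_solved else 0)
    return reward - collision_penalty * n
```
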
| 1851 |
+
{
|
| 1852 |
+
"type": "text",
|
| 1853 |
+
"text": "C SIM TO REAL TRANSFER",
|
| 1854 |
+
"text_level": 1,
|
| 1855 |
+
"bbox": [
|
| 1856 |
+
514,
|
| 1857 |
+
559,
|
| 1858 |
+
759,
|
| 1859 |
+
573
|
| 1860 |
+
],
|
| 1861 |
+
"page_idx": 9
|
| 1862 |
+
},
|
| 1863 |
+
{
|
| 1864 |
+
"type": "text",
|
| 1865 |
+
"text": "To deploy policies trained in the VMAS simulator to the real world, we iteratively tune some simulation hyperparameters to fit the real-world conditions. These parameters are dependent just on the robots and their interaction with the real-world. Once tuned, they can be used for any training scenario.",
|
| 1866 |
+
"bbox": [
|
| 1867 |
+
511,
|
| 1868 |
+
577,
|
| 1869 |
+
911,
|
| 1870 |
+
647
|
| 1871 |
+
],
|
| 1872 |
+
"page_idx": 9
|
| 1873 |
+
},
|
| 1874 |
+
{
|
| 1875 |
+
"type": "text",
|
| 1876 |
+
"text": "The parameters that were key to a successfully deployment are linear friction and drag. Since we operate with ground robots at relatively low speeds, we set drag to 0 and tune linear friction. Through 3 real to sim iterations of binary search we were able to find the correct friction value for our robot-ground pair. Together with friction, we tuned the maximum acceleration in simulation to fit the real robot one. These parameters were tested and validated on simple single-robot tasks such as trajectory following and moving to a goal position.",
|
| 1877 |
+
"bbox": [
|
| 1878 |
+
511,
|
| 1879 |
+
647,
|
| 1880 |
+
913,
|
| 1881 |
+
772
|
| 1882 |
+
],
|
| 1883 |
+
"page_idx": 9
|
| 1884 |
+
},
|
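
The tuning loop described above amounts to a short binary search over the friction coefficient. A sketch under stated assumptions: `rollout_gap` is a hypothetical helper that runs the same single-robot task (e.g., move-to-goal) in VMAS and on the robot, returning how far the simulated run overshoots the real one.

```python
# A sketch of Appendix C's real-to-sim tuning loop (our reading, not the
# authors' script). Drag is fixed to 0 as stated above; friction is
# bisected based on the sign of the sim-vs-real overshoot.
def tune_friction(rollout_gap, lo=0.0, hi=1.0, iters=3):
    for _ in range(iters):          # the paper reports 3 iterations sufficed
        mid = 0.5 * (lo + hi)
        if rollout_gap(friction=mid, drag=0.0) > 0.0:
            lo = mid                # sim travels too far: raise friction
        else:
            hi = mid                # sim falls short: lower friction
    return 0.5 * (lo + hi)
```
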
| 1885 |
+
{
|
| 1886 |
+
"type": "page_footnote",
|
| 1887 |
+
"text": "<sup>7</sup>https://optitrack.com/",
|
| 1888 |
+
"bbox": [
|
| 1889 |
+
84,
|
| 1890 |
+
882,
|
| 1891 |
+
194,
|
| 1892 |
+
895
|
| 1893 |
+
],
|
| 1894 |
+
"page_idx": 9
|
| 1895 |
+
},
|
| 1896 |
+
{
|
| 1897 |
+
"type": "page_footnote",
|
| 1898 |
+
"text": "$^{8}$ https://www.dji.com/uk/robomaster-s1",
|
| 1899 |
+
"bbox": [
|
| 1900 |
+
514,
|
| 1901 |
+
882,
|
| 1902 |
+
705,
|
| 1903 |
+
895
|
| 1904 |
+
],
|
| 1905 |
+
"page_idx": 9
|
| 1906 |
+
}
|
| 1907 |
+
]
|
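
For reference, each `*_content_list.json` added in this batch is a flat list of blocks like the ones above. A minimal sketch of consuming one such file (the path is a placeholder; the field names "type", "text", "img_path", "bbox", and "page_idx" follow the entries shown above):

```python
import json
from pathlib import Path

# Placeholder path; substitute a real file from this batch.
blocks = json.loads(Path("2301.07xxx/<id>/<uuid>_content_list.json").read_text())

# Split the extracted blocks by kind.
texts = [b["text"] for b in blocks if b["type"] == "text"]
images = [b["img_path"] for b in blocks if b["type"] == "image"]
print(f"{len(blocks)} blocks: {len(texts)} text, {len(images)} images")
```
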
2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
2301.07xxx/2301.07137/fe0279f1-2ecb-4373-83b4-236be8ac16ba_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2a422d04fb7c89c153da3ab6be7056fdd51a741bb0b39080dc9a047a6f6ea78
|
| 3 |
+
size 6574918
|
2301.07xxx/2301.07137/full.md
ADDED
|
@@ -0,0 +1,393 @@
|
| 1 |
+
# Heterogeneous Multi-Robot Reinforcement Learning
|
| 2 |
+
|
| 3 |
+
Matteo Bettini
|
| 4 |
+
|
| 5 |
+
University of Cambridge
|
| 6 |
+
|
| 7 |
+
Cambridge, United Kingdom
|
| 8 |
+
|
| 9 |
+
mb2389@cl.cam.ac.uk
|
| 10 |
+
|
| 11 |
+
Ajay Shankar
|
| 12 |
+
|
| 13 |
+
University of Cambridge
|
| 14 |
+
|
| 15 |
+
Cambridge, United Kingdom
|
| 16 |
+
|
| 17 |
+
as3233@cl.cam.ac.uk
|
| 18 |
+
|
| 19 |
+
Amanda Prorok
|
| 20 |
+
|
| 21 |
+
University of Cambridge
|
| 22 |
+
|
| 23 |
+
Cambridge, United Kingdom
|
| 24 |
+
|
| 25 |
+
asp45@cl.cam.ac.uk
|
| 26 |
+
|
| 27 |
+
# ABSTRACT
|
| 28 |
+
|
| 29 |
+
Cooperative multi-robot tasks can benefit from heterogeneity in the robots' physical and behavioral traits. In spite of this, traditional Multi-Agent Reinforcement Learning (MARL) frameworks lack the ability to explicitly accommodate policy heterogeneity, and typically constrain agents to share neural network parameters. This enforced homogeneity limits application in cases where the tasks benefit from heterogeneous behaviors. In this paper, we crystallize the role of heterogeneity in MARL policies. Towards this end, we introduce Heterogeneous Graph Neural Network Proximal Policy Optimization (HetGPPO), a paradigm for training heterogeneous MARL policies that leverages a Graph Neural Network for differentiable inter-agent communication. HetGPPO allows communicating agents to learn heterogeneous behaviors while enabling fully decentralized training in partially observable environments. We complement this with a taxonomical overview that exposes more heterogeneity classes than previously identified. To motivate the need for our model, we present a characterization of techniques that homogeneous models can leverage to emulate heterogeneous behavior, and show how this "apparent heterogeneity" is brittle in real-world conditions. Through simulations and real-world experiments, we show that: (i) when homogeneous methods fail due to strong heterogeneous requirements, HetGPPO succeeds, and, (ii) when homogeneous methods are able to learn apparently heterogeneous behaviors, HetGPPO achieves higher resilience to both training and deployment noise.
|
| 30 |
+
|
| 31 |
+
# KEYWORDS
|
| 32 |
+
|
| 33 |
+
Heterogeneity, Multi-agent reinforcement learning, Multi-robot systems
|
| 34 |
+
|
| 35 |
+
# 1 INTRODUCTION
|
| 36 |
+
|
| 37 |
+
Multi-robot systems deployed to tackle complex cooperative tasks can often benefit from heterogeneous physical and/or behavioral traits to fulfill their mission. Such heterogeneous systems have been leveraged in applications such as disaster response [37], collaborative mapping [8], agriculture [26], and package transport [22]. However, synthesizing optimal decentralized policies for these tasks can be computationally hard, and typically scales exponentially with the number of agents [5]. While faster and scalable solutions exist, such as metaheuristics [9], they fall short of optimality. Multi-Agent Reinforcement Learning (MARL) [61] can be used as a scalable approach to find near-optimal solutions to these problems. However, MARL algorithms without inter-agent communication cannot be easily applied to real-world robotic problems, where partial observability of individual agents is pervasive. Communication is key to overcoming this partial observability, and to enabling cooperation.
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
Figure 1: Taxonomy of heterogeneous multi-robot/agent systems. Top: the three heterogeneity classes $(\mathcal{P},\mathcal{B}_s,\mathcal{B}_d)$ . Bottom: the five mutually exclusive heterogeneity subclasses. Every heterogeneous system belongs to one of these subclasses.
|
| 41 |
+
|
| 42 |
+
Our work deals with heterogeneous multi-robot reinforcement learning, a paradigm located at the boundary of MARL (with inter-agent communication) and heterogeneous multi-robot systems.
|
| 43 |
+
|
| 44 |
+
Most cooperative MARL works constrain agents to share policy neural network parameters to improve training sample efficiency [24, 44, 53]. This causes the agents' models to be identical and, thus, homogeneous. While this is beneficial to speed-up training, it can prevent learning in scenarios that require heterogeneous behavior. A classical method of overcoming this imposed homogeneity is to include a unique integer (e.g., the agent's index) as part of each agent's observations [19, 24]. This allows the agents to share the same policy while exhibiting apparently different behavior. Despite its wide adoption, this solution has many drawbacks [12].
|
| 45 |
+
|
| 46 |
+
We are interested in learning truly heterogeneous decentralized MARL policies. While it is common practice to learn heterogeneous policies when optimizing for different objectives [33], there is a dearth of work in applying this paradigm to scenarios where the objective is shared. Current solutions are few and tailored to specific tasks, and, as such, do not address the broader study and categorization of heterogeneity in MARL. Furthermore, they are limited to noise-free videogame-like MARL benchmarks [30, 45], without considering real-world multi-robot tasks with inter-agent communication. Therefore, we need a framework that enables true heterogeneity among communicating MARL agents and can learn policies that run in a decentralized fashion for (real-world) heterogeneous multi-robot systems.
|
| 47 |
+
|
| 48 |
+
In this work, we introduce Heterogeneous Graph Neural Network Proximal Policy Optimization (HetGPPO), a paradigm for heterogeneous MARL that overcomes the aforementioned issues. HetGPPO is a framework for training heterogeneous MARL policies
|
| 49 |
+
|
| 50 |
+
that leverages a Graph Neural Network (GNN) for differentiable inter-agent communication. Our architecture enables learning for heterogeneous agents while being conditioned only on local communication and local observations. This makes it possible to train HetGPPO in a decentralized fashion, in line with the Decentralized Training Decentralized Execution (DTDE) paradigm [25].
|
| 51 |
+
|
| 52 |
+
We begin by presenting a taxonomy of heterogeneous systems in Sec. 2. The purpose of this taxonomy is to classify such systems according to the source of their heterogeneity. We use this taxonomy in Sec. 3 to categorize related works in the domains of multi-robot systems and MARL. Sec. 4 formulates the MARL problem tackled in this paper. In Sec. 5, we introduce HetGPPO and its homogeneous counterpart GPPO. To motivate the need for policy heterogeneity, we distill and define the techniques that homogeneous models use to emulate heterogeneous behavior (Sec. 6). Through example scenarios, we demonstrate how these techniques work and how they can prove brittle in real-world conditions when compared to truly heterogeneous models. Finally, in Sec. 7, we present evaluations of our framework in both simulated and real-world multi-robot cooperative scenarios. These show that: (i) when homogeneous methods fail due to strong heterogeneous requirements, HetGPPO succeeds, and, (ii) when homogeneous methods are able to learn apparently heterogeneous behaviors, HetGPPO achieves higher resilience to both training and deployment noise. Furthermore, our real-robot experiments demonstrate how heterogeneous policies are intrinsically more resilient to real-world conditions.
|
| 53 |
+
|
| 54 |
+
In this paper, we demonstrate the power of heterogeneous MARL applied to real-world multi-robot systems. We claim the following key contributions:
|
| 55 |
+
|
| 56 |
+
(1) A taxonomy of heterogeneous systems that jointly categorizes research in the multi-robot and multi-agent domains;
|
| 57 |
+
(2) A discourse on behavioral typing techniques that homogeneous models rely on to emulate heterogeneous behavior, with empirical evidence for their brittleness in deployment;
|
| 58 |
+
(3) HetGPPO, a MARL model able to learn heterogeneous communicating policies in a decentralized fashion; and,
|
| 59 |
+
(4) Detailed evaluations of the performance and resilience of heterogeneous policies compared to homogeneous ones in several cooperative multi-robot tasks, both through simulations and real-world experiments.
|
| 60 |
+
|
| 61 |
+
# 2 TAXONOMY OF HETEROGENEOUS SYSTEMS
|
| 62 |
+
|
| 63 |
+
In spite of a substantial body of work attempting to stimulate research on heterogeneous systems (see [1] and the references therein), the robotics and learning community still lacks a shared and structured taxonomy of heterogeneous systems. To properly characterize the related works in the heterogeneity (diversity) domain, we introduce a taxonomy of heterogeneous systems, shown in Fig. 1. According to our taxonomy, system heterogeneity is categorized into two classes: Physical $(\mathcal{P})$ and Behavioral $(\mathcal{B})$.
|
| 64 |
+
|
| 65 |
+
A team is considered physically $(\mathcal{P})$ heterogeneous when at least one of its components (i.e., agents, robots) differs from the others in terms of hardware or physical constraints. That is, it might have different sensors, actuators, motion constraints, etc. These physical differences might lead to different capabilities. For example, a small
|
| 66 |
+
|
| 67 |
+
drone might be able to fly and move aggressively, but likely has shorter battery life than a big and slow ground robot. This type of heterogeneity can lead to different observation and action spaces in the context of learning, for example when robots are equipped with different sensors or actuators.
|
| 68 |
+
|
| 69 |
+
A team is considered behaviorally $(\mathcal{B})$ heterogeneous when at least one of its components differs from the others in terms of software or behavioral model. That is, two behaviorally heterogeneous agents can produce distinct policy outputs when observing the same input. For example, two physically identical drones might cooperate to monitor a site: here, one drone can survey from far away and direct the other to areas that need closer inspection. Behavioral heterogeneity is divided into two subclasses: Same objective $(\mathcal{B}_s)$ and Different objective $(\mathcal{B}_d)$. In $\mathcal{B}_s$ heterogeneous systems, agents optimize the same objective function through heterogeneous behavior. In MARL, this means that they share the same (global or local) reward function. $\mathcal{B}_s$ heterogeneous systems usually represent cooperative settings [11]. However, they could also model adversarial scenarios where agents with the same objective compete for limited resources [7]. In $\mathcal{B}_d$ heterogeneous systems, agents optimize different objective functions through heterogeneous behavior. In MARL, this means that they have different local reward functions or a global reward deriving from the composition of such local functions. $\mathcal{B}_d$ heterogeneous systems usually represent non-cooperative or adversarial settings [33]. However, they could also model cooperative scenarios where agents optimize different sub-functions for a higher-order task [12]. For example, in cooperative search and rescue scenarios, one robot might only be tasked to remove debris, while the others are tasked with the search in an uncluttered space.
|
| 70 |
+
|
| 71 |
+
Physical and behavioral heterogeneity are not mutually exclusive. Thus, the three heterogeneity classes introduced $(\mathcal{P},\mathcal{B}_s,\mathcal{B}_d)$ delineate five heterogeneity subclasses that a system can belong to:
|
| 72 |
+
|
| 73 |
+
- $\mathcal{P} \setminus \mathcal{B}$: Agents are physically different but share the same behavioral model.
|
| 74 |
+
- $\mathcal{P} \cap \mathcal{B}_d$: Agents are physically different and differ in behavioral models and objectives.
|
| 75 |
+
- $\mathcal{P} \cap \mathcal{B}_s$: Agents are physically different and differ in behavioral models, but share the same objective.
|
| 76 |
+
- $\mathcal{B}_s \setminus \mathcal{P}$: Agents are physically identical and share the same objective but differ in behavioral models.
|
| 77 |
+
- $\mathcal{B}_d \setminus \mathcal{P}$: Agents are physically identical but differ in behavioral models and objectives.
|
| 78 |
+
|
| 79 |
+
While this taxonomy is concerned with classifying heterogeneous systems, it does not attempt to measure the degree of heterogeneity. Furthermore, it represents a high-level classification and does not consider dynamic $\mathcal{P}$ heterogeneity, such as different battery levels or hardware deterioration [50].
|
| 80 |
+
|
| 81 |
+
# 3 RELATED WORK
|
| 82 |
+
|
| 83 |
+
In this section, we review the current state of the art in the area of heterogeneous multi-robot/agent systems. We classify the related works according to our taxonomy in Tab. 1.
|
| 84 |
+
|
| 85 |
+
# 3.1 Heterogeneity in multi-robot systems
|
| 86 |
+
|
| 87 |
+
The core literature on heterogeneous robotics has generally focused on developing coordination algorithms that leverage the
|
| 88 |
+
|
| 89 |
+
Table 1: Related work in heterogeneous multi-robot/agent systems classified according to our taxonomy of Sec. 2.
|
| 90 |
+
|
| 91 |
+
<table><tr><td>Heterogeneity class</td><td>Multi-robot systems</td><td>MARL</td></tr><tr><td>P\B</td><td>[8]</td><td>[55],[54]</td></tr><tr><td>P∩Bd</td><td>[37]</td><td>[33],[12]</td></tr><tr><td>P∩Bs</td><td>[17],[40],[18],[36],[39],[43],[42],[46],[28],[34],[35],[10],[14]</td><td>[49]</td></tr><tr><td>Bs\P</td><td>[1],[4],[2],[3],[31],[52],[56]</td><td>[57],[11],[58]</td></tr><tr><td>Bd\P</td><td>[23],[47]</td><td>[33],[12]</td></tr></table>
|
| 92 |
+
|
| 93 |
+
physical heterogeneity of a team to their advantage. Therefore, these works fall in the $\mathcal{P} \cap \mathcal{B}_s$ class. Such diversity can manifest itself in the form of different sensor ranges [42], diverse sensing capabilities [46], or different maximum speeds [28]. These differences can then be exploited in a variety of problems such as multi-robot coverage [28, 42, 46] and heterogeneous task assignment [40, 43], with resilient formulations that can handle uncertainties in robot capabilities [17] or the environment [18, 36, 39]. Sensor heterogeneity has also received attention in the context of active sampling and mapping [34, 35], where heterogeneous computational resources can impact task execution [10]. Lastly, $\mathcal{P} \cap \mathcal{B}_s$ diversity has also been investigated in more complex problems such as heterogeneous trajectory planning [14].
|
| 94 |
+
|
| 95 |
+
Interestingly, such physical diversity without behavioral diversity $(\mathcal{P} \setminus \mathcal{B})$ can often represent a constraint for the problem. Works in this heterogeneity class try to behaviorally reconcile the physical heterogeneity of robots in order to apply homogeneous solutions to the problem at hand. Heterogeneous multi-robot SLAM is an example application where scans coming from different robots, equipped with diverse sensors, need to be matched in order to build a homogeneous shared map [8].
|
| 96 |
+
|
| 97 |
+
Behavioral heterogeneity for physically identical robots is a less explored but promising research direction [1]. Works in this area mostly tackle cooperative problems, leveraging $\mathcal{B}_s \setminus \mathcal{P}$ heterogeneity. Early research by Balch [2, 3] and Li et al. [31] focuses on learning behavioral specialization for multi-robot teams using RL. Game-theoretic autonomous racing [52, 56] constitutes an adversarial setting of $\mathcal{B}_s \setminus \mathcal{P}$ heterogeneity. Note that game-theoretic controllers do not present heterogeneous behavior when all players use the symmetric Nash equilibrium strategy [38]. However, heterogeneity emerges when some robots in the team use traditional model predictive controllers.
|
| 98 |
+
|
| 99 |
+
Conversely, heterogeneous behavior with different objectives $(\mathcal{B}_d)$ has also been analyzed for cooperative robotic tasks, for instance, by dividing a global task into sub-tasks for groups of identical robots $(\mathcal{B}_d \setminus \mathcal{P})$ [23, 47]. When the robots additionally have physical differences between sub-groups, these differences can be leveraged to tackle complex multi-robot tasks, such as post-disaster collaborative mapping [37], resulting in $\mathcal{P} \cap \mathcal{B}_d$ heterogeneity.
|
| 100 |
+
|
| 101 |
+
All the works discussed in this subsection focus on a given heterogeneity class and problem, and develop a targeted solution for
|
| 102 |
+
|
| 103 |
+
that setting. To a large extent, the approaches leverage conventional control theoretical methods. Our work, in contrast, proposes a learning-based framework to synthesize communicating multiagent/robot policies, and can be applied to any heterogeneity class.
|
| 104 |
+
|
| 105 |
+
# 3.2 Heterogeneity in MARL
|
| 106 |
+
|
| 107 |
+
MARL has recently gained increasing traction as an effective technique to tackle multi-robot problems [61]. Using MARL, it is possible to synthesize efficient decentralized multi-agent controllers for hard coordination problems [5]. Homogeneous policies (that share parameters) for physically identical agents are abundant in MARL [20, 24, 29, 44, 53] and constitute the core of the research literature. In an attempt to emulate heterogeneous behavior, a common practice is to augment each agent's observation space with a unique index that represents the agent's type [19, 24]. In this case, agents share the same homogeneous multimodal policy, conditioned on a unique constant index. We define and discuss in depth the limitations of this approach in Sec. 6. $\mathcal{P} \setminus \mathcal{B}$ heterogeneity in MARL focuses on leveraging the power of parameter sharing and homogeneous training for physically different agents. This is achieved by mapping heterogeneous observation spaces into homogeneous fixed-length encodings [55], or by padding and including the agent index into observations [54].
|
| 108 |
+
|
| 109 |
+
The majority of heterogeneous MARL literature falls in the $\mathcal{B}$ heterogeneity class. Different behavioral roles for physically identical agents can be learned through various techniques, such as conditioning agents' policies on a latent representation [57], decomposing and clustering action spaces [58], or by an intrinsic reward that maximizes the mutual information between the agent's trajectory and its role [11]. All the aforementioned works consider physically identical agents with the same objective, thus leveraging $\mathcal{B}_s \setminus \mathcal{P}$ heterogeneity. Furthermore, they do not use inter-agent communication, and hence their application to highly partially observable coordination problems is limited. When considering physically different robots, heterogeneous action or observation spaces have to be taken into account. Such $\mathcal{P} \cap \mathcal{B}_s$ heterogeneity with communicating agents can be modeled, for instance, by an ad-hoc GNN layer for each physically different robot type [49]. While this may be suitable for some tasks where robot types are known beforehand, it prevents physically identical agents from learning heterogeneous behavior.
|
| 110 |
+
|
| 111 |
+
Behavioral heterogeneity with different objectives $(\mathcal{B}_d)$ emerges due to different agent reward functions, as discussed in Sec. 2. MADDPG [33] uses this paradigm in a centralized training approach to learn individual (not shared) actors and critics. They test their approach in mixed cooperative-competitive tasks. In these tasks, both physically identical and physically different agents (i.e., different maximum speeds) are considered. Thus, MADDPG leverages heterogeneity classes $\mathcal{B}_d \setminus \mathcal{P}$ and $\mathcal{P} \cap \mathcal{B}_d$ . The same heterogeneity classes are studied in [12], which proposes to use parameter sharing among sub-groups of agents which are physically identical and share the same reward function. This approach, however, prevents physically identical agents with the same objective to employ different behavioral roles to solve a task.
|
| 112 |
+
|
| 113 |
+
Most works discussed in this section propose solutions to problems that sit exclusively within one given heterogeneity subclass.
|
| 114 |
+
|
| 115 |
+
While a select few could be applied to multiple classes [11, 33, 57], they leverage centralized training methods and do not consider inter-agent communication. Decentralized training and inter-agent communication are two key features needed to make MARL suitable for multi-robot problems.
|
| 116 |
+
|
| 117 |
+
# 4 PROBLEM FORMULATION
|
| 118 |
+
|
| 119 |
+
We now formulate the multi-robot MARL problem tackled in this work. To do so, we first introduce the multi-agent extension of a Partially Observable Markov Decision Process (POMDP) [27].
|
| 120 |
+
|
| 121 |
+
Partially Observable Markov Games. A Partially Observable Markov Game (POMG) is defined as a tuple
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\left\langle \mathcal{V}, S, O, \{\sigma_i\}_{i \in \mathcal{V}}, \mathcal{A}, \{\mathcal{R}_i\}_{i \in \mathcal{V}}, \mathcal{T}, \gamma \right\rangle,
|
| 125 |
+
$$
where $\mathcal{V} = \{1, \dots, n\}$ denotes the set of agents, $S$ is the state space shared by all agents, and $\mathcal{O} \equiv \mathcal{O}_1 \times \ldots \times \mathcal{O}_n$ and $\mathcal{A} \equiv \mathcal{A}_1 \times \ldots \times \mathcal{A}_n$ are the observation and action spaces, with $\mathcal{O}_i \subseteq S$, $\forall i \in \mathcal{V}$. Further, $\{\sigma_i\}_{i \in \mathcal{V}}$ and $\{\mathcal{R}_i\}_{i \in \mathcal{V}}$ are the agent observation and reward functions<sup>1</sup>, such that $\sigma_i : S \mapsto \mathcal{O}_i$ and $\mathcal{R}_i : S \times \mathcal{A} \times S \mapsto \mathbb{R}$. $\mathcal{T}$ is the stochastic state transition model, defined as $\mathcal{T} : S \times \mathcal{A} \times S \mapsto [0, 1]$. Lastly, $\gamma$ is the discount factor.

We structure the agents in a communication graph $\mathcal{G} = (\mathcal{V},\mathcal{E})$. Nodes $i\in \mathcal{V}$ represent agents and edges $e_{ij}\in \mathcal{E}$ represent communication links. The set of edges depends on the maximum agent communication range and changes over time. The communication neighborhood of each agent is defined as $\mathcal{N}_i\equiv \{v_j\mid e_{ij}\in \mathcal{E}\}$.

At each timestep $t$, each agent $i$ gets an observation $o_i^t = \sigma_i(s^t) \in \mathcal{O}_i$ that is a portion of the global state $s^t \in S$. This observation is communicated to the neighboring agents $\mathcal{N}_i^t$. A stochastic policy $\pi_i$ uses this information to compute an action $a_i^t \sim \pi_i(\cdot | o_{\mathcal{N}_i}^t)$. The agents' actions $\mathbf{a}^t = (a_1^t, \dots, a_n^t) \in \mathcal{A}$, along with the current state $s^t$, are then used in the transition model to obtain the next state $s^{t+1} \sim \mathcal{T}(\cdot | s^t, \mathbf{a}^t)$. A reward $r_i^t = \mathcal{R}_i(s^t, \mathbf{a}^t, s^{t+1})$ is then fed to agent $i$.
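To make this interaction loop concrete, the following minimal Python sketch steps through one POMG timestep. The `env` and `policies` objects and all of their methods are hypothetical placeholders introduced here for illustration, not part of any specific library.

```python
# A minimal sketch of one POMG timestep; `env` and `policies` are hypothetical.
def pomg_step(env, policies, state):
    # Each agent observes a portion sigma_i(s) of the global state.
    observations = {i: env.observe(state, i) for i in env.agents}
    # Observations are shared within each communication neighborhood N_i.
    neighborhood_obs = {
        i: [observations[j] for j in env.neighborhood(state, i)]
        for i in env.agents
    }
    # Each stochastic policy samples an action from its neighborhood input.
    actions = {i: policies[i].sample(neighborhood_obs[i]) for i in env.agents}
    # The transition model yields the next state; rewards are per-agent.
    next_state = env.transition(state, actions)
    rewards = {i: env.reward(i, state, actions, next_state) for i in env.agents}
    return next_state, rewards
```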
The goal of each agent is to maximize the sum of discounted rewards $v_{i}^{t} = \sum_{k=0}^{T} \gamma^{k} r_{i}^{t+k}$ over an episode with horizon $T$, potentially infinite. $v_{i}^{t}$ is called the return. Each agent has a value function $V_{i}(o_{\mathcal{N}_{i}}) = \mathbb{E}_{\pi_{i}} \left[ v_{i}^{t} \mid o_{\mathcal{N}_{i}}^{t} = o_{\mathcal{N}_{i}} \right]$, which represents the expected return starting from observations $o_{\mathcal{N}_{i}}$ and following policy $\pi_{i}$. This function estimates the "goodness" of an observation. In this work, we use the Proximal Policy Optimization (PPO) actor-critic algorithm [48], which approximates the policy (actor) and the value function (critic) using neural networks and a constrained policy gradient update.
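As a worked example, the return above can be computed in a single backward pass over an episode's rewards; a sketch:

```python
def discounted_return(rewards, gamma=0.99):
    """Compute v^t = sum_k gamma^k * r^{t+k} for one agent's reward sequence."""
    value = 0.0
    for reward in reversed(rewards):  # backward accumulation runs in O(T)
        value = reward + gamma * value
    return value

# Example: rewards [1, 1] with gamma = 0.5 give 1 + 0.5 * 1 = 1.5.
assert abs(discounted_return([1.0, 1.0], gamma=0.5) - 1.5) < 1e-9
```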
Problem. Learn heterogeneous policies $\pi_i(o_{\mathcal{N}_i}^t;\theta_i)$ and critics $V_{i}(o_{\mathcal{N}_{i}};\theta_{i})$ conditioned on the neural network parameters $\theta_{i}$, different for each agent. The observations $o_{\mathcal{N}_i}^t$ from the agent's neighborhood $\mathcal{N}_i$ are obtained through a differentiable communication channel, making learning inherently decentralizable.

Our objective is to crystallize the role of heterogeneity in MARL policies. Towards this end, we develop a model that addresses the problem description above, motivating it with an empirically-backed discourse on the shortcomings of homogeneous policies (and the behavioral typing techniques that they rely on).
# 5 HETEROGENEOUS MODEL

We introduce the two MARL models that constitute the methodology leveraged in this work: Graph Neural Network Proximal Policy Optimization (GPPO) and its heterogeneous counterpart, HetGPPO.

GPPO builds upon Independent Proximal Policy Optimization (IPPO) [13]. In IPPO, each agent learns a local critic $V_{i}(o_{i})$ and actor $\pi_{i}(o_{i})$, conditioned only on its own observations. Conditioning the critic only on local observations, and not on the full state, introduces non-stationarity during training: other agents are treated as part of the environment and are not explicitly modeled in the critic. While this can be problematic, it has the advantage of not requiring global information during training. Furthermore, IPPO has been shown to outperform many fully-observable critic models on state-of-the-art MARL benchmarks [13].

GPPO overcomes the limitations of IPPO while maintaining its benefits. It uses a GNN communication layer, allowing agents to share information in neighborhoods to coordinate and overcome partial observability. Thanks to this, the GPPO critic $V_{i}(o_{\mathcal{N}_{i}})$ and actor $\pi_{i}(o_{\mathcal{N}_{i}})$ are conditioned on the local communication neighborhood observations $o_{\mathcal{N}_i}$. This helps overcome non-stationarity, while requiring only local information and communication during training.
The GPPO model is illustrated in Fig. 2. At each timestep, each agent $i$ observes the environment, collecting the observations $o_i$. These observations contain absolute geometric features, such as the agent position $\mathbf{p}_i \in \mathbb{R}^2$. The non-absolute features are passed through a Multi Layer Perceptron (MLP) encoder, yielding an embedding $z_i$. The absolute position and the agent velocity $\mathbf{v}_i \in \mathbb{R}^2$ are used to compute edge features $e_{ij}$, which are relative features of agents $i$ and $j$. In this work, we use the relative position $\mathbf{p}_{ij} = \mathbf{p}_i - \mathbf{p}_j$ and relative velocity $\mathbf{v}_{ij} = \mathbf{v}_i - \mathbf{v}_j$ as edge features $e_{ij} = \mathbf{p}_{ij} \,\|\, \mathbf{v}_{ij}$, where $\|$ indicates the concatenation operation. This process ensures that GNN outputs are invariant to translations in $\mathbb{R}^2$ (i.e., the same output is obtained if the whole team is translated in space), helping the model generalize [21]. The edge features $e_{ij}$ and the agent embedding $z_i$ are then used in the message-passing GNN kernel:
$$
h_{i} = \psi_{\theta_{i}}(z_{i}) + \bigoplus_{j \in \mathcal{N}_{i}} \phi_{\theta_{i}}(z_{j} \,\|\, e_{ij}).
$$
Here, $\psi_{\theta_i}$ and $\phi_{\theta_i}$ are two MLPs, parameterized by the agent parameters $\theta_i$<sup>3</sup>, and $\bigoplus$ is an aggregation operator (e.g., sum). The GNN output $h_i$ is then fed to two different MLP decoders, which output the action $a_i \sim \pi_i(\cdot | o_{\mathcal{N}_i})$ and the value $V_i(o_{\mathcal{N}_i})$. Similar to IPPO, GPPO uses parameter sharing to improve sample efficiency; thus $\theta_1 = \ldots = \theta_n$. Parameter sharing allows agents to benefit from collective experiences and thereby reduces training time. On the other hand, it enforces centralized training and constrains agents' policies to be identical (i.e., homogeneous).
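The kernel above maps directly onto a few lines of PyTorch. The sketch below uses sum aggregation and illustrative layer sizes; it is a minimal rendition of the update rule, not the paper's exact architecture.

```python
import torch
import torch.nn as nn

def edge_features(p_i, p_j, v_i, v_j):
    # Relative position and velocity keep the GNN translation-invariant.
    return torch.cat([p_i - p_j, v_i - v_j], dim=-1)

class MessagePassingKernel(nn.Module):
    """Sketch of h_i = psi(z_i) + sum_j phi(z_j || e_ij)."""

    def __init__(self, z_dim=16, e_dim=4, h_dim=32):
        super().__init__()
        self.psi = nn.Sequential(nn.Linear(z_dim, h_dim), nn.ReLU(),
                                 nn.Linear(h_dim, h_dim))
        self.phi = nn.Sequential(nn.Linear(z_dim + e_dim, h_dim), nn.ReLU(),
                                 nn.Linear(h_dim, h_dim))

    def forward(self, z_i, z_neighbors, e_neighbors):
        # z_i: (z_dim,), z_neighbors: (N, z_dim), e_neighbors: (N, e_dim).
        messages = self.phi(torch.cat([z_neighbors, e_neighbors], dim=-1))
        return self.psi(z_i) + messages.sum(dim=0)  # sum as the aggregator
```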
HetGPPO removes the parameter sharing constraint of GPPO, thus allowing agent policies to diverge: $\theta_{1} \neq \dots \neq \theta_{n}$.

Figure 2: Architecture of GPPO and HetGPPO: MARL models with communicating agents. Each agent passes its observation through an encoder, then aggregates messages received from its neighbors using a translation-invariant message-passing GNN and updates its hidden state $h_i$. $h_i$ is then used as input to the policy and value decoders (Dec). HetGPPO is equivalent to GPPO without parameter sharing.

Figure 3: Different forms of behavioral typing. Homogeneous policies use typing to differentiate among agents and emulate heterogeneous behavior.

However, the impact of not sharing parameters in the context of GNN communications is profound: the permutation equivariance property of GNNs [59] does not hold, since the agents now learn different message encoding and interpreting strategies. As a result, the GNN has to learn a different team output for every permutation of a given team input, instead of learning a single one. This can lead to decreases in generalization power and sample efficiency. On the other hand, gradients are backpropagated through communication neighborhoods, enabling agents to learn collectively from local interactions.
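In code, the entire difference between the two models can be reduced to whether a single network object is shared across agents. A sketch (the `make_net` factory is a placeholder for any policy or critic constructor):

```python
import torch.nn as nn

def build_agent_networks(n_agents, make_net, share_parameters):
    """GPPO-style sharing reuses one module; HetGPPO builds independent ones."""
    if share_parameters:
        shared = make_net()
        return [shared] * n_agents                 # theta_1 = ... = theta_n
    return [make_net() for _ in range(n_agents)]   # each theta_i can diverge

# Usage: three heterogeneous two-action policies over 8-dimensional inputs.
policies = build_agent_networks(3, lambda: nn.Linear(8, 2),
                                share_parameters=False)
```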
The structure of HetGPPO, shown in Fig. 2, allows for Decentralized Training Decentralized Execution (DTDE). This is thanks to the fact that GPPO critics are not conditioned on global information. While GPPO uses parameter sharing, HetGPPO removes this need, thus enabling training in any environment where inter-agent communication alone is possible. We note that, by implementing an ad-hoc mechanism to achieve decentralized parameter sharing (e.g., through distributed optimization [60]), GPPO could be trained in a decentralized fashion as well.

We implement HetGPPO and GPPO in PyTorch [41] and employ the RLlib [32] framework for training. The code is available here<sup>4</sup>. Simulations are executed in custom scenarios built with the VMAS simulator [6], available at this link<sup>5</sup>.
# 6 BEHAVIORAL TYPING

HetGPPO, introduced above, allows us to learn truly heterogeneous policies. Counter-intuitively, it is also possible to learn apparently heterogeneous behavior with homogeneous models like GPPO. This allows agents to emulate heterogeneous behavior while leveraging the sample efficiency benefits of parameter sharing. A shared model can encompass different behavioral types which are activated by particular combinations of the input observations. For example, if two robots are transporting a package towards a destination, the model can identify whether an agent is in the back (further from the goal) and assign it a different behavioral type from that of the agent in the front. The input observations provide the conditions for the model to assign behavioral types to the agents.

We refer to this identification process as typing. Fig. 3 depicts a classification of behavioral typing techniques, which we describe in the following subsections. Note that behavioral types lie in a continuous behavioral space (and are not part of a discrete set) [2].
# 6.1 Explicit behavioral typing

The most popular form of behavioral typing consists in feeding the index $i$ of the agent explicitly as part of the observation. This practice has been used extensively in the MARL literature [12, 19, 24, 54]. However, it requires the model to learn a multimodal policy, which switches modes based on this integer index. This can lead to discontinuities in the agents' policy and has been shown to perform sub-optimally [12]. A minimal sketch of this trick follows the definition below.

Definition 6.1 (Explicit behavioral typing). Explicit behavioral typing occurs when a shared decentralized MARL policy is able to type agents based on a constant value, different for each agent, concatenated to the input.
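The sketch below uses a one-hot encoding of the agent index; this is an illustrative variant, and some of the cited works feed a raw integer instead.

```python
import torch

def add_agent_index(obs, agent_index, n_agents):
    """Explicit typing: concatenate a one-hot agent index to the observation."""
    one_hot = torch.zeros(n_agents)
    one_hot[agent_index] = 1.0
    return torch.cat([obs, one_hot])

# A single shared policy can now condition its behavioral mode on the index.
typed_obs = add_agent_index(torch.randn(4), agent_index=1, n_agents=2)
```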
When no explicit index is available, a shared policy may still be able to emulate heterogeneous behavior [15]. We refer to this phenomenon as inferred behavioral typing. Inferred typing can occur for both physically heterogeneous and physically identical agents.

# 6.2 Inferred behavioral typing for physically heterogeneous agents

We first present a case study of inferred behavioral typing for agents that are physically heterogeneous.

Definition 6.2 (Inferred behavioral typing for physically heterogeneous agents). Inferred behavioral typing for physically heterogeneous agents occurs when a shared decentralized MARL policy is able to type $\mathcal{P}$ heterogeneous agents through their observations.
Scenario A (Fig. 4). Consider two robots with different masses, $m_{1} > m_{2}$, located in a 1D workspace at random positions. The robots observe their own position $\mathbf{p}_i \in \mathbb{R}$ and velocity $\mathbf{v}_i \in \mathbb{R}$ and share them via communication. Their action is a force $\mathbf{f}_i \in \mathbb{R}$. They are rewarded collectively for maximizing the maximum speed in the team while minimizing the energy consumed. The optimal policy in this case is, clearly, for the robot with the higher mass to not move at all, while the lighter robot moves at the maximum speed. Evidently these behaviors are heterogeneous, since $\mathbf{f}_1 \neq \mathbf{f}_2$ when both agents receive the same observations.
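A collective reward of this shape can be sketched as follows; the exact functional form and the energy coefficient are illustrative assumptions, not the values used in the paper.

```python
def scenario_a_reward(speeds, forces, energy_coeff=0.1):
    """Collective reward: maximize the team's top speed, penalize actuation."""
    return max(abs(v) for v in speeds) - energy_coeff * sum(f * f for f in forces)

# Optimal play: the heavy robot stays still while the light one moves fast.
print(scenario_a_reward(speeds=[0.0, 1.0], forces=[0.0, 0.5]))  # 0.975
```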
(a) Heterogeneous

(b) Homogeneous

(c) Heterogeneous with noise

(d) Homogeneous with noise

We train the agents in this scenario using GPPO and HetGPPO. Fig. 4 shows a graphical representation of the learned policies of each model. In these plots, an arrow represents the team action vector $\vec{\mathbf{f}}(\vec{\mathbf{v}}) = [\mathbf{f}_1(\vec{\mathbf{v}}),\mathbf{f}_2(\vec{\mathbf{v}})]$ as a function of the observation $\vec{\mathbf{v}} = [\mathbf{v}_1,\mathbf{v}_2]$. A vertical arrow at $[0,0]$ indicates that, when the agents are both still, agent 1 wants to stay still while agent 2 wants to increase its velocity. We plot the policy mean action for every observation pair as a gray vector field, and show a rollout of the policy in red. In Fig. 4a, we observe how HetGPPO is able to learn the optimal policy, which does not depend on any observation. Thanks to physically inferred typing, GPPO (Fig. 4b) is surprisingly also able to learn a policy that yields optimal rollouts. We observe how, due to homogeneity, the GPPO policy is forced to be symmetric about the $\mathbf{v}_1 = \mathbf{v}_2$ axis. Thus, when the GPPO agents are spawned at $[0,0]$, they forcibly take the same action of increasing their velocities. Due to their $\mathcal{P}$ differences, however, this action produces different velocities, making the rollout quickly diverge from the symmetry and thus producing optimal behavior. The fact that a physical difference (i.e., different agent mass) produces different observations (i.e., different agent speed) for the same action enables the homogeneous model to learn an optimal policy with apparent heterogeneous behavior, an example of inferred behavioral typing for physically heterogeneous agents.

Physically inferred typing proves to be a brittle solution. When additive uniform observation noise is injected during rollouts, we observe how the HetGPPO rollout (Fig. 4c) is not impacted at all, while the GPPO rollout (Fig. 4d) occasionally falls on the other side of the diagonal, producing the symmetrical opposite of the optimal behavior and causing the heavy agent to move (horizontal arrows).
# 6.3 Inferred behavioral typing for physically identical agents

We now present a case study of inferred behavioral typing for agents that are physically identical.
(a) Scenario

Figure 4: Policies learned for Scenario A, represented as vector fields (gray) and rollouts in the environment (red). (a) and (b) are not subject to deployment noise. (c) and (d) are subject to $\pm 0.3$ uniform noise on the observations. In these plots, an arrow represents the team action vector $\vec{\mathbf{f}}(\vec{\mathbf{v}}) = [\mathbf{f}_1(\vec{\mathbf{v}}), \mathbf{f}_2(\vec{\mathbf{v}})]$ as a function of the observation $\vec{\mathbf{v}} = [\mathbf{v}_1, \mathbf{v}_2]$. The rollouts always start at the origin ($\vec{\mathbf{v}} = [0,0]$). We can observe how the vector field representing the homogeneous policies is forced to be invariant to permutations of the two inputs and thus is symmetric along $\mathbf{v}_1 = \mathbf{v}_2$. This causes it to become brittle in the presence of noise (d), which makes the observations fall in the wrong part of the plane, where the symmetry enforces a suboptimal policy (horizontal arrows).

(b) Training performance

Figure 5: Scenario B. (a): The setup, with two robots (bigger circles) on opposite sides of a corridor which need to give way to each other to reach their goals (smaller circles). (b): The training curve for Scenario B, showing that, while the heterogeneous model is able to solve the scenario immediately, homogeneous models need around 300 training iterations to learn inferred behavioral typing for physically identical agents. We plot the mean and standard deviation of 10 different runs. Each iteration is performed over 200 episodes.
Definition 6.3 (Inferred behavioral typing for physically identical agents). Inferred behavioral typing for physically identical agents occurs when a shared decentralized MARL policy is able to type physically identical agents through their observations.

Scenario B (Fig. 5). Consider now two physically identical robots, initialized at opposite ends of a narrow corridor, depicted in Fig. 5a. Each robot is positioned in front of the other's goal. The corridor is wide enough to fit only one robot, but contains two robot-sized recesses in the center. The robots observe and communicate their respective 2D positions and velocities, and are tasked with reaching their goals without colliding. Thus, the task can only be solved when one robot gives way to the other.
(a) Scenario A

(b) Scenario B

Figure 7: Performance evaluation in the passage scenario with differently sized robots. Here, the homogeneous model is not able to perform inferred behavioral typing for physically heterogeneous agents, since $\mathcal{P}$ heterogeneity does not affect the robots' observations. Thus, only the heterogeneous model is able to solve the task. We plot the mean and standard deviation of the success rate over 4 runs. Each iteration is performed over 200 episodes of experience.

Again, we train the agents with both GPPO and HetGPPO in this scenario. Looking at the training reward plot in Fig. 5b, we can see that both models are able to learn the correct behavior (reward $>700$). GPPO leverages inferred typing for physically identical agents and is able to assign the "give way" role dynamically according to the relative position and velocity of the two robots. However, we observe that learning behavioral typing takes around 300 training iterations, while the heterogeneous model learns the optimal solution within only 20 iterations.
# 6.4 Limitations of behavioral typing

Although homogeneous models can use behavioral typing to learn apparently heterogeneous behavior, this does not prove to be a reliable and scalable solution.

In [12] it is shown that the performance of explicit behavioral typing degrades as a function of the number of types to be learned. Furthermore, the authors empirically show that this performance decrease is not related to the capacity of the shared homogeneous model (i.e., the number of parameters).

Inferred typing also proves to be a brittle solution. To characterize this brittleness, we perform an evaluation by injecting observation noise during execution, as sketched below. The results are shown in Fig. 6. We report the mean and standard deviation of the normalized reward over 100 runs for 50 noise values between 0 and 2. As we can observe, all models start with the optimal policy and a reward of 1 when the noise is 0. As the noise increases, the homogeneous models either lose functionality almost immediately (as for Scenario B in Fig. 6b) or degrade in performance rapidly (as for Scenario A in Fig. 6a). A heterogeneous policy, in contrast, is able to tolerate higher magnitudes of noise and, even in the difficult corridor scenario, still manages to complete the task about $20\%$ of the time at high noise values.
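The noise-injection step itself is simple; a minimal sketch of the perturbation applied to observations at execution time:

```python
import random

def perturb(observation, noise_level):
    """Add i.i.d. uniform noise in [-noise_level, +noise_level] to each entry."""
    return [x + random.uniform(-noise_level, noise_level) for x in observation]

# Sweeping noise_level over [0, 2] reproduces the style of evaluation in Fig. 6.
noisy_obs = perturb([0.0, 0.0, 1.0, -1.0], noise_level=0.3)
```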
# 7 EXPERIMENTAL EVALUATIONS

We now present evaluations of the proposed models in simulated and real environments.

Figure 6: Performance of homogeneous and heterogeneous models in the presence of deployment noise in the two inferred typing scenarios. Reward is normalized between 0 and 1. Uniform noise is applied to all observations and is in the same units as the observations. We report the mean and standard deviation of the normalized reward over 100 runs for 50 noise values between 0 and 2.

(a) Scenario

(b) Training performance

Performance evaluation. We evaluate HetGPPO on a simulated 2D task which requires heterogeneous behavior. The task is shown in Fig. 7a. Here, two robots of different sizes (blue circles), connected by a rigid linkage through two revolute joints, need to cross a passage while keeping the linkage parallel to it, and then match the desired goal position (green circles) on the other side. The passage comprises a bigger and a smaller gap, which are spawned at a random position and in random order on the wall, but always at the same distance from each other. The team is spawned in a random order and position on the lower side, with the linkage always perpendicular to the passage. The goal is spawned horizontally at a random position on the upper side. Each robot observes and communicates its velocity, relative position to each gap, and relative position to the goal center. The shaped global reward is composed of two convex terms. Before the passage, the robots are rewarded for keeping the linkage parallel to the goal and for bringing its center to the center of the passage. After the passage, the robots are rewarded for bringing the linkage to the goal at the desired orientation. Collisions are also penalized.
Fig. 7b shows the training success rate (i.e., the percentage of episodes in each batch that complete the task). The heterogeneous model is able to learn two behaviorally different policies: the bigger robot passes through the bigger gap and the smaller robot through the smaller gap, achieving the optimal solution. On the other hand, the homogeneous model is not able to assign these two behavioral types using inferred behavioral typing for physically heterogeneous agents, since the $\mathcal{P}$ heterogeneity caused by the different robot sizes does not affect the robots' observations. Agents with homogeneous policies never manage to cross the passage, being deterred by unavoidable collisions.

Resilience to training noise. As elucidated in Sec. 6, homogeneous models can learn heterogeneous behavior. In this subsection, we evaluate the resilience of this paradigm in the presence of observation noise during training. We consider the task depicted in Fig. 8a. This is the same as in Fig. 7a, with the difference that the robots are now physically identical, but the linkage has an asymmetric mass (black circle) that causes a different type of $\mathcal{P}$ heterogeneity, reflected in the velocity observations.
(a) Scenario

(b) Resilience to training noise

The passage is a single gap, positioned randomly on the wall. The agents need to cross it while keeping the linkage perpendicular to the wall and avoiding collisions. The team and the goal are spawned at a random position, order, and rotation on opposite sides of the passage.
In Fig. 8b we report the training success rate for different observation noise values. Thanks to inferred behavioral typing for physically heterogeneous agents, both models solve the task optimally when no noise is added. As the noise increases, the heterogeneous model maintains significantly better performance. For example, with 0.2 observation noise, HetGPPO still achieves more than an $80\%$ success rate, while GPPO falls below $40\%$.

Real-world deployment. To demonstrate the resilience of heterogeneous policies, we deploy Scenario B (Sec. 6.3) in a real-world setting. The setup of the task is shown in Fig. 9b and is the same as in simulation. We use two holonomic RoboMaster S1 ground robots [16] (Fig. 9a), each running a customized model-based controller onboard [51]. We perform 10 runs of the trained HetGPPO and GPPO models both in simulation (Fig. 9c) and in the real world (Fig. 9d). As already discussed in Sec. 6.3, both the heterogeneous and the homogeneous models are able to solve the scenario in simulation, with the homogeneous model leveraging inferred behavioral typing for physically identical agents. This is shown in Fig. 9c, where all the runs of both models reach $100\%$ task completion within 15 s. On the other hand, as seen in Fig. 9d, the performance of the homogeneous model is heavily impacted in the real world. This is because, in this symmetric scenario, the homogeneous model cannot type agents based on position only, and has to rely on velocity observations to build the behavioral types. In practice, however, real-world estimated velocities can be noisy due to factors such as control/process delays and variability in the robot's measurement and motion models. Relying on these observations thus makes the homogeneous (memory-less) model susceptible to erroneously switching the behavioral types dynamically (i.e., failing to distinguish whether the robots are currently moving towards or away from the center). This leads to the plotted rollouts, where the robots alternate the role of giving way to each other near the passage.
(a) Robot

(b) Scenario B (real world)

Figure 8: Resilience to uniform observation noise during training in the passage scenario with an asymmetric package. Here, the heterogeneous model is able to maintain higher performance as the noise increases. We train the two models for 7 different noise values. For each noise value, we report the mean and standard deviation of the success rate after 1000 training iterations over 5 runs. Each training iteration is performed over 200 episodes of experience.

(c) Simulation

Figure 9: Real-world deployment of Scenario B (Fig. 5). We report 10 runs for each model, both in simulation and in the real world. We plot task completion (the scaled sum of the negative distances of each robot from its goal) over time. While in simulation both models are able to perform the task, real-world imperfections make the homogeneous model dynamically switch between learned behavioral types, leading to the robots switching positions multiple times near the central area. This causes the zigzag behavior in (d), with certain rollouts failing or exceeding the maximum allocated time of 60 s.

(d) Real world

Out of 10 runs, only 5 are completed within 60 s. The heterogeneous model, on the other hand, does not rely on behavioral typing and is not impacted by the deployment noise, performing as well as in simulation.
# 8 CONCLUSION

In this paper, we introduced a new paradigm for learning heterogeneous policies in MARL. We motivated it with a categorization of the techniques that homogeneous models can use to emulate heterogeneous behavior and empirically demonstrated their limits. Finally, we showed the benefits of policy heterogeneity for both performance and resilience on multi-robot tasks in simulation and in the real world. While we do not employ any methods to control the degree of heterogeneity of the agents' policies, we observe that training is already a good heterogeneity regularizer. In other words, if the system has heterogeneous requirements, HetGPPO will be able to learn them, while, if the system benefits from homogeneous policies, HetGPPO will learn the same policy as GPPO (with some loss in sample efficiency). In future work, we are interested in developing mechanisms that measure and actively tune the degree of policy heterogeneity in the team, allowing us to control the trade-off between the sample efficiency of homogeneous policies and the resilience of heterogeneous ones.

# ACKNOWLEDGMENTS

This work was supported by ARL DCIST CRA W911NF-17-2-0181, the European Research Council (ERC) Project 949940 (gAIA), and in part by a gift from Arm.
# REFERENCES

[1] Nora Ayanian. 2019. DART: Diversity-enhanced autonomy in robot teams. The International Journal of Robotics Research 38, 12-13 (2019), 1329-1337.
[2] Tucker Balch. 2000. Hierarchic social entropy: An information theoretic measure of robot group diversity. Autonomous Robots 8, 3 (2000), 209-238.
[3] Tucker Balch et al. 1997. Learning roles: Behavioral diversity in robot teams. In AAAI Workshop on Multiagent Learning.
[4] Spring Berman, Adam Halasz, Vijay Kumar, and Stephen Pratt. 2007. Bio-inspired group behaviors for the deployment of a swarm of robots to multiple destinations. In Proceedings 2007 IEEE International Conference on Robotics and Automation. IEEE, 2318-2323.
[5] Daniel S Bernstein, Robert Givan, Neil Immerman, and Shlomo Zilberstein. 2002. The complexity of decentralized control of Markov decision processes. Mathematics of Operations Research 27, 4 (2002), 819-840.
[6] Matteo Bettini, Ryan Kortvelesy, Jan Blumenkamp, and Amanda Prorok. 2022. VMAS: A Vectorized Multi-Agent Simulator for Collective Robot Learning. The 16th International Symposium on Distributed Autonomous Robotic Systems (2022).
[7] Jan Blumenkamp and Amanda Prorok. 2021. The Emergence of Adversarial Communication in Multi-Agent Reinforcement Learning. In Conference on Robot Learning. PMLR, 1394-1414.
[8] Elizabeth R Boroson and Nora Ayanian. 2019. 3D keypoint repeatability for heterogeneous multi-robot SLAM. In 2019 International Conference on Robotics and Automation (ICRA). IEEE, 6337-6343.
[9] Olli Bräysy and Michel Gendreau. 2005. Vehicle routing problem with time windows, Part II: Metaheuristics. Transportation Science 39, 1 (2005), 119-139.
[10] Praneel Chand and Dale A Carnegie. 2013. Mapping and exploration in a hierarchical heterogeneous multi-robot system using limited capability robots. Robotics and Autonomous Systems 61, 6 (2013), 565-579.
[11] Li Chenghao, Tonghan Wang, Chengjie Wu, Qianchuan Zhao, Jun Yang, and Chongjie Zhang. 2021. Celebrating diversity in shared multi-agent reinforcement learning. Advances in Neural Information Processing Systems 34 (2021).
[12] Filippos Christianos, Georgios Papoudakis, Muhammad A Rahman, and Stefano V Albrecht. 2021. Scaling multi-agent reinforcement learning with selective parameter sharing. In International Conference on Machine Learning. PMLR, 1989-1998.
[13] Christian Schroeder de Witt, Tarun Gupta, Denys Makoviichuk, Viktor Makoviychuk, Philip HS Torr, Mingfei Sun, and Shimon Whiteson. 2020. Is independent learning all you need in the StarCraft multi-agent challenge? arXiv preprint arXiv:2011.09533 (2020).
[14] Mark Debord, Wolfgang Hönig, and Nora Ayanian. 2018. Trajectory planning for heterogeneous robot teams. In 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 7924-7931.
[15] Ankur Deka and Katia Sycara. 2021. Natural Emergence of Heterogeneous Strategies in Artificially Intelligent Competitive Teams. In Advances in Swarm Intelligence: 12th International Conference. 13-25.
[16] DJI. Accessed: 2023-01-17. RoboMaster S1. https://www.dji.com/robomaster-s1.
[17] Yousef Emam, Siddharth Mayya, Gennaro Notomista, Addison Bohannon, and Magnus Egerstedt. 2020. Adaptive task allocation for heterogeneous multi-robot teams with evolving and unknown robot capabilities. In 2020 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 7719-7725.
[18] Yousef Emam, Gennaro Notomista, Paul Glotfelter, and Magnus Egerstedt. 2021. Data-Driven Adaptive Task Allocation for Heterogeneous Multi-Robot Teams Using Robust Control Barrier Functions. In 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 9124-9130.
[19] Jakob Foerster, Ioannis Alexandros Assael, Nando De Freitas, and Shimon Whiteson. 2016. Learning to communicate with deep multi-agent reinforcement learning. Advances in Neural Information Processing Systems 29 (2016).
[20] Jakob Foerster, Gregory Farquhar, Triantafyllos Afouras, Nantas Nardelli, and Shimon Whiteson. 2018. Counterfactual multi-agent policy gradients. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 32.
[21] Fabian Fuchs, Daniel Worrall, Volker Fischer, and Max Welling. 2020. SE(3)-Transformers: 3D roto-translation equivariant attention networks. Advances in Neural Information Processing Systems 33 (2020), 1970-1981.
[22] Brian P. Gerkey and Maja J. Mataric. 2002. Pusher-watcher: An approach to fault-tolerant tightly-coupled robot coordination. In Proceedings 2002 IEEE International Conference on Robotics and Automation, Vol. 1. IEEE, 464-469.
[23] Dani Goldberg and Maja J Mataric. 1997. Interference as a tool for designing and evaluating multi-robot controllers. In AAAI/IAAI. 637-642.
[24] Jayesh K Gupta, Maxim Egorov, and Mykel Kochenderfer. 2017. Cooperative multi-agent control using deep reinforcement learning. In International Conference on Autonomous Agents and Multiagent Systems. Springer, 66-83.
[25] Natasha Jaques, Angeliki Lazaridou, Edward Hughes, Caglar Gulcehre, Pedro Ortega, DJ Strouse, Joel Z Leibo, and Nando De Freitas. 2019. Social influence as intrinsic motivation for multi-agent deep reinforcement learning. In International Conference on Machine Learning. PMLR, 3040-3049.
[26] Chanyoung Ju and Hyoung Il Son. 2019. Modeling and control of heterogeneous agricultural field robots based on Ramadge-Wonham theory. IEEE Robotics and Automation Letters 5, 1 (2019), 48-55.
[27] Leslie Pack Kaelbling, Michael L Littman, and Anthony R Cassandra. 1998. Planning and acting in partially observable stochastic domains. Artificial Intelligence 101, 1-2 (1998), 99-134.
[28] Soobum Kim, María Santos, Luis Guerrero-Bonilla, Anthony Yezzi, and Magnus Egerstedt. 2022. Coverage Control of Mobile Robots With Different Maximum Speeds for Time-Sensitive Applications. IEEE Robotics and Automation Letters 7, 2 (2022), 3001-3007.
[29] Ryan Kortvelesy and Amanda Prorok. 2022. QGNN: Value Function Factorisation with Graph Neural Networks. arXiv preprint arXiv:2205.13005 (2022).
[30] Karol Kurach, Anton Raichuk, Piotr Stanczyk, Michal Zajac, Olivier Bachem, Lasse Espeholt, Carlos Riquelme, Damien Vincent, Marcin Michalski, Olivier Bousquet, et al. 2020. Google Research Football: A novel reinforcement learning environment. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 34. 4501-4510.
[31] Ling Li, Alcherio Martinoli, and Yaser S Abu-Mostafa. 2004. Learning and measuring specialization in collaborative swarm systems. Adaptive Behavior 12, 3-4 (2004), 199-212.
[32] Eric Liang, Richard Liaw, Robert Nishihara, Philipp Moritz, Roy Fox, Ken Goldberg, Joseph Gonzalez, Michael Jordan, and Ion Stoica. 2018. RLlib: Abstractions for distributed reinforcement learning. In International Conference on Machine Learning. PMLR, 3053-3062.
[33] Ryan Lowe, Yi I Wu, Aviv Tamar, Jean Harb, OpenAI Pieter Abbeel, and Igor Mordatch. 2017. Multi-agent actor-critic for mixed cooperative-competitive environments. Advances in Neural Information Processing Systems 30 (2017).
[34] Matthew Malencia, Sandeep Manjanna, M Ani Hsieh, George Pappas, and Vijay Kumar. 2022. Adaptive Sampling of Latent Phenomena using Heterogeneous Robot Teams (ASLaP-HR). arXiv preprint arXiv:2208.06053 (2022).
[35] Sandeep Manjanna, Alberto Quattrini Li, Ryan N Smith, Ioannis Rekleitis, and Gregory Dudek. 2018. Heterogeneous multi-robot system for exploration and strategic water sampling. In 2018 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 4873-4880.
[36] Siddharth Mayya, Diego S D'Antonio, David Saldana, and Vijay Kumar. 2021. Resilient task allocation in heterogeneous multi-robot systems. IEEE Robotics and Automation Letters 6, 2 (2021), 1327-1334.
[37] Nathan Michael, Shaojie Shen, Kartik Mohta, Vijay Kumar, Keiji Nagatani, Yoshito Okada, Seiga Kiribayashi, Kazuki Otake, Kazuya Yoshida, Kazunori Ohno, et al. 2014. Collaborative mapping of an earthquake-damaged building via ground and aerial robots. In Field and Service Robotics. Springer, 33-47.
[38] John F Nash Jr. 1950. Equilibrium points in n-person games. Proceedings of the National Academy of Sciences 36, 1 (1950), 48-49.
[39] Gennaro Notomista, Siddharth Mayya, Yousef Emam, Christopher Kroninger, Addison Bohannon, Seth Hutchinson, and Magnus Egerstedt. 2021. A resilient and energy-aware task allocation framework for heterogeneous multi-robot systems. IEEE Transactions on Robotics 38, 1 (2021), 159-179.
[40] Gennaro Notomista, Siddharth Mayya, Seth Hutchinson, and Magnus Egerstedt. 2019. An optimal task allocation strategy for heterogeneous multi-robot systems. In 2019 18th European Control Conference (ECC). IEEE, 2071-2076.
[41] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems 32 (2019).
[42] Luciano CA Pimenta, Vijay Kumar, Renato C Mesquita, and Guilherme AS Pereira. 2008. Sensing and coverage for a network of heterogeneous robots. In 2008 47th IEEE Conference on Decision and Control. IEEE, 3947-3952.
[43] Amanda Prorok, M Ani Hsieh, and Vijay Kumar. 2017. The impact of diversity on optimal control policies for heterogeneous robot swarms. IEEE Transactions on Robotics 33, 2 (2017), 346-358.
[44] Tabish Rashid, Mikayel Samvelyan, Christian Schroeder, Gregory Farquhar, Jakob Foerster, and Shimon Whiteson. 2018. QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning. In International Conference on Machine Learning. PMLR, 4295-4304.
[45] Mikayel Samvelyan, Tabish Rashid, Christian Schroeder de Witt, Gregory Farquhar, Nantas Nardelli, Tim GJ Rudner, Chia-Man Hung, Philip HS Torr, Jakob Foerster, and Shimon Whiteson. 2019. The StarCraft Multi-Agent Challenge. In Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems. 2186-2188.
[46] Maria Santos, Yancy Diaz-Mercado, and Magnus Egerstedt. 2018. Coverage control for multirobot teams with heterogeneous sensing capabilities. IEEE Robotics and Automation Letters 3, 2 (2018), 919-925.
[47] Miguel Schneider-Fontan and Maja J Mataric. 1998. Territorial multi-robot task division. IEEE Transactions on Robotics and Automation 14, 5 (1998), 815-822.
[48] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017).
[49] Esmaeil Seraj, Zheyuan Wang, Rohan Paleja, Daniel Martin, Matthew Sklar, Anirudh Patel, and Matthew Gombolay. 2022. Learning efficient diverse communication for cooperative heterogeneous teaming. In Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems. 1173-1182.
[50] Beining Shang, Richard Crowder, and Klaus-Peter Zauner. 2014. Swarm behavioral sorting based on robotic hardware variation. In 2014 4th International Conference on Simulation and Modeling Methodologies, Technologies and Applications (SIMULTECH). IEEE, 631-636.
[51] Ajay Shankar, Sebastian Elbaum, and Carrick Detweiler. 2021. Freyja: A full multirotor system for agile & precise outdoor flights. In 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 217-223.
[52] Riccardo Spica, Eric Cristofalo, Zijian Wang, Eduardo Montijano, and Mac Schwager. 2020. A real-time game theoretic planner for autonomous two-player drone racing. IEEE Transactions on Robotics 36, 5 (2020), 1389-1403.
[53] Sainbayar Sukhbaatar, Rob Fergus, et al. 2016. Learning multiagent communication with backpropagation. Advances in Neural Information Processing Systems 29 (2016).
[54] Justin K Terry, Nathaniel Grammel, Ananth Hari, Luis Santos, and Benjamin Black. 2020. Revisiting parameter sharing in multi-agent deep reinforcement learning. arXiv preprint arXiv:2005.13625 (2020).
[55] Ceyer Wakilpoor, Patrick J Martin, Carrie Rebhuhn, and Amanda Vu. 2020. Heterogeneous multi-agent reinforcement learning for unknown environment mapping. arXiv preprint arXiv:2010.02663 (2020).
[56] Mingyu Wang, Zijian Wang, John Talbot, J Christian Gerdes, and Mac Schwager. 2021. Game-theoretic planning for self-driving cars in multivehicle competitive scenarios. IEEE Transactions on Robotics 37, 4 (2021), 1313-1325.
[57] Tonghan Wang, Heng Dong, Victor Lesser, and Chongjie Zhang. 2020. ROMA: Multi-Agent Reinforcement Learning with Emergent Roles. In International Conference on Machine Learning. PMLR, 9876-9886.
[58] T Wang, T Gupta, B Peng, A Mahajan, S Whiteson, and C Zhang. 2021. RODE: Learning roles to decompose multi-agent tasks. In Proceedings of the International Conference on Learning Representations.
[59] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2018. How Powerful are Graph Neural Networks?. In International Conference on Learning Representations.
[60] Javier Yu, Joseph A Vincent, and Mac Schwager. 2022. DiNNO: Distributed Neural Network Optimization for Multi-Robot Collaborative Learning. IEEE Robotics and Automation Letters 7, 2 (2022), 1896-1903.
[61] Kaiqing Zhang, Zhuoran Yang, and Tamer Başar. 2021. Multi-agent reinforcement learning: A selective overview of theories and algorithms. Handbook of Reinforcement Learning and Control (2021), 321-384.
# A EXPERIMENTAL SETUP

# A.1 Simulation

We attach all the code used for simulations and training. Simulations are performed in the VMAS simulator [6]. All environments are created from scratch, apart from Scenario B, which is adapted from one of the scenarios already available in the simulator. Training is performed in RLlib [32] using PyTorch [41] and an implementation of the PPO algorithm for multi-agent training. The general training parameters used are shown in Tab. 2; small variations of these are made on a per-environment basis and can be seen in the attached training scripts. The GPPO and HetGPPO model implementations and details are available in the code. Training is performed on an NVIDIA GeForce RTX 2080 Ti GPU. Each worker collects experience from the simulator using an Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz.

Table 2: PPO training parameters.

<table><tr><td colspan="2">Training</td><td colspan="2">PPO</td></tr><tr><td>Batch size</td><td>60000</td><td>ε</td><td>0.2</td></tr><tr><td>Minibatch size</td><td>4096</td><td>γ</td><td>0.99</td></tr><tr><td>SGD iterations</td><td>40</td><td>λ</td><td>0.9</td></tr><tr><td># Workers</td><td>5</td><td>Entropy coeff</td><td>0</td></tr><tr><td># Envs per worker</td><td>50</td><td>KL coeff</td><td>0.01</td></tr><tr><td>Learning rate</td><td>5e-5</td><td>KL target</td><td>0.01</td></tr></table>
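For quick reference, the values in Tab. 2 map onto a plain configuration dictionary such as the one below. The key names are illustrative choices of ours, not RLlib's actual configuration keys, which are documented in the attached training scripts.

```python
# Illustrative mirror of Tab. 2; key names are ours, not RLlib's.
ppo_training_params = {
    # Training
    "batch_size": 60000,
    "minibatch_size": 4096,
    "sgd_iterations": 40,
    "num_workers": 5,
    "num_envs_per_worker": 50,
    "learning_rate": 5e-5,
    # PPO
    "clip_epsilon": 0.2,
    "gamma": 0.99,
    "gae_lambda": 0.9,
    "entropy_coeff": 0.0,
    "kl_coeff": 0.01,
    "kl_target": 0.01,
}
```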
# A.2 Real-world

Real-world experiments are performed using an OptiTrack motion capture system with 12 cameras, which provides positional information to the robots. The robots used are holonomic RoboMaster S1 ground robots<sup>8</sup>, running a customized model-based controller onboard [51].
# B SCENARIO B REWARD STRUCTURE

The reward used to train Scenario B comprises two components: a positional reward and a collision reward.

The positional reward is proportional to the per-timestep change in an agent's distance to its goal. In other words, a positive reward is assigned if an agent moves towards its goal and a negative one if it moves away. The agents receive a shared positional reward equal to the sum of their individual positional rewards. When both agents are placed on their goals, they keep receiving an additional final reward. The episode ends after 500 timesteps.

The collision reward is a constant penalty assigned to each agent in the presence of collisions. When training starts, the only collisions penalized are inter-agent ones. A curriculum is set up throughout training so that, once the agents' positional reward gets high enough to indicate that they have solved the task, collisions at the recesses start being penalized as well. This lets the agents first learn to solve the task and then fine-tune their performance by removing collisions. A sketch of this structure is given below.
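The following minimal sketch captures the two components; the function names and the penalty magnitude are illustrative assumptions, not the exact values used in training.

```python
def positional_reward(prev_goal_dists, goal_dists):
    """Shared reward: positive when agents move toward their goals."""
    return sum(prev - curr for prev, curr in zip(prev_goal_dists, goal_dists))

def collision_reward(agent_collisions, recess_collisions, task_solved,
                     penalty=1.0):
    """Curriculum: recess collisions are only penalized once the task is solved."""
    total = agent_collisions + (recess_collisions if task_solved else 0)
    return -penalty * total
```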
# C SIM TO REAL TRANSFER

To deploy policies trained in the VMAS simulator to the real world, we iteratively tune some simulation hyperparameters to fit the real-world conditions. These parameters depend only on the robots and their interaction with the real world. Once tuned, they can be used for any training scenario.

The parameters that were key to a successful deployment are linear friction and drag. Since we operate ground robots at relatively low speeds, we set drag to 0 and tune linear friction. Through three real-to-sim iterations of binary search, sketched below, we were able to find the correct friction value for our robot-ground pair. Together with friction, we tuned the maximum acceleration in simulation to fit that of the real robot. These parameters were tested and validated on simple single-robot tasks such as trajectory following and moving to a goal position.
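The friction tuning loop can be sketched as a standard binary search; `simulate` and the reference distance below are hypothetical stand-ins for the real-to-sim comparison we performed manually.

```python
def tune_friction(simulate, real_distance, low=0.0, high=1.0, iterations=3):
    """Binary-search a linear friction value matching a real reference maneuver.

    `simulate(friction)` is a hypothetical helper returning the distance the
    simulated robot travels in the maneuver; `real_distance` is the distance
    measured on the physical robot.
    """
    for _ in range(iterations):
        mid = 0.5 * (low + high)
        if simulate(mid) > real_distance:
            low = mid   # too slippery: the simulated robot travels too far
        else:
            high = mid
    return 0.5 * (low + high)
```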
2301.07xxx/2301.07137/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:075817939cf806c07a6eeb0d9c8d25b13e4e45f7bb688c84d710ec5bab025c92
size 367622

2301.07xxx/2301.07137/layout.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_content_list.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_model.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07184/5668b376-c13b-4815-86e0-6a33d65c52df_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7530c0df87e96dc4408c29ce1bc625f909f6fdd0e0d0d36fa10b84801a1faf35
size 2568673

2301.07xxx/2301.07184/full.md ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07184/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1116ac0b25b14b3daf5a74f36612164a899b7b6950dfcce4a1db855a2cceba8c
size 277980

2301.07xxx/2301.07184/layout.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_content_list.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_model.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07255/37aa1763-1477-4988-8975-d9d015e39afb_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb2f2caa021612312d40e97e94ca19096529d1ac78b9ba643278e18670e6c5ba
size 4736155

2301.07xxx/2301.07255/full.md ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07255/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32d5458c9670a0cd0cef205b1f053763cbdd2d849e8be24a662398b9426407a3
size 413856

2301.07xxx/2301.07255/layout.json ADDED
The diff for this file is too large to render. See raw diff

2301.07xxx/2301.07277/9592a50b-d6aa-4260-b4c3-a053b4776d58_content_list.json ADDED
@@ -0,0 +1,1342 @@
[
  {
    "type": "text",
    "text": "Mixed Near- and Far-Field Communications for Extremely Large-Scale Array: An Interference Perspective",
    "text_level": 1,
    "bbox": [101, 56, 893, 118],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Yunpu Zhang, Changsheng You, Member, IEEE, Li Chen, Senior Member, IEEE, and Beixiong Zheng, Member, IEEE",
    "bbox": [196, 125, 800, 160],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Abstract—Extremely large-scale array (XL-array) is envisioned to achieve super-high spectral efficiency in future wireless networks. Different from the existing works that mostly focus on near-field communications, we consider in this paper a new and practical scenario, called mixed near- and far-field communications, where there exist both near- and far-field users in the network. For this scenario, we first obtain a closed-form expression for the inter-user interference at the near-field user caused by the far-field beam by using Fresnel functions, based on which the effects of the number of BS antennas, far-field user (FU) angle, near-field user (NU) angle and distance are analyzed. We show that strong interference exists when the number of BS antennas and the NU distance are relatively small, and/or the NU and FU angle-difference is small. Then, we further obtain the achievable rate of the NU as well as its rate loss caused by the FU interference. Last, numerical results are provided to corroborate our analytical results.",
    "bbox": [58, 191, 491, 395],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Index Terms—Extremely large-scale array/surface (XL-array/surface), mixed near- and far-field communications, interference analysis.",
    "bbox": [58, 400, 491, 440],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "I. INTRODUCTION",
    "text_level": 1,
    "bbox": [207, 446, 341, 459],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Extremely large-scale array/surface (XL-array/surface) has emerged as a promising technology to achieve the ever-increasing performance requirements of future sixth-generation (6G) wireless networks, such as super-high spectral efficiency and spatial resolution [1], [2]. This fundamentally leads to the communication paradigm shift from the conventional far-field communications (with planar-wave propagation) to the near-field communications (with spherical-wave propagation) [3] and even the new mixed near- and far-field communications (with both planar/spherical-wave propagation). For example, consider an XL-array communication system where a base station (BS) equipped with an antenna of diameter 0.5 meter (m) communicates with users at $30\\mathrm{GHz}$ frequency. In this case, the well-known Rayleigh distance is about $50\\mathrm{m}$ [4]. As such, for a typical communication scenario, it may happen that some users reside in the near-field region, while others are located in the far-field region, thus leading to several new design issues such as mixed-field channel estimation, joint near-/far-field beamforming, etc.",
    "bbox": [58, 465, 491, 738],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "In particular, consider the inter-user interference in different communication scenarios. First, if the users considered are all in the far-field region, spatial division multiple access (SDMA) [5] or beam division multiple access (BDMA) [6] can be employed to simultaneously serve multiple users with low inter-user interference. This is because different far-field directional beams pointing towards different users are asymptotically orthogonal in",
    "bbox": [57, 739, 490, 845],
    "page_idx": 0
  },
  {
    "type": "list",
    "sub_type": "text",
    "list_items": [
      "Y. Zhang and C. You are with the Department of Electronic and Electrical Engineering, Southern University of Science and Technology, Shenzhen 518055, China (e-mail: zhangyp2022@mail.sustech.edu.cn; youcs@sustech.edu.cn).",
      "L. Chen is with the CAS Key Laboratory of Wireless-Optical Communications, University of Science and Technology of China, Hefei 230027, China (e-mail: chenli87@ustc.edu.cn).",
      "B. Zheng is with the School of Microelectronics, South China University of Technology, Guangzhou 511442, China (e-mail: bxzheng@scut.edu.cn)."
    ],
    "bbox": [57, 857, 491, 949],
    "page_idx": 0
  },
  {
    "type": "image",
    "img_path": "images/197602dbd3e72f1cf39455eb15a177b9aca73941347da7e41d70aaef9bdf24d1.jpg",
    "image_caption": [
      "Fig. 1: Interference power at a near-field user versus the spatial angle of a far-field beam, where the BS antenna number is 256, the BS transmit power is $30\\mathrm{dBm}$, the carrier frequency is $30\\mathrm{GHz}$, and the BS-user distance is $3\\mathrm{m}$."
    ],
    "image_footnote": [],
    "bbox": [586, 199, 839, 354],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "the angular domain, thus eliminating the inter-user interference. Next, if the users are located in the near-field region, it has been recently shown in [5], [7] that near-field users located at different angles and/or (BS-user) distances can be served simultaneously by using near-field beams with reduced inter-user interference. This is achieved by exploiting the unique near-field beam focusing effect, which enables the beam to be focused at a specific location rather than a specific direction as in conventional far-field communications.",
    "bbox": [501, 417, 937, 551],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Nevertheless, for the new mixed-field communication scenario, the inter-user interference analysis becomes more complicated. Specifically, the interference at the near-field user (NU) caused by the far-field user (FU) is fundamentally determined by the correlation between the NU channel steering vector and the FU beam. To examine this, we show in Fig. 1 the interference power at a NU caused by the FU beam. An interesting observation is that the NU suffers strong interference from the FU beam, even when it is located at a spatial angle different from that of the FU (see the shaded area), which significantly differs from the results in the scenarios with NUs or FUs only. This new phenomenon, however, has not been studied in the existing literature, which thus motivates the current work.",
    "bbox": [501, 555, 937, 750],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "In this paper, we consider an XL-array communication system with one NU and one FU. First, we characterize the normalized interference power at the NU caused by the FU beam in closed form by using Fresnel functions, based on which the effects of the number of BS antennas, FU angle, NU angle and distance are analyzed. It is shown that there is strong interference when the number of BS antennas and the NU distance are relatively small, and/or the FU and NU angles are very close. Then, the resulting rate loss due to inter-user interference is obtained, and numerical results are provided to verify our theoretical results.",
    "bbox": [501, 753, 937, 904],
    "page_idx": 0
  },
  {
    "type": "page_number",
    "text": "1",
    "bbox": [928, 17, 937, 25],
    "page_idx": 0
  },
  {
    "type": "aside_text",
    "text": "arXiv:2301.07277v2 [eess.SP] 29 Jan 2023",
    "bbox": [22, 267, 57, 724],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "<sup>1</sup>The obtained results can be readily extended to analyze the interference at the FU caused by the NU beam.",
    "bbox": [504, 924, 937, 949],
    "page_idx": 0
  },
  {
    "type": "image",
    "img_path": "images/bf5b210c9d214babef713706502cb890597b5708399c34f37be54888e263c85f.jpg",
    "image_caption": [
      "Fig. 2: A two-user XL-array wireless communication system."
    ],
    "image_footnote": [],
    "bbox": [86, 56, 460, 181],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "II. SYSTEM MODEL",
    "text_level": 1,
    "bbox": [200, 214, 346, 227],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "As shown in Fig. 2, we consider a two-user XL-array wireless communication system under the mixed near- and far-field communication scenario, where an $N$-antenna BS with a uniform linear array (ULA) serves a single-antenna NU and a single-antenna FU. Specifically, the NU and FU are respectively located near and far from the BS, with the corresponding BS-user distance smaller and larger than the so-called Rayleigh distance, denoted by $Z = \\frac{2D^2}{\\lambda}$ with $D$ and $\\lambda$ representing the array aperture and carrier wavelength, respectively.",
    "bbox": [57, 234, 490, 371],
    "page_idx": 1
  },
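As a quick numeric check on the Rayleigh-distance figures quoted in this file (a 0.5 m aperture at 30 GHz giving roughly 50 m), here is a minimal Python sketch of Z = 2D^2/lambda. The script and variable names are our own illustration, not part of the extracted JSON.

```python
# Minimal check of the Rayleigh distance Z = 2 D^2 / lambda from the text.
# Only the formula and the example numbers come from the paper.
c = 3e8            # speed of light (m/s)
f_c = 30e9         # carrier frequency (Hz)
D = 0.5            # array aperture (m)
lam = c / f_c      # carrier wavelength: 0.01 m
Z = 2 * D ** 2 / lam
print(Z)           # 50.0 -> users closer than ~50 m to the BS are in the near field
```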
  {
    "type": "text",
    "text": "A. Channel Models",
    "text_level": 1,
    "bbox": [57, 377, 194, 390],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "1) Far-field channel model: Similar to [8], when the user is located sufficiently far from the BS (in the far-field region), the $\\mathrm{BS} \\rightarrow \\mathrm{FU}$ channel can be characterized based on the planar-wave assumption, which is given by",
    "bbox": [57, 397, 491, 457],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathbf{h}_{\\mathrm{far}}^{H} = \\sqrt{N} h_{\\mathrm{far}} \\mathbf{a}^{H}(\\psi), \\tag{1}\n$$\n",
    "text_format": "latex",
    "bbox": [194, 464, 488, 483],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $h_{\\mathrm{far}}$ denotes the complex-valued channel gain between the BS and FU, and $\\mathbf{a}^H(\\psi)$ denotes the far-field steering vector, given by",
    "bbox": [57, 487, 488, 531],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathbf{a}(\\psi) \\triangleq \\frac{1}{\\sqrt{N}}\\left[1, e^{j\\pi\\psi}, \\ldots, e^{j\\pi(N-1)\\psi}\\right]^{T}, \\tag{2}\n$$\n",
    "text_format": "latex",
    "bbox": [130, 531, 488, 563],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $\\psi = 2d\\cos(\\varphi)/\\lambda$ denotes the spatial angle of the FU with $\\varphi$ being the physical angle-of-departure (AoD) from the BS center to the FU. Without loss of generality, we assume that the $N$-antenna BS is placed along the $y$-axis and the $n$-th antenna is located at $(0,\\delta_{n}d)$ m, where $\\delta_{n} = \\frac{2n - N + 1}{2}$ with $n = 0,1,\\dots,N - 1$, and $d = \\frac{\\lambda}{2}$ denotes the antenna spacing.",
    "bbox": [57, 565, 488, 656],
    "page_idx": 1
  },
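The far-field steering vector in (2) is straightforward to realize numerically. Below is a short NumPy sketch (our own illustration; the function name and example angle are hypothetical) that builds a(psi) for a half-wavelength ULA.

```python
import numpy as np

def far_field_steering(N: int, psi: float) -> np.ndarray:
    """Far-field ULA steering vector a(psi) from (2):
    a(psi) = (1/sqrt(N)) * [1, e^{j*pi*psi}, ..., e^{j*pi*(N-1)*psi}]^T."""
    n = np.arange(N)
    return np.exp(1j * np.pi * n * psi) / np.sqrt(N)

# With antenna spacing d = lambda/2, the spatial angle psi = 2 d cos(phi) / lambda
# reduces to cos(phi). The physical AoD below is a made-up example value.
phi = np.deg2rad(60.0)
a = far_field_steering(256, np.cos(phi))
print(np.linalg.norm(a))   # 1.0: the steering vector is unit-norm by construction
```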
  {
    "type": "text",
    "text": "2) Near-field channel model: For the NU, we consider the more accurate spherical-wave propagation model, which applies as well when the user is located in the far-field region. As such, the $\\mathrm{BS} \\rightarrow \\mathrm{NU}$ channel can be modeled as",
    "bbox": [57, 657, 488, 715],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathbf{h}_{\\mathrm{near}}^{H} = \\sqrt{N} h_{\\mathrm{near}} \\mathbf{b}^{H}(\\theta, r), \\tag{3}\n$$\n",
    "text_format": "latex",
    "bbox": [179, 723, 488, 742],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $h_{\\mathrm{near}} = \\frac{\\sqrt{\\beta}}{r} e^{-j\\frac{2\\pi r}{\\lambda}}$ is the complex-valued channel gain<sup>3</sup> with $\\beta$ and $r$ denoting the reference channel gain at a distance of $1\\mathrm{m}$ and the distance between the BS center and the NU, respectively. $\\mathbf{b}^H(\\theta, r)$ denotes the near-field steering vector, which is given by [10]",
    "bbox": [57, 750, 488, 827],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathbf{b}(\\theta, r) = \\frac{1}{\\sqrt{N}}\\left[e^{-j2\\pi(r^{(0)} - r)/\\lambda}, \\ldots, e^{-j2\\pi(r^{(N-1)} - r)/\\lambda}\\right]^{T}, \\tag{4}\n$$\n",
    "text_format": "latex",
    "bbox": [66, 833, 488, 866],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "<sup>2</sup>The NU reduces to a FU when $N$ is sufficiently small and/or the BS-user distance is sufficiently large.",
    "bbox": [57, 878, 488, 901],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "<sup>3</sup>In this paper, we consider the Fresnel region with the BS-NU distance $r$ larger than $1.2D$, for which the amplitude variations over XL-array antennas are negligible [9], while the case with non-negligible amplitude variations will be left for future work.",
    "bbox": [57, 902, 491, 948],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $\\theta = 2d\\cos(\\phi)/\\lambda$ denotes the spatial angle of the NU with $\\phi$ denoting the physical AoD from the BS center to the NU; $r^{(n)} = \\sqrt{r^2 + \\delta_n^2 d^2 - 2r\\theta\\delta_n d}$ represents the distance between the $n$-th antenna at the BS (i.e., $(0,\\delta_{n}d)$) and the NU.",
    "bbox": [503, 55, 936, 114],
    "page_idx": 1
  },
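The near-field counterpart b(theta, r) in (4) only needs the exact per-antenna distances r^(n) defined above. A minimal sketch, assuming the same half-wavelength ULA and hypothetical example values taken from the paper's plots:

```python
import numpy as np

def near_field_steering(N: int, theta: float, r: float, lam: float) -> np.ndarray:
    """Near-field steering vector b(theta, r) from (4), built from the exact
    per-antenna distances r^(n) = sqrt(r^2 + (delta_n d)^2 - 2 r theta delta_n d)."""
    d = lam / 2
    delta = (2 * np.arange(N) - N + 1) / 2          # delta_n = (2n - N + 1)/2
    r_n = np.sqrt(r**2 + (delta * d) ** 2 - 2 * r * theta * delta * d)
    return np.exp(-1j * 2 * np.pi * (r_n - r) / lam) / np.sqrt(N)

# 30 GHz -> lam = 0.01 m; r = 3 m and theta = 0.05 mirror the paper's figures.
b = near_field_steering(256, theta=0.05, r=3.0, lam=0.01)
print(np.linalg.norm(b))   # 1.0: unit-norm, like the far-field vector
```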
  {
    "type": "text",
    "text": "B. Signal Model under Mixed-Field Communications",
    "text_level": 1,
    "bbox": [504, 122, 866, 136],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "The BS is equipped with $N_{\\mathrm{RF}} \\geq K$ radio frequency (RF) chains to enable multiuser communications (i.e., $K$ users). Without loss of generality, we assume $N_{\\mathrm{RF}} = 2$ for the two-user case. Moreover, to facilitate the interference analysis in the sequel, we assume analog-only beamforming for each user, while the analysis can be extended when digital beamforming is applied to further reduce the interference. Let $x_{k}$, $k \\in \\{1,2\\}$, denote the symbol transmitted by the BS to user $k$ with power $P_{k}$, and let $\\mathbf{v}_{\\mathrm{near}}$ and $\\mathbf{v}_{\\mathrm{far}}$ represent the transmit beamformers for the NU and FU, respectively. Then, the received signal at the NU is given by",
    "bbox": [501, 140, 936, 292],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} y_{\\mathrm{near}} &= \\mathbf{h}_{\\mathrm{near}}^{H}\\mathbf{v}_{\\mathrm{near}} x_{\\mathrm{near}} + \\mathbf{h}_{\\mathrm{near}}^{H}\\mathbf{v}_{\\mathrm{far}} x_{\\mathrm{far}} + z_{0} \\\\ &= \\sqrt{N} h_{\\mathrm{near}} \\mathbf{b}^{H}(\\theta, r)\\bigl(\\mathbf{v}_{\\mathrm{near}} x_{\\mathrm{near}} + \\mathbf{v}_{\\mathrm{far}} x_{\\mathrm{far}}\\bigr) + z_{0}, \\end{aligned} \\tag{5}\n$$\n",
    "text_format": "latex",
    "bbox": [521, 297, 936, 338],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $z_0$ is the received additive white Gaussian noise (AWGN) at the NU with power $\\sigma^2$.",
    "bbox": [503, 344, 934, 375],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "As such, the receive signal-to-interference-plus-noise ratio (SINR) at the NU is given by",
    "bbox": [503, 375, 936, 404],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathrm{SINR}_{\\mathrm{near}} = \\frac{P_{\\mathrm{near}}\\left|\\sqrt{N} h_{\\mathrm{near}} \\mathbf{b}^{H}(\\theta, r)\\mathbf{v}_{\\mathrm{near}}\\right|^{2}}{P_{\\mathrm{far}}\\left|\\sqrt{N} h_{\\mathrm{near}} \\mathbf{b}^{H}(\\theta, r)\\mathbf{v}_{\\mathrm{far}}\\right|^{2} + \\sigma^{2}}. \\tag{6}\n$$\n",
    "text_format": "latex",
    "bbox": [550, 411, 934, 467],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "For ease of implementation and for maximizing the received signal power at the intended user, the beamformers for these two users are respectively designed as $\\mathbf{v}_{\\mathrm{near}} = \\mathbf{b}(\\theta, r)$ and $\\mathbf{v}_{\\mathrm{far}} = \\mathbf{a}(\\psi)$. Hence, the achievable rate of the NU in bits per second per Hertz (bps/Hz) is given by",
    "bbox": [501, 470, 936, 546],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} R_{\\mathrm{near}} &= \\log_{2}(1 + \\mathrm{SINR}_{\\mathrm{near}}) \\\\ &= \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{P_{\\mathrm{far}} g_{\\mathrm{near}} f^{2}(N, \\psi, \\theta, r) + \\sigma^{2}}\\right), \\end{aligned} \\tag{7}\n$$\n",
    "text_format": "latex",
    "bbox": [545, 555, 936, 606],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $g_{\\mathrm{near}} = N\\beta/r^2$ and $f(N,\\psi,\\theta,r) = \\left|\\mathbf{b}^{H}(\\theta, r)\\mathbf{a}(\\psi)\\right|$ is defined as the normalized (mixed-field) interference power caused by the far-field beam to the near-field channel steering vector. In the following, we first characterize useful properties of the normalized interference power and then obtain the achievable rate of the NU.",
    "bbox": [503, 611, 936, 702],
    "page_idx": 1
  },
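Putting the two steering vectors together gives the quantity the rest of the paper revolves around, f = |b^H(theta, r) a(psi)|, and the SINR in (6)-(7). The self-contained sketch below is our own check under the stated model; all function names and example numbers are illustrative.

```python
import numpy as np

def a_far(N, psi):
    """Far-field steering vector a(psi) from (2)."""
    return np.exp(1j * np.pi * np.arange(N) * psi) / np.sqrt(N)

def b_near(N, theta, r, lam=0.01):
    """Near-field steering vector b(theta, r) from (4)."""
    d = lam / 2
    delta = (2 * np.arange(N) - N + 1) / 2
    r_n = np.sqrt(r**2 + (delta * d) ** 2 - 2 * r * theta * delta * d)
    return np.exp(-1j * 2 * np.pi * (r_n - r) / lam) / np.sqrt(N)

def f_interf(N, psi, theta, r, lam=0.01):
    """Normalized interference power f = |b^H(theta, r) a(psi)| from (7)."""
    return abs(np.vdot(b_near(N, theta, r, lam), a_far(N, psi)))  # vdot conjugates arg 1

def sinr_near(P_near, P_far, N, beta, r, f, sigma2):
    """Receive SINR of the NU from (6)-(7), with g_near = N * beta / r^2."""
    g = N * beta / r**2
    return P_near * g / (P_far * g * f**2 + sigma2)

f_val = f_interf(256, psi=0.0, theta=0.05, r=3.0)
print(f_val, sinr_near(0.1, 1.0, 256, 10 ** (-6.2), 3.0, f_val, 1e-10))
```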
  {
    "type": "text",
    "text": "III. NORMALIZED INTERFERENCE POWER ANALYSIS",
    "text_level": 1,
    "bbox": [532, 712, 906, 724],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "A. Normalized Interference Power",
    "text_level": 1,
    "bbox": [503, 731, 741, 744],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "First, with the definitions of $\\mathbf{a}(\\psi)$ in (2) and $\\mathbf{b}^H(\\theta, r)$ in (4), the normalized interference power $f(N,\\psi,\\theta,r)$ can be explicitly expressed as",
    "bbox": [503, 750, 936, 794],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} f(N, \\psi, \\theta, r) &\\triangleq \\left|\\mathbf{b}^{H}(\\theta, r)\\mathbf{a}(\\psi)\\right| \\\\ &\\overset{(a_1)}{\\approx} \\frac{1}{N}\\left|\\sum_{\\delta_n} e^{j\\frac{2\\pi}{\\lambda}\\left(-\\delta_n d\\theta + \\frac{\\delta_n^2 d^2(1-\\theta^2)}{2r}\\right) + j\\pi\\left(\\delta_n + \\frac{N-1}{2}\\right)\\psi}\\right| \\\\ &\\overset{(a_2)}{=} \\frac{1}{N}\\left|\\sum_{n=0}^{N-1} e^{j\\pi\\left(\\frac{n^2 d(1-\\theta^2)}{2r} - n\\left(\\theta - \\psi + \\frac{d(N-1)(1-\\theta^2)}{2r}\\right)\\right)}\\right|, \\end{aligned} \\tag{8}\n$$\n",
    "text_format": "latex",
    "bbox": [524, 801, 936, 914],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "where $(a_{1})$ follows from the Taylor expansion and Fresnel approximation with $r^{(n)} = \\sqrt{r^2 + \\delta_n^2 d^2 - 2r\\theta\\delta_n d} \\approx r -$",
    "bbox": [503, 919, 936, 950],
    "page_idx": 1
  },
  {
    "type": "page_number",
    "text": "2",
    "bbox": [926, 17, 936, 26],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/40f44f9e0ba5115502f23c15127cdc34e20a562c394a296a622bf02bc67c09e1.jpg",
    "image_caption": [
      "(a) 3D illustration"
    ],
    "image_footnote": [],
    "bbox": [58, 61, 269, 179],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/dc647a7425ff25a18f369d12e6dc0c489b0b53771a370a379eff844a10be6ab8.jpg",
    "image_caption": [
      "(b) 2D illustration",
      "Fig. 3: Illustrations of function $G(\\beta_{1}, \\beta_{2})$ in (9) versus $\\beta_{1}$ and $\\beta_{2}$."
    ],
    "image_footnote": [],
    "bbox": [274, 61, 475, 183],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "$\\delta_{n}d\\theta + \\frac{\\delta_{n}^{2}d^{2}(1 - \\theta^{2})}{2r}$, and $(a_{2})$ is obtained by replacing $\\delta_{n}$ with $n$, i.e., $\\delta_{n} = \\frac{2n - N + 1}{2}$. Note that $(a_{1})$ is accurate when the BS-NU/scatterer distance is larger than $0.5\\sqrt{D^3/\\lambda}$, which is much smaller than the Rayleigh distance $2D^{2}/\\lambda$.",
    "bbox": [57, 236, 490, 301],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "However, the normalized interference power in (8) is still in a complicated form, which makes it hard to characterize its properties. To tackle this issue, similar to [11], we approximate the normalized interference power in a more tractable form by using Fresnel functions.",
    "bbox": [57, 301, 491, 378],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Theorem 1. The normalized interference power $f(N, \\psi, \\theta, r)$ in (8) can be approximated as",
    "bbox": [57, 385, 491, 415],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\nf(N, \\psi, \\theta, r) \\approx G(\\beta_{1}, \\beta_{2}) = \\left|\\frac{\\hat{C}(\\beta_{1}, \\beta_{2}) + j\\hat{S}(\\beta_{1}, \\beta_{2})}{2\\beta_{2}}\\right|, \\tag{9}\n$$\n",
    "text_format": "latex",
    "bbox": [75, 420, 490, 460],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where",
    "bbox": [58, 465, 102, 477],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\beta_{1} = (\\theta - \\psi)\\sqrt{\\frac{r}{d\\left(1 - \\theta^{2}\\right)}}, \\tag{10}\n$$\n",
    "text_format": "latex",
    "bbox": [181, 474, 488, 507],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\beta_{2} = \\frac{N}{2}\\sqrt{\\frac{d\\left(1 - \\theta^{2}\\right)}{r}}. \\tag{11}\n$$\n",
    "text_format": "latex",
    "bbox": [199, 513, 488, 546],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Moreover, $\\hat{C}(\\beta_{1},\\beta_{2}) \\triangleq C(\\beta_{1} + \\beta_{2}) - C(\\beta_{1} - \\beta_{2})$ and $\\hat{S}(\\beta_1,\\beta_2) \\triangleq S(\\beta_{1} + \\beta_{2}) - S(\\beta_{1} - \\beta_{2})$, where $C(\\cdot)$ and $S(\\cdot)$ are the Fresnel integrals, defined as $C(x) = \\int_0^x \\cos(\\frac{\\pi}{2} t^2)\\,\\mathrm{d}t$ and $S(x) = \\int_0^x \\sin(\\frac{\\pi}{2} t^2)\\,\\mathrm{d}t$.",
    "bbox": [58, 551, 490, 616],
    "page_idx": 2
  },
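Theorem 1 maps directly onto `scipy.special.fresnel`. The sketch below (our own verification, with hypothetical parameter values in line with the paper's plots) evaluates G(beta1, beta2) from (9)-(11) and compares it against the sum in (8). Note that SciPy's `fresnel(x)` returns the pair (S(x), C(x)) in that order.

```python
import numpy as np
from scipy.special import fresnel   # fresnel(x) returns (S(x), C(x))

def G(beta1, beta2):
    """Closed-form approximation (9): |(C_hat + j*S_hat) / (2*beta2)|."""
    S_p, C_p = fresnel(beta1 + beta2)
    S_m, C_m = fresnel(beta1 - beta2)
    return abs(((C_p - C_m) + 1j * (S_p - S_m)) / (2 * beta2))

def f_sum(N, psi, theta, r, lam=0.01):
    """The (Fresnel-approximated) finite sum in (8), evaluated directly."""
    d = lam / 2
    n = np.arange(N)
    phase = np.pi * (n**2 * d * (1 - theta**2) / (2 * r)
                     - n * (theta - psi + d * (N - 1) * (1 - theta**2) / (2 * r)))
    return abs(np.exp(1j * phase).sum()) / N

N, psi, theta, r, d = 256, 0.0, 0.05, 3.0, 0.005
beta1 = (theta - psi) * np.sqrt(r / (d * (1 - theta**2)))   # (10)
beta2 = (N / 2) * np.sqrt(d * (1 - theta**2) / r)           # (11)
print(G(beta1, beta2), f_sum(N, psi, theta, r))             # the two should be close
```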
  {
    "type": "text",
    "text": "Proof: First, from (8), we have",
    "bbox": [58, 622, 277, 636],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\nf(N, \\psi, \\theta, r) = \\left|\\frac{1}{N}\\sum_{n=0}^{N-1} e^{j\\pi\\left(A_{1} n - A_{2}\\right)^{2}}\\right| = |F(A_{1}, A_{2})|,\n$$\n",
    "text_format": "latex",
    "bbox": [86, 642, 460, 683],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where $A_{1} = \\sqrt{\\frac{d(1 - \\theta^{2})}{2r}}$ and $A_{2} = \\frac{1}{A_{1}}\\left(\\frac{\\theta - \\psi}{2} + \\frac{(N - 1)d(1 - \\theta^{2})}{4r}\\right)$. Then, the summation in $F(A_{1},A_{2})$ can be approximated as an integral,",
    "bbox": [57, 689, 488, 743],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} F\\left(A_{1}, A_{2}\\right) &\\overset{(b_{1})}{\\approx} \\frac{1}{N}\\int_{0}^{N} e^{j\\pi\\left(A_{1} n - A_{2}\\right)^{2}}\\,\\mathrm{d}n \\\\ &\\overset{(b_{2})}{=} \\frac{1}{N\\sqrt{2}A_{1}}\\int_{-\\sqrt{2}A_{2}}^{\\sqrt{2}A_{1}N - \\sqrt{2}A_{2}} e^{j\\frac{\\pi}{2}t^{2}}\\,\\mathrm{d}t, \\end{aligned} \\tag{12}\n$$\n",
    "text_format": "latex",
    "bbox": [89, 747, 488, 821],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where $(b_{1})$ is accurate when $N \\to \\infty$ according to the Riemann integral, and $(b_{2})$ is obtained by letting $A_{1}n - A_{2} = \\frac{\\sqrt{2}}{2}t$.",
    "bbox": [57, 825, 488, 858],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Next, based on the Fresnel integrals, we have",
    "bbox": [73, 857, 385, 871],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} F\\left(A_{1}, A_{2}\\right) &= \\frac{1}{N\\sqrt{2}A_{1}}\\int_{-\\sqrt{2}A_{2}}^{\\sqrt{2}A_{1}N - \\sqrt{2}A_{2}} e^{j\\frac{\\pi}{2}t^{2}}\\,\\mathrm{d}t \\\\ &= \\frac{\\int_{0}^{\\sqrt{2}A_{1}N - \\sqrt{2}A_{2}} e^{j\\frac{\\pi}{2}t^{2}}\\,\\mathrm{d}t - \\int_{0}^{-\\sqrt{2}A_{2}} e^{j\\frac{\\pi}{2}t^{2}}\\,\\mathrm{d}t}{\\sqrt{2}A_{1}N} \\end{aligned}\n$$\n",
    "text_format": "latex",
    "bbox": [60, 876, 388, 955],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/8dd1ff3366182b64911dcfb8dcef3db658da5ecff565ff28b8abc11fece62627.jpg",
    "image_caption": [
      "(a) Accuracy of approximation in (9). (b) Effect of number of BS antennas.",
      "Fig. 4: Normalized interference power vs. number of BS antennas."
    ],
    "image_footnote": [],
    "bbox": [506, 54, 717, 186],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/93454f83eb37d857453f38d9ae025517c04f7ef53b1c843a3134360004866782.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [718, 55, 919, 185],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n= \\frac{C\\left(\\beta_{1} + \\beta_{2}\\right) - C\\left(\\beta_{1} - \\beta_{2}\\right) + j\\left(S\\left(\\beta_{1} + \\beta_{2}\\right) - S\\left(\\beta_{1} - \\beta_{2}\\right)\\right)}{2\\beta_{2}},\n$$\n",
    "text_format": "latex",
    "bbox": [511, 239, 933, 272],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where $\\beta_{1} = (\\theta - \\psi)\\sqrt{\\frac{r}{d(1 - \\theta^{2})}}$ and $\\beta_{2} = \\frac{N}{2}\\sqrt{\\frac{d(1 - \\theta^2)}{r}}$.",
    "bbox": [504, 279, 870, 305],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "By defining $G(\\beta_1, \\beta_2) = |F(A_1, A_2)|$ and combining the above, we obtain the desired result in (9).",
    "bbox": [503, 304, 936, 334],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Remark 1 (Useful properties of function $G(\\cdot)$). To draw useful insights, we first illustrate in Fig. 3 the function $G(\\beta_{1},\\beta_{2})$ versus $\\beta_{1}$ and $\\beta_{2}$ in both 2D and 3D. Several important properties of the function $G(\\cdot)$ are summarized as follows.",
    "bbox": [503, 338, 937, 398],
    "page_idx": 2
  },
  {
    "type": "list",
    "sub_type": "text",
    "list_items": [
      "- First, $G(\\cdot)$ is symmetric with respect to $\\beta_{1}$. With $\\beta_{1}$ defined in (10), this symmetry indicates that the normalized interference power remains unchanged when the absolute values of the near- and far-user angle-difference $\\theta - \\psi$ are the same. Moreover, it is worth mentioning that $G(\\cdot)$ is almost invariant to $\\beta_{1}$ when $\\beta_{2}$ is sufficiently small.",
      "- Second, given $\\beta_{1}$, $G(\\cdot)$ generally decreases with $\\beta_{2}$, while $G(\\cdot)$ exhibits large fluctuations with growing $\\beta_{2}$ when $|\\beta_{1}|$ is relatively large (e.g., $|\\beta_{1}| > 0.6$) and small fluctuations when $|\\beta_{1}|$ is small (e.g., $|\\beta_{1}| \\leq 0.6$).",
      "- Moreover, it is worth noting that the derived closed-form expression for $G(\\cdot)$ in (9) is a generalization of the column coherence between two near-field steering vectors defined in [12], which is recovered by setting $\\beta_{1} = 0$, i.e., both near- and far-field steering vectors are at the same angle $(\\theta = \\psi)$."
    ],
    "bbox": [519, 401, 936, 628],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "Remark 2 (What determines the normalized interference power?). Theorem 1 shows an important result that the normalized interference power is fundamentally determined by the function $G(\\cdot)$ through the two parameters $\\beta_{1}$ and $\\beta_{2}$. To be more specific, $\\beta_{1}$ is a function of the FU (spatial) angle, the NU angle and distance, while $\\beta_{2}$ is jointly determined by the number of BS antennas, as well as the NU angle and distance. These factors will be studied in more detail in the sequel.",
    "bbox": [501, 633, 937, 753],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "B. Effects of Key Parameters",
    "text_level": 1,
    "bbox": [504, 763, 705, 779],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "1) Effect of the Number of BS Antennas: To begin with, we plot Fig. 4(a) to compare the approximate normalized interference power in (9) with its actual value in (8). It is observed that the approximation in (9) is accurate under different numbers of BS antennas and NU distances. Next, given the fixed FU angle, NU angle and distance, $\\beta_{2}$ is affected by the number of BS antennas $N$ only, while $\\beta_{1}$ becomes a constant. Moreover, the BS antenna size affects the near-field region, since the Rayleigh distance, $Z = \\frac{1}{2} N^2\\lambda$, increases quadratically with the number of BS antennas. To characterize the effect of $N$, we plot in Fig. 4(b) the normalized",
    "bbox": [501, 784, 937, 950],
    "page_idx": 2
  },
  {
    "type": "page_number",
    "text": "3",
    "bbox": [926, 17, 936, 25],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/a385f6ac0564e1023149ffb968c54cb19caecb88fe6645445ef4259cf8f3bb28.jpg",
    "image_caption": [
      "(a) Effect of FU angle."
    ],
    "image_footnote": [],
    "bbox": [66, 54, 277, 191],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/94be6983a335e9d9adfa8c865bdde210a2b6909f6d7c58891f7da629cc6f1328.jpg",
    "image_caption": [
      "(b) Effect of angle-difference."
    ],
    "image_footnote": [],
    "bbox": [279, 55, 500, 191],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/4f31078d4f534d5c24f7d96877e5d8a1fe33e7dc46a99ecd4101fa3547f5cbec.jpg",
    "image_caption": [
      "(c) Effect of NU angle.",
      "Fig. 5: Normalized interference power versus key parameters, where the normalized interference power is obtained based on (9)."
    ],
    "image_footnote": [],
    "bbox": [491, 55, 704, 191],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/f5507fb49c18b5d96398fe4292076ec26708385ad7254e9330f9f7b09e68504e.jpg",
    "image_caption": [
      "(d) Effect of NU distance."
    ],
    "image_footnote": [],
    "bbox": [707, 55, 919, 191],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "interference power versus the number of BS antennas. First, it is observed that the normalized interference power first increases and then decreases with $N$. The reason is that, when $N$ is small, increasing $N$ leads to a wider interference region in the angular domain (see Fig. 1); hence, increasing $N$ renders the NU more likely to reside in the interference region and thus increases the interference power. Nevertheless, when $N$ is sufficiently large, the NU (in the interference region) suffers decreasing interference power with an increasing $N$ due to the wider interference region. Second, when $N$ is relatively small (i.e., $\\beta_{2}$ is small), a larger angle-difference (i.e., a larger $\\beta_{1}$) leads to a smaller interference, which is consistent with the second property in Remark 1. By contrast, in the large-$N$ regime (e.g., $N > 400$), the interference powers for different angle-differences are comparable. This is expected since the interference region becomes wider with a larger $N$ and will occupy almost the whole spatial region when $N$ is sufficiently large, for which different angle-differences suffer comparable interference power.",
    "bbox": [60, 241, 491, 513],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "2) Effect of the FU Angle: Next, given the fixed number of BS antennas, NU angle and distance, we characterize the effect of the FU angle. In this case, $\\beta_{1}$ is determined by the FU angle $\\psi$, while $\\beta_{2}$ is a constant. To be more specific, we plot in Fig. 5(a) the normalized interference power versus the FU angle. An interesting observation is that when the FU beam angle is located in the neighborhood of the NU angle, there is always strong interference power at the NU. Moreover, the interference power is symmetric with respect to the NU angle, which is in accordance with the first property in Remark 1.",
    "bbox": [57, 520, 491, 671],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Given the NU angle, the FU angle also determines the near- and far-user angle-difference, whose effects are discussed below.",
    "bbox": [58, 675, 488, 705],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Remark 3 (Near-and-far user angle-difference). In Fig. 5(b), we show the effect of the angle-difference on the normalized interference power. First, it is observed that, when the angle-difference is sufficiently small, e.g., $\\theta - \\psi < 0.01$, a longer NU distance (e.g., $r = 30~\\mathrm{m}$) yields a higher interference power. This is intuitively expected since, when the NU and FU angles are very close, the interference power increases with the NU distance, and the maximum interference power is attained when the NU distance exceeds the Rayleigh distance (i.e., the far-field region). Second, as the angle-difference increases, the interference power for different NU distances generally decreases, and eventually becomes very close when the angle-difference exceeds a threshold (e.g., $\\theta - \\psi > 0.35$). Besides, the interference power with a longer NU distance decreases faster. The reason is that a longer NU distance leads to a narrower interference region.",
    "bbox": [60, 723, 491, 950],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "3) Effect of the NU Angle: Note that both $\\beta_{1}$ and $\\beta_{2}$ are functions of the NU angle $\\theta$ for a fixed number of BS antennas, FU angle, and NU distance, thus making it difficult to obtain clear insights into the effect of the NU angle. To this end, we plot in Fig. 5(c) the normalized interference power versus the NU angle $\\theta$. It is observed that the effect of the NU angle is very similar to that of the FU angle, whereas in the large-$\\theta$ regime, the interference power fluctuates more drastically.",
    "bbox": [501, 241, 937, 359],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "4) Effect of the NU Distance: Last, we study the effect of the NU distance $r$ on the normalized interference power, given the fixed number of BS antennas, NU and FU angles. Based on Theorem 1, it can be easily verified that $\\beta_{1}$ monotonically increases with $r$, while $\\beta_{2}$ decreases with $r$. Although it is difficult to analytically characterize the effect of the NU distance, we provide the numerical result in Fig. 5(d) for illustration. Specifically, similar to Fig. 4(b), for the case with a small angle-difference (e.g., $\\theta - \\psi = 0.1, 0.15$ and 0.2), the normalized interference power first slightly increases and then drastically decreases with the NU distance. In contrast, when the angle-difference is sufficiently small (e.g., $\\theta - \\psi = 0.005$), the normalized interference power first increases and then saturates when $r$ is sufficiently large, which is in accordance with Remark 3. Moreover, it is observed that, with a very small angle-difference (e.g., $\\theta - \\psi = 0.005$), the NU suffers the strongest interference power when it is sufficiently far from the BS, in which case it reduces to a FU.",
    "bbox": [501, 361, 937, 632],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "IV. RATE LOSS ANALYSIS",
    "text_level": 1,
    "bbox": [625, 633, 815, 647],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "In this section, we study the effect of the normalized interference power on the achievable rate of the NU. For notational brevity, we use $f$ to denote $f(N,\\psi,\\theta,r)$ in the sequel.",
    "bbox": [503, 652, 937, 698],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "A. Rate Loss",
    "text_level": 1,
    "bbox": [503, 700, 596, 714],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "To characterize the rate loss due to the FU interference, we first provide the ideal achievable rate of the NU with no interference, which is given by",
    "bbox": [501, 719, 937, 765],
    "page_idx": 3
  },
  {
    "type": "equation",
    "text": "\n$$\nR_{\\mathrm{near}}^{*} = \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{\\sigma^{2}}\\right), \\tag{13}\n$$\n",
    "text_format": "latex",
    "bbox": [607, 770, 936, 803],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Then, we define $\\Delta_R = R_{\\mathrm{near}}^* - R_{\\mathrm{near}}$ as the rate loss caused by the FU interference, which is upper-bounded as follows.",
    "bbox": [503, 803, 937, 834],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Lemma 1. The rate performance loss $\\Delta_R$ can be upper-bounded as",
    "bbox": [503, 840, 937, 869],
    "page_idx": 3
  },
  {
    "type": "equation",
    "text": "\n$$\n\\Delta_{R} \\leq \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{\\sigma^{2}} \\cdot \\frac{P_{\\mathrm{far}} f^{2}}{P_{\\mathrm{far}} f^{2} + P_{\\mathrm{near}}}\\right). \\tag{14}\n$$\n",
    "text_format": "latex",
    "bbox": [542, 875, 936, 907],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Proof: Based on (7) and (13), we have",
    "bbox": [504, 912, 777, 926],
    "page_idx": 3
  },
  {
    "type": "equation",
    "text": "\n$$\n\\Delta_{R} = R_{\\mathrm{near}}^{*} - R_{\\mathrm{near}}\n$$\n",
    "text_format": "latex",
    "bbox": [519, 934, 665, 950],
    "page_idx": 3
  },
  {
    "type": "page_number",
    "text": "4",
    "bbox": [926, 17, 937, 25],
    "page_idx": 3
  },
  {
    "type": "table",
    "img_path": "images/65dd37f4b4c447381f93e67d0f9c4f0c9041119f02ee7decc751df098facde93.jpg",
    "table_caption": [
      "Table I: Simulation parameters"
    ],
    "table_footnote": [],
    "table_body": "<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>Number of BS antennas</td><td>N = 256</td></tr><tr><td>Carrier frequency</td><td>f = 30 GHz</td></tr><tr><td>Reference path loss</td><td>β = (λ/4π)<sup>2</sup> = −62 dB</td></tr><tr><td>Transmit power for NU</td><td>P<sub>near</sub> = 20 dBm</td></tr><tr><td>Transmit power for FU</td><td>P<sub>far</sub> = 30 dBm</td></tr><tr><td>Noise power</td><td>σ<sup>2</sup> = −70 dBm</td></tr><tr><td>Distance from BS to NU</td><td>r = 3 m</td></tr></table>",
    "bbox": [114, 70, 431, 167],
    "page_idx": 4
  },
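For convenience, Table I can be restated as plain linear-scale numbers. The dict below is our own restatement; the dBm-to-watt conversions are not part of the extracted JSON.

```python
# Table I in linear scale, for reproducing the plots. Names are illustrative.
dbm_to_watt = lambda x: 10 ** ((x - 30) / 10)   # dBm -> watts
params = {
    "N": 256,                          # number of BS antennas
    "f_c": 30e9,                       # carrier frequency (Hz)
    "beta": 10 ** (-62 / 10),          # reference path loss, (lambda/4pi)^2 = -62 dB
    "P_near": dbm_to_watt(20),         # 20 dBm -> 0.1 W
    "P_far": dbm_to_watt(30),          # 30 dBm -> 1.0 W
    "sigma2": dbm_to_watt(-70),        # -70 dBm -> 1e-10 W
    "r": 3.0,                          # BS-NU distance (m)
}
```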
  {
    "type": "equation",
    "text": "\n$$\n\\begin{aligned} &= \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{\\sigma^{2}}\\right) - \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{P_{\\mathrm{far}} g_{\\mathrm{near}} f^{2} + \\sigma^{2}}\\right) \\\\ &= \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} P_{\\mathrm{far}} g_{\\mathrm{near}}^{2} f^{2}}{P_{\\mathrm{far}} g_{\\mathrm{near}} f^{2}\\sigma^{2} + P_{\\mathrm{near}} g_{\\mathrm{near}}\\sigma^{2} + \\sigma^{4}}\\right) \\\\ &\\overset{(c)}{\\leq} \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} P_{\\mathrm{far}} g_{\\mathrm{near}}^{2} f^{2}}{P_{\\mathrm{far}} g_{\\mathrm{near}} f^{2}\\sigma^{2} + P_{\\mathrm{near}} g_{\\mathrm{near}}\\sigma^{2}}\\right) \\\\ &= \\log_{2}\\left(1 + \\frac{P_{\\mathrm{near}} g_{\\mathrm{near}}}{\\sigma^{2}} \\cdot \\frac{P_{\\mathrm{far}} f^{2}}{P_{\\mathrm{far}} f^{2} + P_{\\mathrm{near}}}\\right), \\end{aligned} \\tag{15}\n$$\n",
    "text_format": "latex",
    "bbox": [80, 174, 490, 311],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "where $(c)$ is obtained by dropping the term $\\sigma^4$, thus completing the proof.",
    "bbox": [57, 316, 491, 347],
    "page_idx": 4
  },
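The chain (13)-(15) is easy to verify numerically. A minimal sketch under the Table I assumptions (the function name and the linear-scale values are our own, not from the source):

```python
import numpy as np

def rate_loss_and_bound(P_near, P_far, g_near, f, sigma2):
    """Exact rate loss Delta_R = R*_near - R_near from (7) and (13),
    together with the upper bound of Lemma 1 in (14)."""
    R_ideal = np.log2(1 + P_near * g_near / sigma2)                            # (13)
    R_near = np.log2(1 + P_near * g_near / (P_far * g_near * f**2 + sigma2))   # (7)
    bound = np.log2(1 + (P_near * g_near / sigma2)
                    * P_far * f**2 / (P_far * f**2 + P_near))                  # (14)
    return R_ideal - R_near, bound

# Table I in linear scale: 20 dBm -> 0.1 W, 30 dBm -> 1 W, -70 dBm -> 1e-10 W.
g_near = 256 * 10 ** (-62 / 10) / 3.0**2        # g_near = N * beta / r^2
loss, ub = rate_loss_and_bound(0.1, 1.0, g_near, f=0.1, sigma2=1e-10)
print(loss, ub)   # loss <= ub; dropping sigma^4 in (c) only enlarges the argument
```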
| 1165 |
+
{
|
| 1166 |
+
"type": "text",
|
| 1167 |
+
"text": "Lemma 1 is intuitively expected since a higher normalized interference power leads to a larger rate loss $\\Delta_R$ . Combined with the results for the analysis of the interference power, it can be concluded that there is a larger rate loss when the NU and FU angles are very close. However, the analysis for the effects of number of BS antennas and NU distance are not straightforward. For example, when $N$ decreases, it can be shown that $g_{\\mathrm{near}}$ monotonically decreases, while $f^2$ generally increases, leading to a larger $\\frac{P_{\\mathrm{far}} f^2}{P_{\\mathrm{far}} f^2 + P_{\\mathrm{near}}}$ . Thus, we further provide numerical results in the next to examine these effects.",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
57,
|
| 1170 |
+
349,
|
| 1171 |
+
490,
|
| 1172 |
+
501
|
| 1173 |
+
],
|
| 1174 |
+
"page_idx": 4
|
| 1175 |
+
},
|
| 1176 |
+
{
|
| 1177 |
+
"type": "text",
|
| 1178 |
+
"text": "B. Numerical Results",
|
| 1179 |
+
"text_level": 1,
|
| 1180 |
+
"bbox": [
|
| 1181 |
+
58,
|
| 1182 |
+
506,
|
| 1183 |
+
207,
|
| 1184 |
+
518
|
| 1185 |
+
],
|
| 1186 |
+
"page_idx": 4
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "text",
|
| 1190 |
+
"text": "Last, we show the effects of the four key parameters on the achievable rate of the NU in Figs. 6(a)–6(d) with the simulation parameters shown in Table I. To begin with, we plot in Figs. 6(a) and 6(b) the results based on the used Fresnel approximation in (9) and its actual value in (8) versus $N$ and $\\psi$ . It is observed that the used approximation is highly accurate with the actual value under different $N$ and $\\psi$ . Second, it is observed from Fig. 6(a) that the rate loss is monotonically decreasing with $N$ , which agrees with the result in Lemma 1. An interesting observation is that there still exists a large rate loss when the number of antennas becomes very large (i.e., very low interference power). This is because, with an increasing $N$ , the interference power generally decreases, while the beamforming gain (i.e., $g_{\\mathrm{near}}$ in (14)) increases, thus leading to the performance gap between the achievable and ideal rates of NU. On the other hand, the rate loss fluctuates when the FU angle is in the neighborhood of the NU angle, and drastically decreases when the angle is larger than a threshold (see Figs. 6(b) and 6(c)). Next, in Fig. 6(d), we observe that the rate loss first slightly fluctuates when $r$ is small, and sharply decreases when $r$ increases. Last, it is observed from Figs. 6(b)–6(d) that the achievable rate of NU will eventually approach to the ideal rate when the rate loss is sufficiently small.",
|
| 1191 |
+
"bbox": [
|
| 1192 |
+
60,
|
| 1193 |
+
523,
|
| 1194 |
+
490,
|
| 1195 |
+
854
|
| 1196 |
+
],
|
| 1197 |
+
"page_idx": 4
|
| 1198 |
+
},
|
| 1199 |
+
{
|
| 1200 |
+
"type": "text",
|
| 1201 |
+
"text": "V. CONCLUSIONS",
|
| 1202 |
+
"text_level": 1,
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
209,
|
| 1205 |
+
871,
|
| 1206 |
+
339,
|
| 1207 |
+
883
|
| 1208 |
+
],
|
| 1209 |
+
"page_idx": 4
|
| 1210 |
+
},
|
| 1211 |
+
{
|
| 1212 |
+
"type": "text",
|
| 1213 |
+
"text": "In this paper, we analyzed the inter-user interference in the new mixed near- and far-field communication scenario. Specifically, we first obtained a closed-form expression for the normalized interference power at the NU caused by the FU beam, based",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
57,
|
| 1216 |
+
888,
|
| 1217 |
+
490,
|
| 1218 |
+
950
|
| 1219 |
+
],
|
| 1220 |
+
"page_idx": 4
|
| 1221 |
+
},
|
| 1222 |
+
{
|
| 1223 |
+
"type": "image",
|
| 1224 |
+
"img_path": "images/f483ab79456cc847e8f869cb2916bb04446a5bb570179acee46500c77e09e986.jpg",
|
| 1225 |
+
"image_caption": [
|
| 1226 |
+
"(a) $\\theta = 0.05$ $\\psi = 0$ $r = 3$"
|
| 1227 |
+
],
|
| 1228 |
+
"image_footnote": [],
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
514,
|
| 1231 |
+
61,
|
| 1232 |
+
699,
|
| 1233 |
+
180
|
| 1234 |
+
],
|
| 1235 |
+
"page_idx": 4
|
| 1236 |
+
},
|
| 1237 |
+
{
|
| 1238 |
+
"type": "image",
|
| 1239 |
+
"img_path": "images/9cad86bf9d597d81ed22f455a898a3f9dbd0b9a05e94a03e7744ca1fb1ca5e18.jpg",
|
| 1240 |
+
"image_caption": [
|
| 1241 |
+
"(b) $\\theta = 0, N = 256, r = 3$"
|
| 1242 |
+
],
|
| 1243 |
+
"image_footnote": [],
|
| 1244 |
+
"bbox": [
|
| 1245 |
+
699,
|
| 1246 |
+
61,
|
| 1247 |
+
934,
|
| 1248 |
+
180
|
| 1249 |
+
],
|
| 1250 |
+
"page_idx": 4
|
| 1251 |
+
},
|
| 1252 |
+
{
|
| 1253 |
+
"type": "image",
|
| 1254 |
+
"img_path": "images/2140f9d4cc4ef58eb95d33e42cbb690bf805b799b46ed97c813a0c19555fa084.jpg",
|
| 1255 |
+
"image_caption": [
|
| 1256 |
+
"(c) $\\psi = 0$ , $N = 256$ , $r = 3$"
|
| 1257 |
+
],
|
| 1258 |
+
"image_footnote": [],
|
| 1259 |
+
"bbox": [
|
| 1260 |
+
511,
|
| 1261 |
+
207,
|
| 1262 |
+
705,
|
| 1263 |
+
327
|
| 1264 |
+
],
|
| 1265 |
+
"page_idx": 4
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "image",
|
| 1269 |
+
"img_path": "images/9a09494685781357054e6826cdcbf3db3ce103d83b745aab6b9744f3359fdf3b.jpg",
|
| 1270 |
+
"image_caption": [
|
| 1271 |
+
"(d) $\\theta = 0.05$ $\\psi = 0$ $N = 256$",
"Fig. 6: Rate loss versus the number of BS antennas, the FU angle, the NU angle and distance."
],
"image_footnote": [],
"bbox": [
699,
207,
934,
327
],
"page_idx": 4
},
{
"type": "text",
"text": "on which, the effects of the number of BS antennas, FU angle, NU angle and distance were analyzed. Moreover, the explicit rate-loss expression caused by the FU interference was obtained, which was verified by numerical results. In the future, this work can be extended in several directions. For example, it is interesting to consider different precoding designs (e.g., fully-digital and hybrid architectures), and multi-access techniques (e.g., non-orthogonal multiple access (NOMA)) to suppress inter-user interference.",
"bbox": [
501,
386,
936,
518
],
"page_idx": 4
},
{
"type": "text",
"text": "REFERENCES",
"text_level": 1,
"bbox": [
673,
520,
767,
532
],
"page_idx": 4
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] M. Cui, Z. Wu, Y. Lu, X. Wei, and L. Dai, “Near-field communications for 6G: Fundamentals, challenges, potentials, and future directions,” IEEE Commun. Mag., early access, 2022.",
|
| 1311 |
+
"[2] Q. Wu, S. Zhang, B. Zheng, C. You, and R. Zhang, \"Intelligent reflecting surface-aided wireless communications: A tutorial,\" IEEE Trans. Commun., vol. 69, no. 5, pp. 3313-3351, 2021.",
|
| 1312 |
+
"[3] H. Lu and Y. Zeng, \"Near-field modeling and performance analysis for multi-user extremely large-scale MIMO communication,\" IEEE Commu. Lett., vol. 26, no. 2, pp. 277-281, 2022.",
|
| 1313 |
+
"[4] Y. Zhang, X. Wu, and C. You, \"Fast near-field beam training for extremely large-scale array,\" IEEE Wireless Commun. Lett., vol. 11, no. 12, pp. 2625-2629, 2022.",
|
| 1314 |
+
"[5] Z. Wu, M. Cui, and L. Dai, \"Multiple access for near-field communications: SDMA or LDMA?\" arXiv preprint arXiv:2208.06349, 2022.",
|
| 1315 |
+
"[6] C. Sun, X. Gao, S. Jin, M. Matthaiou, Z. Ding, and C. Xiao, “Beam division multiple access transmission for massive MIMO communications,” IEEE Trans. Commun., vol. 63, no. 6, pp. 2170–2184, 2015.",
|
| 1316 |
+
"[7] H. Zhang, N. Shlezinger, F. Guidi, D. Dardari, M. F. Imani, and Y. C. Eldar, “Beam focusing for near-field multiuser MIMO communications,” IEEE Trans. Wireless Commun., vol. 21, no. 9, pp. 7476–7490, 2022.",
|
| 1317 |
+
"[8] C. You, B. Zheng, and R. Zhang, \"Fast beam training for IRS-assisted multiuser communications,\" IEEE Wireless Commun. Lett., vol. 9, no. 11, pp. 1845-1849, 2020.",
|
| 1318 |
+
"[9] E. Björnson, Ö. T. Demir, and L. Sanguinetti, “A primer on near-field beamforming for arrays and reconfigurable intelligent surfaces,” in 2021 55th Asilomar Conference on Signals, Systems, and Computers. IEEE, 2021, pp. 105–112.",
|
| 1319 |
+
"[10] H. Luo and F. Gao, “Beam squint assisted user localization in near-field communications systems,” arXiv preprint arXiv:2205.11392, 2022.",
|
| 1320 |
+
"[11] N. Deshpande, M. R. Castellanos, S. R. Khosravirad, J. Du, H. Viswanathan, and R. W. Heath Jr, \"A wideband generalization of the near-field region for extremely large phased-arrays,\" arXiv preprint arXiv:2206.14323, 2022.",
|
| 1321 |
+
"[12] M. Cui and L. Dai, \"Channel estimation for extremely large-scale MIMO: Far-field or near-field?\" IEEE Trans. Commun., vol. 70, no. 4, pp. 2663-2677, 2022."
],
"bbox": [
506,
539,
936,
946
],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [
928,
17,
936,
25
],
"page_idx": 4
}
]
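
For readers skimming this batch, the schema visible in the diff above is worth spelling out: each content_list file is a flat JSON array of layout blocks, where every block carries a "type" ("text", "image", "list", or "page_number"), a "page_idx", and a "bbox" in page coordinates, plus type-specific payloads ("text" and an optional "text_level" heading flag for text blocks, "img_path" and "image_caption" for images, "list_items" for reference lists). Below is a minimal consumption sketch under those assumptions; the filename is a placeholder and the printed output format is an arbitrary illustration, not part of the dataset.

```python
import json
from pathlib import Path

# Placeholder filename: any content_list block array with the schema above
# should work the same way.
path = Path("example_content_list.json")
blocks = json.loads(path.read_text(encoding="utf-8"))

for block in blocks:
    kind = block["type"]
    if kind == "text":
        # "text_level": 1 marks a section heading such as "V. CONCLUSIONS".
        prefix = "# " if block.get("text_level") == 1 else ""
        print(prefix + block["text"], end="\n\n")
    elif kind == "image":
        # Image blocks reference a file inside images.zip and carry
        # zero or more caption strings.
        caption = " ".join(block.get("image_caption", []))
        print(f"[figure {block['img_path']}] {caption}", end="\n\n")
    elif kind == "list":
        # Lists with sub_type "ref_text" hold the bibliography entries.
        for item in block.get("list_items", []):
            print(item)
        print()
    elif kind == "page_number":
        # Printed page numbers are kept as their own blocks; usually skipped.
        continue
```

Blocks appear in reading order per page, so concatenating "text" blocks with the same "page_idx" approximately reconstructs the page. Note one quirk visible above: a paragraph interrupted by a figure (here, the conclusion) is split across two "text" blocks with separate bboxes, so downstream consumers that want whole paragraphs need to rejoin such fragments.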