Add MinerU batch cbb97d6f-4691-419d-98bd-c6e037692ea4
This view is limited to 50 files because it contains too many changes.
- .gitattributes +64 -0
- data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_content_list.json +0 -0
- data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_model.json +0 -0
- data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_origin.pdf +3 -0
- data/2025/2503_00xxx/2503.00897/full.md +499 -0
- data/2025/2503_00xxx/2503.00897/images.zip +3 -0
- data/2025/2503_00xxx/2503.00897/layout.json +0 -0
- data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_content_list.json +1534 -0
- data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_model.json +2244 -0
- data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_origin.pdf +3 -0
- data/2025/2503_00xxx/2503.00936/full.md +342 -0
- data/2025/2503_00xxx/2503.00936/images.zip +3 -0
- data/2025/2503_00xxx/2503.00936/layout.json +0 -0
- data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_content_list.json +0 -0
- data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_model.json +0 -0
- data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_origin.pdf +3 -0
- data/2025/2503_00xxx/2503.00986/full.md +439 -0
- data/2025/2503_00xxx/2503.00986/images.zip +3 -0
- data/2025/2503_00xxx/2503.00986/layout.json +0 -0
- data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_content_list.json +0 -0
- data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_model.json +0 -0
- data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_origin.pdf +3 -0
- data/2025/2503_01xxx/2503.01006/full.md +0 -0
- data/2025/2503_01xxx/2503.01006/images.zip +3 -0
- data/2025/2503_01xxx/2503.01006/layout.json +0 -0
- data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_content_list.json +0 -0
- data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_model.json +0 -0
- data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_origin.pdf +3 -0
- data/2025/2503_01xxx/2503.01013/full.md +0 -0
- data/2025/2503_01xxx/2503.01013/images.zip +3 -0
- data/2025/2503_01xxx/2503.01013/layout.json +0 -0
- data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_content_list.json +0 -0
- data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_model.json +0 -0
- data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_origin.pdf +3 -0
- data/2025/2503_01xxx/2503.01067/full.md +0 -0
- data/2025/2503_01xxx/2503.01067/images.zip +3 -0
- data/2025/2503_01xxx/2503.01067/layout.json +0 -0
- data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_content_list.json +2147 -0
- data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_model.json +0 -0
- data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_origin.pdf +3 -0
- data/2025/2503_01xxx/2503.01082/full.md +339 -0
- data/2025/2503_01xxx/2503.01082/images.zip +3 -0
- data/2025/2503_01xxx/2503.01082/layout.json +0 -0
- data/2025/2503_01xxx/2503.01103/f985c22c-01b0-4300-b25f-fa458f9f9eb1_content_list.json +0 -0
- data/2025/2503_01xxx/2503.01103/f985c22c-01b0-4300-b25f-fa458f9f9eb1_model.json +0 -0
- data/2025/2503_01xxx/2503.01103/f985c22c-01b0-4300-b25f-fa458f9f9eb1_origin.pdf +3 -0
- data/2025/2503_01xxx/2503.01103/full.md +0 -0
- data/2025/2503_01xxx/2503.01103/images.zip +3 -0
- data/2025/2503_01xxx/2503.01103/layout.json +0 -0
- data/2025/2503_01xxx/2503.01113/30257a40-975b-4eb4-a204-a1e160fd3415_content_list.json +0 -0
.gitattributes CHANGED

@@ -1987,3 +1987,67 @@
 data/2025/2503_02xxx/2503.02407/659ebb2e-73c9-4ddb-871a-4d8552af4b45_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_02xxx/2503.02424/8601c576-a221-400a-ab72-3a3c50fe8e1d_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_02xxx/2503.02445/79b9e0b6-153b-484b-87de-9d51773ebdf4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01103/f985c22c-01b0-4300-b25f-fa458f9f9eb1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01113/30257a40-975b-4eb4-a204-a1e160fd3415_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01129/b2b2ccc4-4715-402f-b6ce-141e8c525518_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01136/7e070fe6-5c5b-47cc-ac44-3784fdd795de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01141/4d544370-aea9-479b-8ecb-10b8a339d7a4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01174/a7594e21-f807-4d0b-b273-816f1c7f1c66_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01210/b58b5d9c-69a2-4377-80ff-6d7cde29ffa3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01238/a04ede76-5467-469e-a731-08da0deb8809_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01245/69b518b2-8826-45b0-80f0-d91aac644b08_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01298/31a63b75-d596-4169-b070-63e7da0b74b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01307/44402dcd-7b78-4f79-a002-682bdbf4fd6f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01342/14b25ca3-b2b7-4f29-827e-10615df50d1c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01374/15479e50-fe1a-4bd4-ae5c-41d0380924b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01378/76f39c5b-b16b-4dce-8959-78135b57fe6c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01386/c0ba346c-bb8a-4835-871b-ddbe8916585a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01422/bc401d02-18e1-4a80-b229-9377395b1cf5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01485/cc673f34-776d-4a51-ab90-c8af23928338_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01491/a78fc0e4-1981-4cf5-b3d4-cf4fb94adc64_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01532/1afd55fe-de25-4485-b2de-12445893987a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01557/ffed5499-187e-419b-8759-5c1df5c278be_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01610/aaa52245-1c59-403d-bf9e-3963ee21dfbe_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01645/89c2f9dd-b10e-4857-8fce-9a303336b1db_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01661/4a281918-460c-4612-91f0-f7e853ce6799_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01710/83f4dfc1-c85a-47c3-a844-b14642c88e85_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01736/26abb41b-243c-4977-b1b0-825ad5fe4f70_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01738/a730426e-6f50-4e4b-b620-70579024c7be_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01743/e4c3d79b-2750-48b4-876a-e548bf7c1648_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01747/58f857e5-4b88-4424-b53f-6e0a6474ffa7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01765/5bd5c221-b56a-4072-945a-e6ffafbb5c3d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01773/377267fa-9167-4a6e-b167-bcb2889d42d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01774/81d02aed-9f0a-43ee-ba2b-dc2f1be806c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01781/cea877c2-ae89-4e7e-a0b1-ab041e72339e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01785/835390f7-9a6c-4aee-9891-6c133c86bf64_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01807/acbe6789-6b43-4156-9e23-0f1cbdb715d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01822/634e8534-6677-4cb2-b73f-d560343bac8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01830/aff2433f-ec6b-4697-be23-047d146887ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01840/41239172-2aa7-4801-ac0f-b648b7fda67f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_01xxx/2503.01935/71150d4c-fa85-4f66-b81f-2eecbc654e8c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02017/258159f6-5a0c-4ecc-83a3-e2a8ba233bda_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02039/cedc8bbd-b029-4b63-8cef-aa752c9c0ef0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02056/be517507-845b-4de7-b2f9-4bfdf16dd985_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02068/9d139024-5dcb-4acf-a2bc-53e144218a62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02080/185b9528-5206-49ca-964f-6ca6ead81a53_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02103/007d1ef6-6688-4ded-85a4-dc4b37f0cf39_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02113/721e4ecf-934e-416d-b7ac-5ade241acb09_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02130/c2e8d632-be1e-469e-824c-f14b34cd5462_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02175/50efadb3-276a-4dc5-aadd-4843ec4ccf2d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02194/9f8bccae-ba09-4f95-9c14-c29e36369415_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02197/b2465bb8-f3ab-45f5-aa10-ebd5bddf81d0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02199/d171d7d7-17f8-4656-a3e0-992b8946deb8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02239/21e329eb-9172-419e-93fb-7d510a2ae929_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02240/86a1ac24-b5ad-41be-a6a2-066dcc5cd3a7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02246/ad7b0bdd-f89a-499b-927c-acb92e1604c7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02247/c3cccb6b-c68b-4a3e-a3ae-4e2074ec1a52_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02268/aa7077fc-2ea4-43f2-afde-36c95a306eae_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_02xxx/2503.02310/14565682-c2e4-4620-ae64-958832a8f9f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_05xxx/2503.05804/b1f41bfa-e5d3-4e40-9916-a3295463e14a_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_content_list.json ADDED

The diff for this file is too large to render.

data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_model.json ADDED

The diff for this file is too large to render.
data/2025/2503_00xxx/2503.00897/cd6704f7-7dbe-4791-961a-2f7d386a8aac_origin.pdf ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:316528e9cfa57bd75d447ce397d218820a34ecf26080b9b9007b604f521e1c6f
+size 21528890
data/2025/2503_00xxx/2503.00897/full.md ADDED

@@ -0,0 +1,499 @@
# A Simple and Effective Reinforcement Learning Method for Text-to-Image Diffusion Fine-tuning

Shashank Gupta* (Meta and University of Amsterdam, The Netherlands), 27392shashankgupta@gmail.com
Chaitanya Ahuja (Meta), chahuja@meta.com
Tsung-Yu Lin (Meta), tsungyulin@meta.com
Sreya Dutta Roy (Meta), sreyaduttaroy@meta.com
Harrie Oosterhuis (Radboud University, Nijmegen, The Netherlands), harrie.oosterhuis@ru.nl
Maarten de Rijke (University of Amsterdam, The Netherlands), m.derijke@uva.nl
Satya Narayan Shukla† (Meta), satyanshukla@meta.com
# Abstract

Reinforcement learning (RL)-based fine-tuning has emerged as a powerful approach for aligning diffusion models with black-box objectives. Proximal policy optimization (PPO) is the most popular choice of method for policy optimization. While effective in terms of performance, PPO is highly sensitive to hyper-parameters and involves substantial computational overhead. REINFORCE, on the other hand, mitigates some computational complexities, such as high memory overhead and sensitive hyper-parameter tuning, but performs suboptimally due to high variance and sample inefficiency. While the variance of REINFORCE can be reduced by sampling multiple actions per input prompt and using a baseline correction term, it still suffers from sample inefficiency. To address these challenges, we systematically analyze the efficiency-effectiveness trade-off between REINFORCE and PPO, and propose leave-one-out PPO (LOOP), a novel RL method for diffusion fine-tuning. LOOP combines variance reduction techniques from REINFORCE, such as sampling multiple actions per input prompt and a baseline correction term, with the robustness and sample efficiency of PPO via clipping and importance sampling. Our results demonstrate that LOOP effectively improves diffusion models on various black-box objectives, and achieves a better balance between computational efficiency and performance.
# 1 Introduction

Diffusion models have emerged as a powerful tool for generative modeling (Sohl-Dickstein et al., 2015; Ho et al., 2020), with a strong capacity to model complex data distributions across modalities, such as images (Rombach et al., 2022), text (Austin et al., 2021), natural molecules (Xu et al., 2023), and videos (Blattmann et al., 2023).

Diffusion models are typically pre-trained on a large-scale dataset, such that they can subsequently generate samples from the same data distribution. The training objective typically involves maximizing the data distribution likelihood. This pre-training stage helps the model generate high-quality samples. However, some applications require optimizing a custom reward function, for example, generating aesthetically pleasing images (Xu et al., 2024), improving the semantic alignment of image-text pairs based on human feedback (Schuhmann et al., 2022), or generating molecules with specific properties (Wang et al., 2024).

To optimize for such black-box objectives, RL-based fine-tuning has been successfully applied to diffusion models (Fan et al., 2024; Black et al., 2023; Wallace et al., 2024; Li et al., 2024; Gu et al., 2024). For RL-based fine-tuning, the reverse diffusion process is treated as a Markov decision process (MDP): prompts are treated as part of the input state, the generated image at each time-step is mapped to an action that receives a reward from a fixed reward model (the environment in a standard MDP), and the diffusion model is treated as a policy, which we optimize to maximize rewards. For optimization, typically PPO is applied (Fan et al., 2024; Black et al., 2023). In applications where obtaining a reward model is infeasible or undesirable, "RL-free" fine-tuning (typically offline) can also be applied (Wallace et al., 2024). In this work, we focus only on diffusion model fine-tuning using "online" RL methods, specifically PPO (Schulman et al., 2017).

An advantage of PPO is that it removes the incentive for the new policy to deviate too much from the previous reference policy, via importance sampling and a clipping operation (Schulman et al., 2017). While effective, PPO can have significant computational overhead. In practice, RL fine-tuning of diffusion models via PPO requires concurrently loading three models in memory: (i) the reference policy: the base policy, usually initialized with the pre-trained diffusion model; (ii) the current policy: the policy that is RL fine-tuned, also initialized with the pre-trained diffusion model; and (iii) the reward model: typically a large vision-language model, trained via a supervised fine-tuning objective (Lee et al., 2023), which assigns a scalar reward to the final generated image during the online optimization stage. This can amount to a considerable computational burden, given that each model can have millions of parameters. In addition to computational overhead, PPO is also known to be sensitive to hyper-parameters (Engstrom et al., 2019; Zheng et al., 2023; Huang et al., 2024).

Simpler approaches like REINFORCE (Williams, 1992) avoid such complexities and could theoretically be more efficient. However, in practice, they often suffer from high variance and instability. A variant of REINFORCE, REINFORCE leave-one-out (RLOO) (Kool et al., 2019), has been proposed that samples multiple sequences per input prompt and uses a baseline correction term to reduce variance; however, it still suffers from sample inefficiency.

This raises a fundamental question about the efficiency-effectiveness trade-off in RL-based diffusion fine-tuning. In this work, we first systematically explore the trade-off between efficiency (lower computational cost and reduced implementation complexity, i.e., fewer hyper-parameters) and effectiveness (stable training and final performance). We compare a simple REINFORCE approach with the standard PPO framework, demonstrating that while REINFORCE greatly reduces computational complexity, it comes at the cost of reduced performance.

Motivated by this finding, we propose a novel RL method for diffusion fine-tuning, LOOP, which combines the best of both worlds. To reduce variance during policy optimization, LOOP leverages multiple actions (diffusion trajectories) and a (REINFORCE) baseline correction term per input prompt. To maintain the stability and robustness of PPO, LOOP leverages clipping and importance sampling.

Our approach is conceptually similar to the recently proposed GRPO method for RL fine-tuning of LLMs (Shao et al., 2024). The key technical differences are: (i) LOOP does not apply standard-deviation normalization in the advantage calculation. Recent work on LLM fine-tuning suggests that removing this normalization term can improve performance (Liu et al., 2025). (ii) Following this recent work, LOOP omits the KL penalty term. Prior studies indicate that explicit KL regularization has minimal practical effect on performance (Black et al., 2023), and recent theoretical work shows that on-policy RL methods implicitly maintain KL proximity to the base policy even without explicit regularization (Shenfeld et al., 2025). (iii) In the diffusion setting, the reverse process has a fixed sequence length across all generations, making sequence-length normalization unnecessary.

As the primary evaluation benchmark, we choose the text-to-image compositionality benchmark (T2I-CompBench; Huang et al., 2023). Text-to-image models often fail to satisfy an essential reasoning ability, attribute binding: the generated image often fails to bind certain attributes specified in the instruction prompt (Huang et al., 2023; Ramesh et al., 2022; Fu & Cheng, 2024). As illustrated in Figure 1, LOOP outperforms previous diffusion methods on attribute binding. As attribute binding is a key skill for real-world applications, we choose the T2I-CompBench benchmark alongside two other common tasks: aesthetic image generation and image-text semantic alignment.
[Figure 1 image grid: three rows of generated images, from SD v2 (top), DDPO (middle), and LOOP (bottom), for the five prompts "White cat playing with black ball", "Mechanical owl with cobalt blue feathers on a rusted bronze lamppost", "A hexagonal watermelon placed besides a knife", "Black horse with glowing cyan patterns in maze of floating golden rings", and "Neon pink cactus growing on a cobalt blue rock".]

Figure 1: LOOP improves attribute binding. Qualitative examples of images generated via Stable Diffusion (SD) 2.0 (first row), DDPO (Black et al., 2023) (second row), and LOOP with $k = 4$ (third row). In the first prompt, SD and DDPO both fail to bind the color black to the ball in the image, whereas LOOP binds the color black to the ball. In the second example, SD and DDPO fail to generate a rusted-bronze lamppost, whereas LOOP manages to do so. In the third image, SD and DDPO fail to bind the hexagonal shape to the watermelon, whereas LOOP manages to. In the fourth example, SD and DDPO fail to generate the black horse with glowing cyan patterns, whereas LOOP generates the horse with the correct color attribute. Finally, in the last image, SD and DDPO fail to bind the cobalt blue color to the rock, whereas LOOP binds it successfully.
To summarize, our main contributions are as follows:

- PPO vs. REINFORCE efficiency-effectiveness trade-off. We systematically study how PPO design elements such as clipping, the reference policy, and the value function compare to a simple REINFORCE method, highlighting the efficiency-effectiveness trade-off in diffusion fine-tuning. To the best of our knowledge, we are the first to present such a systematic study of these trade-offs for diffusion fine-tuning.
- Introducing LOOP. We propose LOOP, a novel RL method for diffusion fine-tuning that combines the best of REINFORCE and PPO. LOOP leverages multiple diffusion trajectories and a REINFORCE baseline correction term for variance reduction, as well as clipping and importance sampling from PPO for robustness and sample efficiency.
- Empirical validation. We conduct experiments on the T2I-CompBench image compositionality benchmark, which evaluates the attribute binding capabilities of text-to-image generative models, and show that LOOP succeeds where previous text-to-image generative models often fail. We also evaluate LOOP on two common objectives from the literature on RL for diffusion: image aesthetics and text-image semantic alignment (Black et al., 2023).

The remainder of the paper is organized as follows. In the next section, we provide the necessary background and discuss related work. Section 3 revisits the efficiency-effectiveness trade-off between REINFORCE and PPO. Section 4 introduces our proposed method, leave-one-out PPO (LOOP), for diffusion fine-tuning. Section 5 describes the experimental setup, Section 6 gives hyperparameter and implementation details, and Section 7 presents the results and discussion.
# 2 Background and Related Work

# 2.1 Diffusion Models

We focus on denoising diffusion probabilistic models (DDPM) as the base model for text-to-image generative modeling (Ho et al., 2020; Sohl-Dickstein et al., 2015). Briefly, given a conditioning context variable $\mathbf{c}$ (a text prompt in our case), and the data sample $\mathbf{x}_0$, DDPM models $p(\mathbf{x}_0 \mid \mathbf{c})$ via a Markov chain of length $T$, with the following dynamics:

$$
p_{\theta}(\mathbf{x}_{0:T} \mid \mathbf{c}) = p(\mathbf{x}_T \mid \mathbf{c}) \prod_{t=1}^{T} p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c}). \tag{1}
$$

Image generation in a diffusion model is achieved via the following ancestral sampling scheme, which is a reverse diffusion process:

$$
\mathbf{x}_T \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), \qquad \mathbf{x}_{t-1} \sim \mathcal{N}\left(\mu_{\theta}(\mathbf{x}_t, \mathbf{c}, t),\, \sigma_{\theta}^{2} \mathbf{I}\right), \quad \forall t \in \{T, \dots, 1\}, \tag{2}
$$

where the distribution at time-step $t$ is assumed to be a multivariate normal distribution with the predicted mean $\mu_{\theta}(\mathbf{x}_t, \mathbf{c}, t)$ and a constant variance.
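To make this concrete, here is a minimal PyTorch sketch of the ancestral sampling loop in Eq. 2. It is an illustration rather than the authors' implementation: `mu_theta` (the learned denoising network), `sigma` (the per-step noise scales), and the argument shapes are assumed interfaces.

```python
import torch

@torch.no_grad()
def ddpm_sample(mu_theta, c, T, shape, sigma):
    """Ancestral sampling sketch (Eq. 2): start from pure noise x_T and
    iteratively sample x_{t-1} ~ N(mu_theta(x_t, c, t), sigma_t^2 I)."""
    x = torch.randn(shape)  # x_T ~ N(0, I)
    for t in range(T, 0, -1):
        mean = mu_theta(x, c, t)  # predicted mean of the reverse step
        # No noise is added at the final step (t = 1), a common convention.
        noise = torch.randn_like(x) if t > 1 else torch.zeros_like(x)
        x = mean + sigma[t] * noise  # x_{t-1}; sigma assumed indexable by t
    return x  # the generated sample x_0
```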
# 2.2 Proximal Policy Optimization (PPO) for RL

PPO was introduced for optimizing a policy with the objective of maximizing the overall reward in the RL paradigm. PPO removes the incentive for the current policy $\pi_t$ to diverge from the previous policy $\pi_{t-1}$ outside the range $[1 - \epsilon, 1 + \epsilon]$, where $\epsilon$ is a hyper-parameter. As long as subsequent policies stay close to each other in the action space, the monotonic policy improvement bound guarantees a monotonic improvement in the policy's performance as the optimization progresses. This property justifies the clipping term in the mathematical formulation of the PPO objective function (Schulman, 2015; Achiam et al., 2017; Queeney et al., 2021). Formally, the PPO objective function is:

$$
J(\theta) = \mathbb{E}\left[\min\left(r_t(\theta)\hat{A}_t,\, \operatorname{clip}\left(r_t(\theta), 1 - \epsilon, 1 + \epsilon\right)\hat{A}_t\right)\right], \tag{3}
$$

where $r_t(\theta) = \frac{\pi_t(a \mid c)}{\pi_{t-1}(a \mid c)}$ is the importance sampling ratio between the current policy $\pi_t(a \mid c)$ and the previous reference policy $\pi_{t-1}(a \mid c)$, $\hat{A}_t$ is the advantage function (Sutton & Barto, 2018), and the clip operator restricts the importance sampling ratio to the range $[1 - \epsilon, 1 + \epsilon]$.
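The clipped objective is straightforward to compute; the sketch below evaluates Eq. 3 on a batch of log-probabilities in PyTorch. Tensor names and the default `eps` value are illustrative assumptions, not part of the original formulation.

```python
import torch

def ppo_surrogate(logp_new, logp_old, advantages, eps=0.2):
    """Clipped PPO surrogate (Eq. 3) over a batch of actions.
    logp_new, logp_old: log pi_t(a|c) and log pi_{t-1}(a|c) per action.
    advantages: advantage estimates A_hat per action."""
    ratio = torch.exp(logp_new - logp_old)  # r_t(theta)
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1 - eps, 1 + eps) * advantages
    # Taking the element-wise min keeps a pessimistic (lower) bound.
    return torch.min(unclipped, clipped).mean()  # maximize this quantity
```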
# 2.3 RL for Text-to-Image Diffusion Models

The diffusion process can be viewed as an MDP $(\mathcal{S}, \mathcal{A}, \mathcal{P}, \mathcal{R}, \rho_0)$, where $\mathcal{S}$ is the state space, $\mathcal{A}$ is the action space, $\mathcal{P}$ is the state transition kernel, $\mathcal{R}$ is the reward function, and $\rho_0$ is the distribution of the initial state $\mathbf{s}_0$.

In the context of text-to-image diffusion models, the MDP is defined as:

$$
\begin{aligned}
&\mathbf{s}_t = (\mathbf{c}, t, \mathbf{x}_t), \quad \pi_{\theta}(\mathbf{a}_t \mid \mathbf{s}_t) = p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c}), \quad \mathcal{P}(\mathbf{s}_{t+1} \mid \mathbf{s}_t, \mathbf{a}_t) = \delta(\mathbf{c}, \mathbf{a}_t), \quad \mathbf{a}_t = \mathbf{x}_{t-1}, \\
&\rho_0(\mathbf{s}_0) = \left(p(\mathbf{c}), \delta_T, \mathcal{N}(\mathbf{0}, \mathbf{I})\right), \qquad \mathcal{R}(\mathbf{s}_t, \mathbf{a}_t) = \begin{cases} r(\mathbf{x}_0, \mathbf{c}) & \text{if } t = 0, \\ 0 & \text{otherwise.} \end{cases}
\end{aligned} \tag{4}
$$

The input state $\mathbf{s}_t$ is defined in terms of the context $\mathbf{c}$ (prompt features) and the sampled image $\mathbf{x}_t$ at the given time-step $t$. The policy $\pi_{\theta}$ is the diffusion model itself. The state transition kernel is a Dirac delta function $\delta$ with the currently sampled action $\mathbf{a}_t = \mathbf{x}_{t-1}$ as its input. The reward is assigned only at the last step of the reverse diffusion process, when the final image is generated. The initial state $\rho_0$ corresponds to the last state of the forward diffusion process, $\mathbf{x}_T$.
# 2.4 PPO for Diffusion Fine-tuning

The objective function of RL fine-tuning for a diffusion policy $\pi_{\theta}$ can be defined as follows:

$$
J_{\theta}(\pi) = \mathbb{E}_{\tau \sim p(\tau \mid \pi_{\theta})}\left[\sum_{t=0}^{T} \mathcal{R}(\mathbf{s}_t, \mathbf{a}_t)\right] = \mathbb{E}_{\tau \sim p(\tau \mid \pi_{\theta})}\left[r(\mathbf{x}_0, \mathbf{c})\right], \tag{5}
$$

where the trajectory $\tau = \{\mathbf{x}_T, \mathbf{x}_{T-1}, \dots, \mathbf{x}_0\}$ refers to the reverse diffusion process (Eq. 1), and the total reward of the trajectory is the reward of the final generated image $\mathbf{x}_0$ (Eq. 4). We ignore the KL-regularized version of this objective, which is commonly applied in the RLHF for LLM literature (Zhong et al., 2024; Zeng et al., 2024; Rafailov et al., 2024) and was proposed by Fan et al. (2024) in the context of RL for diffusion models. As shown by Black et al. (2023), adding the KL-regularization term makes no empirical difference in terms of final performance. The PPO objective is given as:

$$
J_{\theta}^{\mathrm{PPO}}(\pi) = \mathbb{E}\left[\sum_{t=0}^{T} \operatorname{clip}\left(\frac{\pi_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}{\pi_{\mathrm{old}}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}, 1 - \epsilon, 1 + \epsilon\right) r(\mathbf{x}_0, \mathbf{c})\right],
$$

where the clipping operation removes the incentive for the new policy $\pi_{\theta}$ to differ from the previous-round policy $\pi_{\mathrm{old}}$ (Schulman et al., 2017; Black et al., 2023).
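As a rough illustration of how this objective can be computed in practice, the following sketch sums clipped per-step importance ratios over a denoising trajectory and weights them by the final image reward; the `[B, T]` tensor layout and function name are assumptions for exposition, not the authors' code.

```python
import torch

def diffusion_ppo_objective(logp_new, logp_old, rewards, eps=1e-4):
    """Sketch of the PPO objective for diffusion fine-tuning above.
    logp_new, logp_old: [B, T] log-probs of each reverse step under
    pi_theta and pi_old. rewards: [B] rewards of the final images."""
    ratio = torch.exp(logp_new - logp_old)  # [B, T] per-step ratios
    clipped = torch.clamp(ratio, 1.0 - eps, 1.0 + eps)
    # Weight every denoising step by the trajectory's terminal reward,
    # sum over steps, and average over the batch of prompts.
    return (clipped * rewards[:, None]).sum(dim=1).mean()
```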
# 3 REINFORCE vs. PPO: An Efficiency-Effectiveness Trade-Off

In this section, we explore the efficiency-effectiveness trade-off between two prominent reinforcement learning methods for diffusion fine-tuning: REINFORCE and PPO. Understanding this trade-off is crucial for selecting the appropriate algorithm given constraints on computational resources and desired performance outcomes.

In the context of text-to-image diffusion models, we aim to optimize the policy $\pi$ to maximize the expected reward $\mathcal{R}(\mathbf{x}_{0:T}, \mathbf{c}) = r(\mathbf{x}_0, \mathbf{c})$. Our objective function is defined as:

$$
J_{\theta}(\pi) = \mathbb{E}_{\mathbf{c} \sim p(\mathbf{C}),\, \mathbf{x}_{0:T} \sim p_{\theta}(\mathbf{x}_{0:T} \mid \mathbf{c})}\left[r(\mathbf{x}_0, \mathbf{c})\right]. \tag{6}
$$

REINFORCE for gradient calculation. For optimizing this objective, the REINFORCE policy gradient (also known as the score function (SF) estimator) (Williams, 1992) provides the following gradient estimate:

$$
\begin{aligned}
\nabla_{\theta} J_{\theta}^{\mathrm{SF}}(\pi) &= \mathbb{E}_{\mathbf{x}_{0:T}}\left[\nabla_{\theta} \log\left(\prod_{t=1}^{T} p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})\right) r(\mathbf{x}_0, \mathbf{c})\right] \\
&= \mathbb{E}_{\mathbf{x}_{0:T}}\left[\sum_{t=0}^{T} \nabla_{\theta} \log p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})\, r(\mathbf{x}_0, \mathbf{c})\right], \tag{7}
\end{aligned}
$$

where the second step follows from the reverse diffusion policy decomposition (Eq. 1).

In practice, a batch of trajectories is sampled from the reverse diffusion distribution, i.e., $\mathbf{x}_{0:T} \sim p_{\theta}(\mathbf{x}_{0:T})$, and a Monte-Carlo estimate of the REINFORCE policy gradient (Eq. 7) is calculated for the model update.
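A Monte-Carlo implementation of this estimate is nearly a one-liner; the hedged sketch below assumes the per-step log-probabilities have already been gathered into a `[B, T]` tensor. Minimizing the returned loss with a gradient-based optimizer ascends the REINFORCE gradient of Eq. 7.

```python
import torch

def reinforce_loss(logp, rewards):
    """Monte-Carlo REINFORCE loss (negated Eq. 7 estimate).
    logp: [B, T] log p_theta(x_{t-1} | x_t, c) for each denoising step.
    rewards: [B] terminal rewards r(x_0, c) per trajectory."""
    # Sum log-probs over the trajectory, weight by its reward,
    # and negate so that minimization maximizes expected reward.
    return -(logp.sum(dim=1) * rewards).mean()
```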
REINFORCE with baseline correction. To reduce the variance of the REINFORCE estimator, a common trick is to subtract a constant baseline correction term from the reward function (Greensmith et al., 2004; Mohamed et al., 2020):

$$
\nabla_{\theta} J_{\theta}^{\mathrm{SFB}}(\pi) = \mathbb{E}\left[\sum_{t=0}^{T} \nabla_{\theta} \log p_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c}) \left(r(\mathbf{x}_0, \mathbf{c}) - b_t\right)\right]. \tag{8}
$$

REINFORCE leave-one-out (RLOO). To further reduce the variance of the REINFORCE estimator, RLOO samples $K$ diffusion trajectories per prompt, $\{\mathbf{x}_{0:T}^k\} \sim \pi(\cdot \mid \mathbf{c})$, for a better Monte-Carlo estimate of the expectation (Kool et al., 2019; Ahmadian et al., 2024). The RLOO estimator is:

$$
\nabla_{\theta} J_{\theta}^{\mathrm{RLOO}}(\pi) = \mathbb{E}\left[\frac{1}{K} \sum_{k=1}^{K} \sum_{t=0}^{T} \nabla_{\theta} \log p_{\theta}(\mathbf{x}_{t-1}^k \mid \mathbf{x}_t^k, \mathbf{c}) \left(r(\mathbf{x}_0^k, \mathbf{c}) - b_t\right)\right]. \tag{9}
$$
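The leave-one-out baseline used by RLOO is cheap to compute from the $K$ rewards of a prompt; below is a small illustrative helper (the function name and tensor layout are assumptions).

```python
import torch

def leave_one_out_baseline(rewards):
    """For K rewards of one prompt, b_i = mean of the other K-1 rewards
    (the leave-one-out baseline of Kool et al., 2019)."""
    K = rewards.shape[0]
    return (rewards.sum() - rewards) / (K - 1)

# Example with K = 4 trajectories sampled for a single prompt:
rewards = torch.tensor([0.9, 0.4, 0.7, 0.6])
advantages = rewards - leave_one_out_baseline(rewards)  # centered rewards
```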
However, REINFORCE-based estimators have a significant disadvantage: they do not allow sample reuse (i.e., reusing trajectories collected from previous policies), due to the distribution shift between policy gradient updates during training. Sampled trajectories can only be used once, prohibiting mini-batch updates; this makes REINFORCE sample inefficient.
To allow for sample reuse, the importance sampling (IS) trick can be applied (Schulman, 2015; Owen, 2013):

$$
J_{\theta}^{\mathrm{IS}}(\pi) = \mathbb{E}_{c_t \sim p(C),\, a_t \sim \pi_{\mathrm{old}}(a_t \mid c_t)}\left[\frac{\pi_{\theta}(a_t \mid c_t)}{\pi_{\mathrm{old}}(a_t \mid c_t)}\, \mathcal{R}_t\right], \tag{10}
$$

where $\pi_{\theta}$ is the current policy to be optimized, and $\pi_{\mathrm{old}}$ is the policy from the previous update round. With the IS trick, we can sample trajectories from the current policy in a batch, store them in a temporary buffer, and re-use them to apply mini-batch optimization (Schulman et al., 2017).
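In code, sample reuse amounts to caching the behavior policy's log-probabilities alongside the collected trajectories and re-weighting later gradient steps. A minimal sketch of the re-weighted objective of Eq. 10, with assumed tensor names, is:

```python
import torch

def is_objective(logp_new, logp_old, rewards):
    """Importance-sampled objective (Eq. 10): rewards collected under
    pi_old are re-weighted by pi_theta / pi_old, so one buffer of
    trajectories can serve several mini-batch updates of pi_theta."""
    weights = torch.exp(logp_new - logp_old)  # importance ratios
    return (weights * rewards).mean()
```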
Motivation for PPO. With the IS trick, samples from the old policy can be used to estimate the policy gradient under the current policy $\pi_{\theta}$ (Eq. 7) in a statistically unbiased fashion (Owen, 2013); i.e., in expectation the IS and REINFORCE gradients are equivalent (Eq. 10, Eq. 7). Thus, we can potentially improve the sample efficiency of REINFORCE gradient estimation with IS.

While unbiased, the IS estimator can exhibit high variance (Owen, 2013), which may lead to unstable training dynamics. Additionally, significant divergence between the current policy $\pi_{\theta}$ and the previous policy $\pi_{\mathrm{old}}$ can result in the updated diffusion policy performing worse than the previous one (Schulman, 2015; Achiam et al., 2017). Next, we prove this formally. We note that this result was previously established by Achiam et al. (2017) for the more general RL setting; in this work, we extend the finding to the context of diffusion model fine-tuning.
A key component of the proof relies on the distribution of states under the current policy, i.e., $d^{\pi}(s)$. In the case of diffusion models, the state transition kernel $\mathcal{P}(\mathbf{s}_{t+1} \mid \mathbf{s}_t, \mathbf{a}_t)$ is deterministic, because the next state consists of the action sampled from the previous state (Eq. 4), i.e., $\mathcal{P}(\mathbf{s}_{t+1} \mid \mathbf{s}_t, \mathbf{a}_t) = 1$. While the state transition kernel is deterministic, the distribution of states is stochastic, given that it depends on the action at time $t$, which is sampled from the policy (Eq. 4). We define the state distribution as follows:

Definition 1. Given the distribution over contexts $\mathbf{c} \sim p(\mathbf{C})$, the (deterministic) distribution over time $t = \delta(t)$, and the diffusion policy $\pi$, the state distribution at time $t$ is:

$$
p(\mathbf{s}_t \mid \pi) = p(\mathbf{c})\, \delta(t) \int_{\mathbf{x}_{t+1}} \pi(\mathbf{x}_t \mid \mathbf{x}_{t+1}, \mathbf{c}, t)\, \pi(\mathbf{x}_{t+1} \mid \mathbf{c}, t)\, \mathrm{d}\mathbf{x}_{t+1}.
$$

Subsequently, the normalized discounted state visitation distribution can be defined as:

$$
d^{\pi}(\mathbf{s}) = (1 - \gamma) \sum_{t=0}^{\infty} \gamma^{t}\, p(\mathbf{s}_t = \mathbf{s} \mid \pi). \tag{11}
$$

The advantage function is defined as $A^{\pi_k}(\mathbf{s}, \mathbf{a}) = Q^{\pi_k}(\mathbf{s}, \mathbf{a}) - V^{\pi_k}(\mathbf{s})$ (Sutton & Barto, 2018). Given this, the monotonic policy improvement bound can be derived:

Theorem 3.1. (Achiam et al., 2017) Consider a current policy $\pi_k$. Let $C^{\pi, \pi_k} = \max_{s \in S} |\mathbb{E}_{a \sim \pi(\cdot \mid s)}[A^{\pi_k}(s, a)]|$, let $\mathrm{TV}(\pi(\cdot \mid s), \pi_k(\cdot \mid s))$ denote the total variation distance between the policies $\pi(\cdot \mid s)$ and $\pi_k(\cdot \mid s)$, and let $s$ be the current state. For any future policy $\pi$, we have:

$$
J(\pi) - J(\pi_k) \geq \frac{1}{1 - \gamma}\, \mathbb{E}_{(s, a) \sim d^{\pi_k}}\left[\frac{\pi(a \mid s)}{\pi_k(a \mid s)} A^{\pi_k}(s, a)\right] - \frac{2 \gamma C^{\pi, \pi_k}}{(1 - \gamma)^2}\, \mathbb{E}_{s \sim d^{\pi_k}}\left[\mathrm{TV}(\pi(\cdot \mid s), \pi_k(\cdot \mid s))\right].
$$

A direct consequence of this theorem is that when optimizing a policy with the IS objective (Eq. 10), the policies should not diverge too much if the new policy is to be guaranteed to improve upon the previous one. Therefore, we need to apply a constraint on the current policy. This can be achieved by applying the clipping operator in the PPO objective (Eq. 3) (Queeney et al., 2021; Achiam et al., 2017; Schulman et al., 2017; Gupta et al., 2024c;a; Gupta, 2025).
This gives rise to an efficiency-effectiveness trade-off between REINFORCE and PPO. REINFORCE offers greater computational and implementation efficiency due to its simplicity, but at the cost of lower sample efficiency and potentially suboptimal performance. In contrast, PPO is more computationally demanding and involves more complex hyper-parameter tuning, yet it achieves higher performance and reliable policy improvements during training.

We note that a similar trade-off analysis was performed in the context of RL fine-tuning for large language models (LLMs) (Ahmadian et al., 2024). However, their analysis was limited to an empirical study, whereas we present a theoretical analysis in addition to an empirical one. To the best of our knowledge, we are the first to conduct such a study for diffusion methods.
# 4 Method: Leave-One-Out PPO (LOOP) for Diffusion Fine-tuning

We demonstrated the importance of PPO for enhancing sample efficiency and achieving stable improvements during training for diffusion fine-tuning. Additionally, we showcased the RLOO method's effectiveness in reducing the variance of REINFORCE. In this section, we introduce our proposed method, LOOP, a novel RL method for diffusion fine-tuning. We start by highlighting the potentially high variance of the PPO objective.

The expectation in the PPO loss (Eq. 3) is typically estimated by sampling a single trajectory $\mathbf{x}_{0:T} \sim \pi_{\mathrm{old}}$ from the previous iteration's policy $\pi_{\mathrm{old}}$ for a given prompt $\mathbf{c}$:

$$
\sum_{t=0}^{T} \operatorname{clip}\left(\frac{\pi_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}{\pi_{\mathrm{old}}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}, 1 - \epsilon, 1 + \epsilon\right) r(\mathbf{x}_0, \mathbf{c}). \tag{12}
$$

Even though the single-sample estimate is an unbiased Monte-Carlo approximation of the expectation, it has high variance (Owen, 2013). Additionally, the IS term $\frac{\pi_{\theta}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}{\pi_{\mathrm{old}}(\mathbf{x}_{t-1} \mid \mathbf{x}_t, \mathbf{c})}$ can also contribute to the high variance of the PPO objective (Swaminathan & Joachims, 2015; Xie et al., 2023). Both factors combined can lead to high variance and unstable training of PPO.

Taking inspiration from RLOO (Eq. 9), we sample $K$ independent trajectories from the previous policy for a given prompt $\mathbf{c}$, and subtract a baseline correction term from each trajectory's reward to reduce the variance of the estimator:

$$
\hat{J}_{\theta}^{\mathrm{LOOP}}(\pi) = \frac{1}{K} \sum_{i=1}^{K} \left[\sum_{t=0}^{T} \operatorname{clip}\left(\frac{\pi_{\theta}(\mathbf{x}_{t-1}^{i} \mid \mathbf{x}_t^{i}, \mathbf{c})}{\pi_{\mathrm{old}}(\mathbf{x}_{t-1}^{i} \mid \mathbf{x}_t^{i}, \mathbf{c})}, 1 - \epsilon, 1 + \epsilon\right) \left(r(\mathbf{x}_0^{i}, \mathbf{c}) - b^{i}\right)\right], \tag{13}
$$

where $\mathbf{x}_{0:T}^{i} \sim \pi_{\mathrm{old}}, \forall i \in [1, K]$. The baseline correction term $b^{i}$ reduces the variance of the gradient estimate while being unbiased in expectation (Gupta et al., 2024b; Mohamed et al., 2020). A simple choice of baseline would be the average reward across all $K$ trajectories; however, that results in a biased estimator (Kool et al., 2019). Therefore, we choose the leave-one-out average baseline, with the average taken across all trajectories for the prompt except the current trajectory $i$:

$$
b^{i} = \frac{1}{K - 1} \sum_{j \neq i} r(\mathbf{x}_0^{j}). \tag{14}
$$
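Putting the pieces together, a sketch of the LOOP objective (Eqs. 13-14) for a single prompt might look as follows. The `[K, T]` tensor layout and function name are illustrative assumptions; the default `eps` mirrors the clipping parameter reported in the implementation details below.

```python
import torch

def loop_objective(logp_new, logp_old, rewards, eps=1e-4):
    """Sketch of the LOOP objective (Eqs. 13-14) for one prompt.
    logp_new, logp_old: [K, T] per-step log-probs of K trajectories
    under pi_theta and pi_old. rewards: [K] terminal rewards."""
    K = rewards.shape[0]
    baselines = (rewards.sum() - rewards) / (K - 1)  # leave-one-out (Eq. 14)
    advantages = rewards - baselines                 # [K] centered rewards
    ratio = torch.exp(logp_new - logp_old)           # [K, T] importance ratios
    clipped = torch.clamp(ratio, 1.0 - eps, 1.0 + eps)
    per_traj = (clipped * advantages[:, None]).sum(dim=1)  # sum over steps
    return per_traj.mean()                           # average over K trajectories
```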
Originally, RLOO sampling and baseline corrections were proposed in the context of REINFORCE, with a focus on on-policy optimization (Ahmadian et al., 2024; Kool et al., 2019), whereas we apply them in the off-policy step of PPO. We call this method leave-one-out PPO (LOOP).

Our approach is conceptually similar to the recently popular GRPO method for RL fine-tuning of LLMs (Shao et al., 2024). Although our work was developed independently before GRPO gained widespread recognition, we do not include a head-to-head comparison.

Technically, the distinction lies in the following aspects: (i) unlike GRPO, our formulation does not apply standard-deviation normalization in the denominator, as this has been shown to potentially harm performance in recent work on LLM fine-tuning via RL (Liu et al., 2025); (ii) similar to GRPO, we omit a KL penalty term, since our empirical experiments showed that it has little practical benefit; furthermore, a recent study showed that on-policy RL implicitly constrains the updated policy to remain close to the base policy under a KL divergence measure, even without an explicit KL penalty term (Shenfeld et al., 2025); and (iii) we ignore the generation-length normalization term. In the diffusion setting, this simplification is further justified by the fact that the sequence length of the reverse diffusion process is fixed across generations, rendering length normalization unnecessary.
Provably, LOOP has lower variance than PPO:

Proposition 4.1. The LOOP estimator $\hat{J}_{\theta}^{\mathrm{LOOP}}(\pi)$ (Eq. 13) has lower variance than the PPO estimator $\hat{J}_{\theta}^{\mathrm{PPO}}(\pi)$ (Eq. 12):

$$
\operatorname{Var}\left[\hat{J}_{\theta}^{\mathrm{LOOP}}(\pi)\right] < \operatorname{Var}\left[\hat{J}_{\theta}^{\mathrm{PPO}}(\pi)\right]. \tag{15}
$$

Proof. Since the $K$ sampled trajectories are independent, the variance of their average is the single-trajectory (PPO) variance scaled by $1/K$:

$$
\operatorname{Var}\left[\hat{J}_{\theta}^{\mathrm{LOOP}}(\pi)\right] = \frac{1}{K} \operatorname{Var}\left[\hat{J}_{\theta}^{\mathrm{PPO}}(\pi)\right] < \operatorname{Var}\left[\hat{J}_{\theta}^{\mathrm{PPO}}(\pi)\right]. \qquad \square
$$
# 5 Experimental Setup

Benchmark. Text-to-image diffusion and language models often fail to satisfy an essential reasoning skill: attribute binding. Attribute binding refers to the ability of a model to generate images with the attributes, such as color, shape, texture, and spatial alignment, specified in the input prompt; generated images often fail to bind certain attributes specified in the instruction prompt (Huang et al., 2023; Ramesh et al., 2022; Fu & Cheng, 2024). Since attribute binding is a basic requirement for useful real-world applications, we choose the T2I-CompBench benchmark (Huang et al., 2023), which contains multiple attribute binding/image compositionality tasks with corresponding reward metrics for benchmarking text-to-image generative models. We also select two common tasks from prior RL for diffusion work: improving the aesthetic quality of generations, and image-text semantic alignment (Black et al., 2023; Fan et al., 2024). To summarize, we choose the following tasks for RL optimization: (i) Color, (ii) Shape, (iii) Texture, (iv) 2D Spatial, (v) Numeracy, (vi) Aesthetic, and (vii) Image-text Alignment. For all tasks, the prompts are split into training and validation sets, and we report the average reward on both splits.

Model. As the base diffusion model, we use Stable Diffusion v2 (Rombach et al., 2022), which is a latent diffusion model. For optimization, we fully update the UNet model, with a learning rate of $1 \times 10^{-5}$. We also tried LoRA fine-tuning (Hu et al., 2021), but the results were not satisfactory, so we update the entire model instead.
# 6 Hyperparameter and Implementation Details

For REINFORCE (including REINFORCE with a baseline correction term), PPO, and LOOP, the number of denoising steps $T$ is set to 50, and the diffusion guidance weight is set to 5.0. For optimization, we use AdamW (Loshchilov & Hutter, 2017) with a learning rate of $1 \times 10^{-5}$ and a weight decay of $1 \times 10^{-4}$, with other parameters kept at their default values. We clip the gradient norm to 1.0. We train all models on 8 A100 GPUs with a batch size of 4 per GPU. The clipping parameter $\epsilon$ for PPO and LOOP is set to $1 \times 10^{-4}$.
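For reference, the stated settings can be collected into a single configuration; this dictionary is a hypothetical consolidation for readability, and the key names are not from the paper.

```python
# Hypothetical training configuration consolidating the values above.
config = {
    "denoising_steps": 50,      # T
    "guidance_weight": 5.0,     # diffusion guidance scale
    "optimizer": "AdamW",
    "learning_rate": 1e-5,
    "weight_decay": 1e-4,
    "grad_norm_clip": 1.0,
    "num_gpus": 8,              # A100
    "batch_size_per_gpu": 4,
    "ppo_clip_epsilon": 1e-4,   # for PPO and LOOP
}
```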
# 7 Results and Discussion

# 7.1 REINFORCE vs. PPO efficiency-effectiveness trade-off

We present our empirical results on the efficiency-effectiveness trade-off between REINFORCE and PPO. Our evaluation compares the following methods: the REINFORCE policy gradient for diffusion fine-tuning (Eq. 7); the REINFORCE policy gradient with a baseline correction term (REINFORCE w/ BC; Eq. 8), where the baseline is the average reward for the given prompt (Black et al., 2023); and the PPO objective for diffusion fine-tuning, which incorporates importance sampling and clipping (Eq. 3). This PPO objective is equivalent to the DDPO objective of the original RL for diffusion method (Black et al., 2023).

Figure 2 shows the training reward over epochs for the Color, Shape, and Texture attributes from the T2I-CompBench benchmark, and the training reward from optimizing the aesthetic model. Results are averaged over 3 runs. The plain REINFORCE policy gradient is clearly less effective than the other variants. Adding a baseline correction term improves training performance, validating the effectiveness of the baseline, possibly because of reduced variance. PPO achieves the highest training reward, validating the effectiveness of importance sampling and clipping for diffusion fine-tuning.

[Figure 2 plots: training reward vs. training epoch for each method on the Color, Shape, Texture, and Aesthetic tasks.]

Figure 2: Evaluating the REINFORCE vs. PPO trade-off by comparing REINFORCE (Eq. 7), REINFORCE with a baseline correction term (Eq. 8), and PPO (Eq. 3). We evaluate on the T2I-CompBench benchmark over three image attributes (Color, Shape, and Texture) and on the aesthetic task. The y-axis shows the training reward; the x-axis shows the training epoch. Results are averaged over 3 runs; shaded areas indicate $80\%$ prediction intervals.

We also evaluate performance on a separate validation set. For each validation prompt, we generate 10 independent images from the diffusion policy and average their rewards, finally averaging over all evaluation prompts. The validation results, reported in Table 1, are consistent with the pattern observed in the training rewards: REINFORCE with baseline performs better than plain REINFORCE, suggesting that baseline correction indeed helps final performance. Nevertheless, PPO (DDPO) still performs better than REINFORCE.

We now have empirical evidence supporting the efficiency-effectiveness trade-off discussed in Section 3. From these results, we conclude that fine-tuning text-to-image diffusion models is more effective with IS and clipping from PPO, or with baseline corrections from REINFORCE. This bolsters our motivation for proposing LOOP as an approach that effectively combines these methods.

Table 1: Comparing REINFORCE with DDPO on the T2I-CompBench benchmark over three image attributes: Color, Shape, and Texture. We report the average reward on an unseen test set (higher is better); for each prompt, rewards are averaged over 10 independently generated images.

| Method | Color ↑ | Shape ↑ | Texture ↑ |
| --- | --- | --- | --- |
| REINFORCE | 0.6438 | 0.5330 | 0.6359 |
| REINFORCE w/ BC | 0.6351 | 0.5347 | 0.6656 |
| DDPO | 0.6821 | 0.5655 | 0.6909 |
| 322 |
+
# 7.2 Evaluating LOOP
Next, we discuss the results of our proposed RL method for diffusion fine-tuning, LOOP.
Performance during training. Figure 3 shows the training reward curves for the different tasks against the number of epochs. LOOP consistently outperforms DDPO (Black et al., 2023) across all seven tasks throughout training. This establishes the effectiveness of sampling multiple diffusion trajectories per input prompt and of the leave-one-out baseline correction term (Eq. 9) during training. The training reward curve is smoother for the aesthetic task than for tasks from the T2I-CompBench benchmark. We hypothesize that improving the attribute binding property of a diffusion model is a harder task than improving the aesthetic quality of generated images.
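
The leave-one-out baseline of Eq. 9 is cheap to compute once the $K$ rewards for a prompt are available. A minimal sketch under our own naming, assuming `rewards` is the length-$K$ reward vector for a single prompt:

```python
import torch

def leave_one_out_advantages(rewards):
    # For trajectory i, the baseline is the mean reward of the other K-1
    # trajectories sampled for the same prompt; subtracting it keeps the
    # gradient unbiased because the baseline is independent of trajectory i.
    K = rewards.shape[0]
    loo_baseline = (rewards.sum() - rewards) / (K - 1)
    return rewards - loo_baseline
```

These advantages would then take the place of `advantages` in the clipped PPO surrogate sketched earlier.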

Figure 3: Comparing DDPO (referenced as PPO) with the proposed LOOP on the T2I-CompBench benchmark with respect to image attributes: Color, Shape, Texture, and Spatial relationship. We also report results on aesthetic preference and image-text alignment tasks (Black et al., 2023). The y-axis shows training reward, and the x-axis shows training epoch. Results are averaged over three independent runs; shaded areas denote $80\%$ prediction intervals.
Table 2 reports the average rewards on the test set across various tasks from the T2I-CompBench benchmark. For each prompt, we generate 10 different images and calculate the average rewards. LOOP consistently outperforms DDPO (Black et al., 2023) and other strong supervised learning-based baselines across all tasks. Notably, LOOP achieves relative improvements of $18.1\%$ and $15.2\%$ over DDPO on the shape and color attributes, respectively.
For the aesthetic and image-text alignment objectives, the validation rewards are reported in Table 3. LOOP results in a $15.4\%$ relative improvement over PPO for the aesthetic task, and a $2.4\%$ improvement over PPO for the image-text alignment task.
Impact of the number of independent trajectories $(k)$. The LOOP variant with $k = 4$ independent trajectories performs best across all tasks, followed by the $k = 3$ variant. This is intuitive, since Monte Carlo estimates improve with the number of samples (Owen, 2013). Surprisingly, the performance of the $k = 2$ variant is only comparable to PPO.
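
A quick numeric illustration of this intuition: the variance of a Monte Carlo mean over $k-1$ samples decays as $1/(k-1)$, so the leave-one-out baseline becomes a steadier estimate of the expected reward as $k$ grows. The unit-variance noise below is an assumption for the demo, not data from the paper:

```python
import random
import statistics

random.seed(0)
for K in (2, 3, 4):
    # Each baseline is a mean of K-1 unit-variance samples; its variance ~ 1/(K-1).
    means = [statistics.fmean(random.gauss(0.0, 1.0) for _ in range(K - 1))
             for _ in range(100_000)]
    print(K, round(statistics.variance(means), 3))  # approx. 1.0, 0.5, 0.333
```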
Table 2: Comparing the performance of the proposed LOOP method with state-of-the-art baselines on the T2I-CompBench benchmark over the image attributes Color, Shape, Texture, Spatial relation, and Numeracy. The metric is the average reward on an unseen test set (higher is better). For each prompt, we generate 10 images and average the rewards.
<table><tr><td>Model</td><td>Color ↑</td><td>Shape ↑</td><td>Texture ↑</td><td>Spatial ↑</td><td>Numeracy ↑</td></tr><tr><td>Stable v1.4 (Rombach et al., 2022)</td><td>0.3765</td><td>0.3576</td><td>0.4156</td><td>0.1246</td><td>0.4461</td></tr><tr><td>Stable v2 (Rombach et al., 2022)</td><td>0.5065</td><td>0.4221</td><td>0.4922</td><td>0.1342</td><td>0.4579</td></tr><tr><td>Composable v2 (Liu et al., 2022)</td><td>0.4063</td><td>0.3299</td><td>0.3645</td><td>0.0800</td><td>0.4261</td></tr><tr><td>Structured v2 (Feng et al., 2022)</td><td>0.4990</td><td>0.4218</td><td>0.4900</td><td>0.1386</td><td>0.4550</td></tr><tr><td>Attn-Exct v2 (Chefer et al., 2023)</td><td>0.6400</td><td>0.4517</td><td>0.5963</td><td>0.1455</td><td>0.4767</td></tr><tr><td>GORS unbiased (Huang et al., 2023)</td><td>0.6414</td><td>0.4546</td><td>0.6025</td><td>0.1725</td><td>-</td></tr><tr><td>GORS (Huang et al., 2023)</td><td>0.6603</td><td>0.4785</td><td>0.6287</td><td>0.1815</td><td>0.4841</td></tr><tr><td>DDPO (Black et al., 2023)</td><td>0.6821</td><td>0.5655</td><td>0.6909</td><td>0.1961</td><td>0.5102</td></tr><tr><td>LOOP (k=3)</td><td>0.7515</td><td>0.6220</td><td>0.7353</td><td>0.1966</td><td>0.5242</td></tr><tr><td>LOOP (k=4)</td><td>0.7859</td><td>0.6676</td><td>0.7518</td><td>0.2136</td><td>0.5422</td></tr></table>
Table 3: Comparing the performance of LOOP with DDPO on the aesthetic and image-text alignment tasks. Higher values are better.
<table><tr><td>Method</td><td>Aesthetic ↑</td><td>Image Align. ↑</td></tr><tr><td>DDPO (Black et al., 2023)</td><td>6.8135</td><td>20.466</td></tr><tr><td>LOOP (k=2)</td><td>6.8617</td><td>20.788</td></tr><tr><td>LOOP (k=3)</td><td>7.0772</td><td>20.619</td></tr><tr><td>LOOP (k=4)</td><td>7.8606</td><td>20.909</td></tr></table>
# 8 Qualitative Examples
For a qualitative evaluation of the attribute-binding reasoning ability, we present some example image generations from SD, DDPO, and LOOP in Figures 1, 4, and 5.
Figure 1 presents qualitative examples of the attribute binding task. In the example in the first column of Figure 1, the input prompt specifies a black ball with a white cat. Stable Diffusion (SD) and PPO fail to bind the color black to the generated ball, whereas LOOP successfully binds that attribute. Similarly, in the third column, SD and PPO fail to bind the hexagon shape attribute to the watermelon, whereas LOOP manages to do so. In the fourth column, SD and PPO fail to generate the horse object at all, whereas LOOP adds the horse with the specified black color and flowing cyan patterns.
Figure 4 highlights improvements in aesthetic quality of the generated images. Compared to SD v2 and PPO, LOOP produces sharper, more coherent compositions with balanced lighting and color tone. For example, in the second column ("a cat") and in the fourth column ("butterfly"), LOOP enhances realism and contrast while preserving overall artistic intent.
Finally, Figure 5 presents additional qualitative examples that emphasize both binding and aesthetics. LOOP accurately binds challenging color-object pairs (e.g., teal branch, pink cornfield) while producing more visually appealing and natural results. PPO and SD v2 often miss attribute alignment or produce dull, less cohesive scenes.
# 9 Conclusion
We have studied the efficiency-effectiveness trade-off between two fundamental RL methods for diffusion fine-tuning: REINFORCE and PPO. Our analysis, both theoretical and empirical, demonstrates that while REINFORCE is computationally efficient and easier to implement, it suffers from high variance and sample inefficiency compared to PPO. PPO, though more effective, comes with significant computational overhead, requiring three models in memory simultaneously and involving sensitive hyperparameter tuning.

Figure 4: LOOP improves aesthetic quality. Qualitative examples are presented from images generated via Stable Diffusion 2.0 (first row), PPO (second row), and LOOP $k = 4$ (third row), for the prompts "A puppy dog", "A cat", "Butterfly", "Bright yellow sunflower in a green field", and "Crystal clear mountain lake reflecting snow-capped peaks". LOOP consistently generates more aesthetic images than PPO and SD.
Building on these insights, we have introduced LOOP, a novel RL method for diffusion fine-tuning that combines variance reduction techniques from REINFORCE (multiple trajectory sampling and leave-one-out baseline correction) with the robustness and sample efficiency of PPO (importance sampling and clipping). Our empirical evaluation on the T2I-CompBench benchmark demonstrates that LOOP achieves substantial improvements over both the base Stable Diffusion model and the state-of-the-art PPO method across multiple tasks, including attribute binding (color, shape, texture, spatial relationships), aesthetic quality, and image-text alignment.
Quantitatively, LOOP $(k = 4)$ achieves substantial improvements over PPO across all evaluated tasks. On the T2I-CompBench benchmark, LOOP achieves relative improvements of $18.1\%$ on shape binding, $15.2\%$ on color binding, $8.8\%$ on texture binding, and $8.9\%$ on spatial reasoning. LOOP also improves aesthetic quality by $15.4\%$ and image-text alignment by $2.2\%$. Qualitatively, as shown in Figures 1, 4, and 5, LOOP successfully binds attributes that previous methods fail to capture, while also producing more visually coherent and aesthetic images.
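
These percentages can be re-derived directly from the rewards reported in Tables 2 and 3, computing (LOOP - PPO) / PPO for LOOP with $k = 4$:

```python
# Rewards copied from Tables 2 and 3 (DDPO/PPO column vs. LOOP k=4).
scores = {
    "color":       (0.7859, 0.6821),
    "shape":       (0.6676, 0.5655),
    "texture":     (0.7518, 0.6909),
    "spatial":     (0.2136, 0.1961),
    "aesthetic":   (7.8606, 6.8135),
    "image align": (20.909, 20.466),
}
for task, (loop, ppo) in scores.items():
    print(f"{task}: {100 * (loop - ppo) / ppo:.1f}% relative improvement")
# -> 15.2%, 18.1%, 8.8%, 8.9%, 15.4%, 2.2%
```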
A limitation of LOOP is the increased computational cost from sampling multiple diffusion trajectories per prompt, which leads to longer training times compared to standard PPO. Future work could explore adaptive sampling strategies to reduce this overhead while maintaining LOOP's effectiveness, extend the method to other diffusion architectures and modalities, or investigate the integration of human preference modeling for better alignment with real-world objectives.

|
| 407 |
+
SD v2
|
| 408 |
+
|
| 409 |
+

|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
|
| 415 |
+

|
| 416 |
+
|
| 417 |
+

|
| 418 |
+
PPO
|
| 419 |
+
|
| 420 |
+

|
| 421 |
+
|
| 422 |
+

|
| 423 |
+
|
| 424 |
+

|
| 425 |
+
|
| 426 |
+

|
| 427 |
+
|
| 428 |
+

|
| 429 |
+
LOOP
|
| 430 |
+
|
| 431 |
+

|
| 432 |
+
Figure 5: Additional qualitative examples presented from images generated via Stable Diffusion 2.0 (first row), PPO (second row), and LOOP $k = 4$ (third row). LOOP consistently generates more aesthetic images, as compared to PPO and SD (first, third, and fifth prompt). LOOP also binds the color attribute (teal branch in second example, and pink cornfield in the forth example), where SD and PPO fail.
|
| 433 |
+
|
| 434 |
+

|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
|
| 438 |
+

|
| 439 |
+
|
| 440 |
+
"A white feather on "A neon orange owl a black velvet sur- sitting on a teal face" branch
|
| 441 |
+
|
| 442 |
+
"Pink bridge over a "A pink cornfield" glowing blue river"
|
| 443 |
+
|
| 444 |
+
"A yellow flower in a green vase"
# References
Joshua Achiam, David Held, Aviv Tamar, and Pieter Abbeel. Constrained policy optimization. In International Conference on Machine Learning, pp. 22-31. PMLR, 2017.
Arash Ahmadian, Chris Cremer, Matthias Gallé, Marzieh Fadaee, Julia Kreutzer, Olivier Pietquin, Ahmet Üstün, and Sara Hooker. Back to basics: Revisiting REINFORCE style optimization for learning from human feedback in LLMs. arXiv preprint arXiv:2402.14740, 2024.
Jacob Austin, Daniel D Johnson, Jonathan Ho, Daniel Tarlow, and Rianne van den Berg. Structured denoising diffusion models in discrete state-spaces. Advances in Neural Information Processing Systems, 34:17981-17993, 2021.
Kevin Black, Michael Janner, Yilun Du, Ilya Kostrikov, and Sergey Levine. Training diffusion models with reinforcement learning. arXiv preprint arXiv:2305.13301, 2023.
Andreas Blattmann, Robin Rombach, Huan Ling, Tim Dockhorn, Seung Wook Kim, Sanja Fidler, and Karsten Kreis. Align your latents: High-resolution video synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22563-22575, 2023.
Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, and Daniel Cohen-Or. Attend-and-excite: Attention-based semantic guidance for text-to-image diffusion models. ACM Transactions on Graphics (TOG), 42(4):1-10, 2023.
Logan Engstrom, Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Firdaus Janoos, Larry Rudolph, and Aleksander Madry. Implementation matters in deep RL: A case study on PPO and TRPO. In International Conference on Learning Representations, 2019.
Ying Fan, Olivia Watkins, Yuqing Du, Hao Liu, Moonkyung Ryu, Craig Boutilier, Pieter Abbeel, Mohammad Ghavamzadeh, Kangwook Lee, and Kimin Lee. Reinforcement learning for fine-tuning text-to-image diffusion models. Advances in Neural Information Processing Systems, 36, 2024.
Weixi Feng, Xuehai He, Tsu-Jui Fu, Varun Jampani, Arjun Akula, Pradyumna Narayana, Sugato Basu, Xin Eric Wang, and William Yang Wang. Training-free structured diffusion guidance for compositional text-to-image synthesis. arXiv preprint arXiv:2212.05032, 2022.
Huan Fu and Guoqing Cheng. Enhancing semantic mapping in text-to-image diffusion via gather-and-bind. Computers & Graphics, 125:104118, 2024.
Evan Greensmith, Peter L Bartlett, and Jonathan Baxter. Variance reduction techniques for gradient estimates in reinforcement learning. Journal of Machine Learning Research, 5(9), 2004.
Yi Gu, Zhendong Wang, Yueqin Yin, Yujia Xie, and Mingyuan Zhou. Diffusion-RPO: Aligning diffusion models through relative preference optimization. arXiv preprint arXiv:2406.06382, 2024.
Shashank Gupta. Safe, efficient, and robust reinforcement learning for ranking and diffusion models. arXiv preprint arXiv:2510.15429, 2025.
Shashank Gupta, Philipp Hager, Jin Huang, Ali Vardasbi, and Harrie Oosterhuis. Unbiased learning to rank: On recent advances and practical applications. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, pp. 1118-1121, 2024a.
Shashank Gupta, Olivier Jeunen, Harrie Oosterhuis, and Maarten de Rijke. Optimal baseline corrections for off-policy contextual bandits. In Proceedings of the 18th ACM Conference on Recommender Systems, pp. 722-732, 2024b.
Shashank Gupta, Harrie Oosterhuis, and Maarten de Rijke. Practical and robust safety guarantees for advanced counterfactual learning to rank. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, pp. 737-747, 2024c.
Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in Neural Information Processing Systems, 33:6840-6851, 2020.
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021.
Kaiyi Huang, Kaiyue Sun, Enze Xie, Zhenguo Li, and Xihui Liu. T2I-CompBench: A comprehensive benchmark for open-world compositional text-to-image generation. Advances in Neural Information Processing Systems, 36:78723-78747, 2023.
Shengyi Huang, Michael Noukhovitch, Arian Hosseini, Kashif Rasul, Weixun Wang, and Lewis Tunstall. The N+ implementation details of RLHF with PPO: A case study on TL;DR summarization. arXiv preprint arXiv:2403.17031, 2024.
Wouter Kool, Herke van Hoof, and Max Welling. Buy 4 REINFORCE samples, get a baseline for free! In ICLR Workshop on Deep RL Meets Structured Prediction, 2019.
Kimin Lee, Hao Liu, Moonkyung Ryu, Olivia Watkins, Yuqing Du, Craig Boutilier, Pieter Abbeel, Mohammad Ghavamzadeh, and Shixiang Shane Gu. Aligning text-to-image models using human feedback. arXiv preprint arXiv:2302.12192, 2023.
Shufan Li, Konstantinos Kallidromitis, Akash Gokul, Yusuke Kato, and Kazuki Kozuka. Aligning diffusion models by optimizing human utility. arXiv preprint arXiv:2404.04465, 2024.
Nan Liu, Shuang Li, Yilun Du, Antonio Torralba, and Joshua B Tenenbaum. Compositional visual generation with composable diffusion models. In European Conference on Computer Vision, pp. 423-439. Springer, 2022.
Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding R1-Zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025.
Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2017. URL https://arxiv.org/abs/1711.05101.
Shakir Mohamed, Mihaela Rosca, Michael Figurnov, and Andriy Mnih. Monte Carlo gradient estimation in machine learning. Journal of Machine Learning Research, 21(132):1-62, 2020.
Art B. Owen. Monte Carlo theory, methods and examples. https://artowen.su.domains/mc/, 2013.
James Queeney, Yannis Paschalidis, and Christos G Cassandras. Generalized proximal policy optimization with sample reuse. Advances in Neural Information Processing Systems, 34:11909-11919, 2021.
Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024.
Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with CLIP latents. arXiv preprint arXiv:2204.06125, 2022.
Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022.
Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. LAION-5B: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022.
John Schulman. Trust region policy optimization. arXiv preprint arXiv:1502.05477, 2015.
John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Yang Wu, et al. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.
Idan Shenfeld, Jyothish Pari, and Pulkit Agrawal. RL's razor: Why online reinforcement learning forgets less. arXiv preprint arXiv:2509.04259, 2025.
Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265. PMLR, 2015.
Richard S. Sutton and Andrew G. Barto. Reinforcement Learning: An Introduction. The MIT Press, 2018.
Adith Swaminathan and Thorsten Joachims. The self-normalized estimator for counterfactual learning. In Advances in Neural Information Processing Systems, volume 28, 2015.
Bram Wallace, Meihua Dang, Rafael Rafailov, Linqi Zhou, Aaron Lou, Senthil Purushwalkam, Stefano Ermon, Caiming Xiong, Shafiq Joty, and Nikhil Naik. Diffusion model alignment using direct preference optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8228-8238, 2024.
Chenyu Wang, Masatoshi Uehara, Yichun He, Amy Wang, Tommaso Biancalani, Avantika Lal, Tommi Jaakkola, Sergey Levine, Hanchen Wang, and Aviv Regev. Fine-tuning discrete diffusion models via reward optimization with applications to DNA and protein design. arXiv preprint arXiv:2410.13643, 2024.
Ronald J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine Learning, 8:229-256, 1992.
Zhengpeng Xie, Changdong Yu, and Weizheng Qiao. Dropout strategy in reinforcement learning: Limiting the surrogate objective variance in policy optimization methods. arXiv preprint arXiv:2310.20380, 2023.
Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. ImageReward: Learning and evaluating human preferences for text-to-image generation. Advances in Neural Information Processing Systems, 36, 2024.
Minkai Xu, Alexander S Powers, Ron O Dror, Stefano Ermon, and Jure Leskovec. Geometric latent diffusion models for 3D molecule generation. In International Conference on Machine Learning, pp. 38592-38610. PMLR, 2023.
Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. Token-level direct preference optimization. arXiv preprint arXiv:2404.11999, 2024.
Rui Zheng, Shihan Dou, Songyang Gao, Yuan Hua, Wei Shen, Binghai Wang, Yan Liu, Senjie Jin, Yuhao Zhou, Limao Xiong, et al. Delve into PPO: Implementation matters for stable RLHF. In NeurIPS 2023 Workshop on Instruction Tuning and Instruction Following, 2023.
Han Zhong, Guhao Feng, Wei Xiong, Xinle Cheng, Li Zhao, Di He, Jiang Bian, and Liwei Wang. DPO meets PPO: Reinforced token optimization for RLHF. arXiv preprint arXiv:2404.18922, 2024.
data/2025/2503_00xxx/2503.00897/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb78154396e90e9a0df6cb8d43f2c4f24cf5ef247c97d4868fcfe7ef98311080
size 953110
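
The three lines in this hunk are a Git LFS pointer that stands in for the actual zip archive in the repository. As an aside, such a pointer can be read with a few lines; this is a sketch under our own naming, not the git-lfs client:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; the oid carries a "sha256:" prefix.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size_bytes": int(fields["size"]),
    }
```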
data/2025/2503_00xxx/2503.00897/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_content_list.json
ADDED
@@ -0,0 +1,1534 @@
[
  {"type": "text", "text": "IteRPrime: Zero-shot Referring Image Segmentation with Iterative Grad-CAM Refinement and Primary Word Emphasis", "text_level": 1, "bbox": [94, 119, 898, 161], "page_idx": 0},
  {"type": "text", "text": "Yuji Wang*, Jingchen Ni*, Yong Liu, Chun Yuan, Yansong Tang†", "bbox": [228, 176, 771, 196], "page_idx": 0},
  {"type": "text", "text": "Shenzhen International Graduate School, Tsinghua University *{yuji-wan24, njc24} @ mails.tsinghua.edu.cn, †tang.yansong@sz.tsinghua.edu.cn", "bbox": [228, 200, 767, 229], "page_idx": 0},
  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [250, 273, 313, 286], "page_idx": 0},
  {"type": "text", "text": "Zero-shot Referring Image Segmentation (RIS) identifies the instance mask that best aligns with a specified referring expression without training and fine-tuning, significantly reducing the labor-intensive annotation process. Despite achieving commendable results, previous CLIP-based models have a critical drawback: the models exhibit a notable reduction in their capacity to discern relative spatial relationships of objects. This is because they generate all possible masks on an image and evaluate each masked region for similarity to the given expression, often resulting in decreased sensitivity to direct positional clues in text inputs. Moreover, most methods have weak abilities to manage relationships between primary words and their contexts, causing confusion and reduced accuracy in identifying the correct target region. To address these challenges, we propose IteRPrimE (Iterative Grad-CAM Refinement and Primary word Emphasis), which leverages a saliency heatmap through Grad-CAM from a Vision-Language Pre-trained (VLP) model for image-text matching. An iterative Grad-CAM refinement strategy is introduced to progressively enhance the model's focus on the target region and overcome positional insensitivity, creating a self-correcting effect. Additionally, we design the Primary Word Emphasis module to help the model handle complex semantic relations, enhancing its ability to attend to the intended object. Extensive experiments conducted on the RefCOCO/+/g, and PhraseCut benchmarks demonstrate that IteRPrimE outperforms previous SOTA zero-shot methods, particularly excelling in out-of-domain scenarios.", "bbox": [99, 297, 460, 650], "page_idx": 0},
  {"type": "text", "text": "Code — https://github.com/VoyageWang/IteRPrimE", "bbox": [83, 667, 433, 683], "page_idx": 0},
  {"type": "text", "text": "Introduction", "text_level": 1, "bbox": [225, 704, 336, 720], "page_idx": 0},
  {"type": "text", "text": "Referring Image Segmentation (RIS) requires the model to generate a pixel-level referred object mask based on a textual description, extending the applicability to various tasks such as robot interaction and image editing (Yang et al. 2024, 2022; Liu et al. 2024b; Lai et al. 2024; Luo et al. 2024b). Different from standard semantic segmentation (Wang, Zhao, and Sun 2023; Wang et al. 2024b; Han", "bbox": [81, 726, 478, 824], "page_idx": 0},
  {"type": "text", "text": "et al. 2023; Luo et al. 2024a; Bai et al. 2024), RIS necessitates the differentiation of instances within the same category and their relationships with other objects or the scene, which requires high demands on the semantic understanding and spatial perception of the model. However, annotating exact pairs of images, descriptions, and ground-truth masks is both expensive and time-intensive, as the annotation of a query needs a grasp of diverse positional and attributive details within the image (Liu, Ding, and Jiang 2023; Ding et al. 2023; Liu et al. 2019). Recent weakly supervised RIS techniques (Strudel, Laptev, and Schmid 2022; Lee et al. 2023; Xu et al. 2022) have been introduced to mitigate these annotation challenges, yet they still depend on paired data for training purposes and have relatively poor performance. In contrast, a zero-shot approach holds greater value. Leveraging vision-language pre-trained (VLP) models such as CLIP (Radford et al. 2021), this method efficiently generalizes across diverse concepts and unseen categories without further training and fine-tuning.", "bbox": [516, 272, 913, 537], "page_idx": 0},
  {"type": "text", "text": "Existing methodologies to harness the characteristics of being unnecessary to fit training data presented by zero-shot learning often employ a two-stage pipeline, shown in Figure 1 (a). As a discriminator between the images masked by the candidate masks and the expression, CLIP is used to select the instance mask whose similarity score is the highest (Sun et al. 2024; Yu, Seo, and Son 2023; Suo, Zhu, and Yang 2023; Ni et al. 2023). However, we observed that these methods always malfunctioned when encountering text inputs with positional information such as \"left\" and \"right\". Due to only a single instance contained in a masked image, the absence of relative spatial perception can be the inherent limitation of these CLIP-based paradigms. Previous pieces of literature alleviate this issue by injecting the human priors or bias that explicitly prompts the CLIP with the given direction clues (Ni et al. 2023; Suo, Zhu, and Yang 2023). To be more specific, they manually design spatial decaying weights from 1 to 0 in the directions consistent with text phrases to make the model aware of positional information, but it can not generalize the scenarios out of predefined directions such as \"next to\". Additionally, the domain shift for CLIP from the natural image to the masked image can also impact the segmentation performance (Liu et al. 2024a; Ding et al. 2022; Zhu and Chen 2024).", "bbox": [516, 539, 911, 872], "page_idx": 0},
  {"type": "text", "text": "Some researchers (Lee et al. 2023) have leveraged Grad", "bbox": [532, 875, 911, 888], "page_idx": 0},
  {"type": "aside_text", "text": "arXiv:2503.00936v1 [cs.CV] 2 Mar 2025", "bbox": [22, 265, 57, 700], "page_idx": 0},
  {"type": "page_footnote", "text": "\\*These authors contributed equally. \n+Corresponding Author. \nCopyright © 2025, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.", "bbox": [81, 835, 478, 888], "page_idx": 0},
  {"type": "image", "img_path": "images/8e41f3d3c4ef3704cebb1f41b5c30be135db61ecf876955582aaced5101a7ce6.jpg", "image_caption": ["Figure 1: (a) The general pipeline of CLIP-based methods. They lack the perception of spatial relative position due to the masked images. (b) The pipeline of our IteRPrimE with Iterative Grad-CAM Refinement Strategy and Primary Word Emphasis of \"bike\". (c) This is a comparative experiment of positional phrase accuracy between IteRPrimE and GL-CLIP on RefCOCO and RefCOCOg."], "image_footnote": [], "bbox": [135, 74, 872, 268], "page_idx": 1},
  {"type": "text", "text": "CAM (Selvaraju et al. 2017) and created two specialized loss functions to attenuate the detrimental effects of positional phrases in weakly supervised settings. Although the losses are unsuitable for zero-shot scenarios, Grad-CAM can partially mitigate the deleterious effects associated with masked images. This is because the method maintains the integrity of the model's spatial perception capabilities by delineating the regions with the greatest attention in the original image for localization, shown in Figure 1 (b). Nevertheless, we still find two major problems by analyzing the occurrences and characteristics of Grad-CAM. First, Grad-CAM struggles to discriminate the semantic relations between different noun phrases, due to the lack of a stronger consideration of the primary word than other context words, shown in baseline predictions of Figure 2 (a). Specifically, the model's weak ability to effectively prioritize the main word in complex expressions undermines its overall performance. Second, Grad-CAM is limited to identifying only small areas of the referred object, which consequently results in selecting undesired instance masks.", "bbox": [84, 364, 478, 641], "page_idx": 1},
  {"type": "text", "text": "To overcome these challenges, we propose a novel framework namely, IteRPrime (Iterative Grad-CAM Refinement and Primary word Emphasis) utilizing Grad-CAM for zero-shot RIS. First, we implement an iterative refinement strategy to enhance the representational accuracy and enlarge the indicated area of Grad-CAM, progressively improving the model's concentration on the target object with each cycle, shown in Figure 2 (b). Simultaneously, this strategy is particularly beneficial when the referring expression includes positional words, as it offers the model chances of self-correction at each iteration, shown in Figure 2 (c). Second, the Primary Word Emphasis Module (PWEM) plays a crucial role in enhancing the weak abilities to handle the complex semantic relationships between primary words and other contexts. This module is achieved by emphasizing the Grad-CAMs of the main word within the referring expression, from local and global aspects. Finally,", "bbox": [81, 652, 480, 891], "page_idx": 1},
  {"type": "text", "text": "a post-processing module is designed to select a high-quality, contiguous instance mask from a mask proposal network, which encapsulates the target object as indicated by Grad-CAM. By addressing the limitations, the IteRPrimE approach achieves superior performance over prior zero-shot state-of-the-art techniques, notably excelling in out-of-domain scenarios and exhibiting robust cross-domain transfer proficiency. Our main contributions include", "bbox": [514, 364, 911, 476], "page_idx": 1},
  {"type": "list", "sub_type": "text", "list_items": ["1. To our best knowledge, we are the first to use Grad-CAM to instruct Segmentors for zero-shot RIS tasks.", "2. We propose the Iterative Grad-CAM Refinement Strategy (IGRS) and Primary Word Emphasis Module (PWEM) to enhance the accuracy and representation of Grad-CAM for better localization, shown in Figure 2.", "3. Compared to the previous CLIP-based method, our method significantly outperforms it with inputs containing positional information, shown in Figure 1 (c). Additionally, the approach achieves a better performance on the four popular benchmarks, especially for the outdomain datasets."], "bbox": [517, 479, 913, 655], "page_idx": 1},
  {"type": "text", "text": "Related Works", "text_level": 1, "bbox": [650, 674, 779, 690], "page_idx": 1},
  {"type": "text", "text": "Zero-shot referring image segmentation. For the fully-supervised setting, training a well-specified model for RIS needs massive paired text-visual annotations, which are sometimes not affordable and accessible (Shah, VS, and Patel 2024; Liu, Ding, and Jiang 2023; Yang et al. 2022; Wang et al. 2022b; Kim et al. 2022; Ding et al. 2021; Jing et al. 2021). Besides, these models have relatively weak ability in out-of-domain scenarios due to the limited data and a domain gap. Therefore, the zero-shot RIS methods are proposed as the alternative. Global- and local-CLIP (GL-CLIP) (Yu, Seo, and Son 2023) is the first proposed to segment the instance given the text input with zero-shot transfer. By interfacing with the mask proposal network FreeSOLO (Wang et al. 2022a), the approach leverages both global", "bbox": [514, 694, 913, 890], "page_idx": 1},
  {"type": "text", "text": "(a) Expression: a man standing next to a young girl on a grassy hillside", "bbox": [88, 66, 475, 79], "page_idx": 2},
  {"type": "image", "img_path": "images/08c5bfc9b7b96b778fa64c74ede0552db665310524961fd6e4ca13696a9fe230.jpg", "image_caption": [], "image_footnote": [], "bbox": [102, 80, 194, 128], "page_idx": 2},
  {"type": "image", "img_path": "images/b8a65e9611bc990d6c941186aa9852e0a377c4019d41afa248a2f54a1df42236.jpg", "image_caption": ["Baseline"], "image_footnote": [], "bbox": [225, 80, 318, 128], "page_idx": 2},
  {"type": "image", "img_path": "images/40879e8606d0c8ebb202411febbb36050bef000d4deb8875c5f4671224ed5c91.jpg", "image_caption": [], "image_footnote": [], "bbox": [352, 80, 444, 128], "page_idx": 2},
  {"type": "image", "img_path": "images/b37343766148a0f4b4a0d36cef816e0448c8924a87b5f583cee15e1265accd95.jpg", "image_caption": ["(b) Expression: businessman posing in front of an airplane door", "1st", "Iteration"], "image_footnote": [], "bbox": [107, 147, 184, 196], "page_idx": 2},
  {"type": "image", "img_path": "images/396caf9e6f4ebd005f83db66eb661ffc7481654a788eec4f706a0c729083332d.jpg", "image_caption": ["2nd", "Iteration"], "image_footnote": [], "bbox": [235, 148, 313, 196], "page_idx": 2},
  {"type": "image", "img_path": "images/a6f2ed8e70edbc7ceb140194d80be54d8a8cd07fab127513062e6160b93c9f7e.jpg", "image_caption": [], "image_footnote": [], "bbox": [364, 148, 442, 198], "page_idx": 2},
  {"type": "image", "img_path": "images/f3d31db3626d6502e64bda66eebcad5615aa7f559b49860f63c3f9c55f765ff9.jpg", "image_caption": ["Figure 2: (a) The weak ability of the baseline model to differentiate the semantic relationships between the primary word \"man\" and the other noun phrases colored green and orange. PWEM can make the model aware of the targeted instance referred to by the main word. (b) The IGRS facilitates the expansion of highlighted areas, surpassing the confined small regions. (c) IGRS offers the model chances of self-correction."], "image_footnote": [], "bbox": [89, 199, 245, 276], "page_idx": 2},
  {"type": "text", "text": "and local textual-image similarity to enhance the discriminative capabilities of the CLIP model. Based on GL-CLIP, some researchers (Wang et al. 2024a) combine the original CLIP similarity score with their proposed Balanced Score with Auxiliary Prompts (BSAP), namely BSAP-H, to reduce the CLIP's text-to-image retrieval hallucination. Ref-Diff (Ni et al. 2023) demonstrates that the text-to-image generative model like Stable Diffusion (Rombach et al. 2022) can generate the intended mask from the cross-attention map, which has considerable performance. TAS (Suo, Zhu, and Yang 2023) mainly depends on another large captioner network BLIP2 (Li et al. 2023) to mine the negative text based on the previous mask proposal network plus discriminator paradigm, which achieves favorable performances. Additionally, SAM (Kirillov et al. 2023) is utilized for better segmentation accuracy. However, these CLIP-based methods struggle to segment the referred subject with positional-described text queries, due to the absence of spatial relationships in the masked image.", "bbox": [81, 429, 478, 691], "page_idx": 2},
  {"type": "text", "text": "Grad-CAM for localization. Grad-CAM (Selvaraju et al. 2017) is proposed to provide explainable clues indicating the regions the model pays attention to for the prediction head. In the context of the Image Text Matching (ITM) objective from any VLP (Li et al. 2022; Xu et al. 2023a,b), Grad-CAM enables the establishment of a modality mapping from the textual to the visual domain, specifically calibrated for the task of visual localization. Many works utilize it to localize the objects with the given text (Shen et al. 2024; Lee et al. 2023; Xu et al. 2022; He et al. 2022; Li et al. 2021). However, these approaches either generate a bounding box annotation or are employed within weakly supervised scenarios. Compared to approaches (Shin, Xie, and Albanie 2022; Zhou, Loy, and Dai 2022; Luo et al. 2024a)", "bbox": [81, 694, 480, 888], "page_idx": 2},
  {"type": "text", "text": "that perform zero-shot open vocabulary semantic segmentation with Grad-CAM, we are the first to propose the Grad-CAM for zero-shot RIS to study its behaviors under longer and complex textual inputs instead of a single category noun. To address problems of lacking consideration between main words and the other, the PWEM is proposed to aggregate the Grad-CAM from local-spatial and global-token levels. Secondly, a novel iterative refinement strategy is employed to obtain a better representation of Grad-CAM step by step.", "bbox": [514, 69, 911, 194], "page_idx": 2},
  {"type": "text", "text": "Preliminaries", "text_level": 1, "bbox": [656, 208, 772, 222], "page_idx": 2},
  {"type": "text", "text": "The generation of Grad-CAM is essential for harnessing it for RIS. Given an image-expression pair $(I, E)$ , we can obtain their corresponding embeddings, $v$ and $e$ , by the visual encoder $v = f_{I}(I)$ and text encoder $e = f_{T}(E)$ , respectively. Then, for multimodal fusion, these two embeddings are fed to the cross-attention layers used to align the visual and textual information (Yu et al. 2022; Vaswani et al. 2017). The resultant attention activation maps, $\\mathbf{A}$ , can indicate the activated and recognized regions of $v$ concerning each query textual token in $e$ . However, these indication clues are usually scattered and not densely distributed in the relevant regions. Thus, the gradients, $\\mathbf{G}$ can be used to sharpen and dilute the effect of non-relevant regions in $\\mathbf{A}$ , where contribute less to the output objective, $y$ , like Image Text Matching (ITM). The result of this gradient-weighted dilution process is known as Grad-CAM, $\\mathbf{H}$ .", "bbox": [514, 228, 911, 449], "page_idx": 2},
  {"type": "text", "text": "In the cross-attention layer, the Grad-CAM can be formulated by Equation (1)", "bbox": [516, 450, 911, 478], "page_idx": 2},
  {"type": "equation", "text": "\n$$\n\\mathbf {H} = \\mathbf {A} \\odot \\mathbf {G}, \\tag {1a}\n$$\n", "text_format": "latex", "bbox": [666, 487, 911, 502], "page_idx": 2},
  {"type": "equation", "text": "\n$$\n\\mathbf {G} = \\operatorname {c l a m p} \\left(\\frac {\\partial y}{\\partial \\mathbf {A}}, 0, \\infty\\right), \\tag {1b}\n$$\n", "text_format": "latex", "bbox": [620, 505, 911, 539], "page_idx": 2},
  {"type": "text", "text": "where clamp removes negative gradients, which often represent noise or irrelevant features. Finally, the Grad-CAM used to indicate the image regions, $\\mathbf{H}_f$ , is the mean over all the number of text tokens $|e|$ , as shown in Equation (2)", "bbox": [516, 546, 911, 603], "page_idx": 2},
  {"type": "equation", "text": "\n$$\n\\mathbf {H} _ {f} = \\mathbb {E} _ {k} \\left(\\mathbf {H} ^ {k}\\right), k \\in | e |, \\mathbf {H} _ {f} \\in \\mathbb {R} ^ {B \\times h \\times w} \\tag {2}\n$$\n", "text_format": "latex", "bbox": [578, 609, 911, 630], "page_idx": 2},
  {"type": "text", "text": "where $\\mathbf{H}^k$ denotes the Grad-CAM for the $k$ -th text token, $B$ is the batch size, and $h \\times w$ is the size of visual latent space. This averaging process treats every word equally and ignores the importance of the primary word, thereby undermining the performance of RIS.", "bbox": [516, 638, 913, 709], "page_idx": 2},
  {"type": "text", "text": "Method", "text_level": 1, "bbox": [679, 722, 750, 738], "page_idx": 2},
  {"type": "text", "text": "Overview", "text_level": 1, "bbox": [516, 744, 594, 757], "page_idx": 2},
  {"type": "text", "text": "Figure 1 (b) demonstrates the entire workflow of our method for zero-shot RIS, IteRPrimE, which can be divided into two parts: an iterative Grad-CAM generator and a selective mask proposal network. First, the Grad-CAM generator is a VLP model with cross-attention layers. The proposed IGRS and PWEM are integrated into the generator. Finally, within the mask proposal network, a post-processing module is designed to select the candidate instance masks, ensuring the accurate and detailed localization of the target object.", "bbox": [514, 762, 913, 888], "page_idx": 2},
  {"type": "image", "img_path": "images/480d1394ceee91f5f4daace8d219e41a1048e578ad12d1aa3f9aea2c7cd0dc96.jpg", "image_caption": ["(a)"], "image_footnote": [], "bbox": [158, 71, 313, 180], "page_idx": 3},
  {"type": "image", "img_path": "images/5d9d89bae4d1dc86e6bdca607a54bb6a30872df1de43c358c98a39e22fb1f89a.jpg", "image_caption": ["(b)", "Figure 3: The Grad-CAMs and attention maps (AM) of \"partially damaged car\". Since the attention map (d) and Grad-CAM (e) of the primary word \"car\" both contain unique activation areas compared to the others, they can be harnessed from local-spatial and global-token perspectives to enhance the focus on the targeted regions, respectively."], "image_footnote": [], "bbox": [316, 71, 441, 188], "page_idx": 3},
  {"type": "image", "img_path": "images/504355148c6657618750c15bd83b594aa4dbace93183a0bec12060b3bd339554.jpg", "image_caption": ["(c)"], "image_footnote": [], "bbox": [442, 73, 566, 188], "page_idx": 3},
  {"type": "image", "img_path": "images/c36ca981dda9fb0c4bc2318f085b523c496abe5f909da07d3ede7c95e24d243b.jpg", "image_caption": ["(d)"], "image_footnote": [], "bbox": [566, 73, 687, 186], "page_idx": 3},
  {"type": "image", "img_path": "images/f997eacc10b03ea70a6cab44658d749ef52332c6bb4ef498eab1acb8ee9b716b.jpg", "image_caption": ["(e)"], "image_footnote": [], "bbox": [689, 73, 846, 186], "page_idx": 3},
  {"type": "text", "text": "Primary Word Emphasis Module", "text_level": 1, "bbox": [83, 286, 344, 301], "page_idx": 3},
  {"type": "text", "text": "The PWEM is an essential component of the IteRPrimE, designed to confront the challenge posed by the weak capability of Grad-CAM to manage the semantic relationships in input texts featuring multiple potential referred nouns. This module emphasizes the Grad-CAM of the primary word in the expression, thereby increasing the focus on the main word during the averaging operation. Specifically, we first use an NLP processing toolbox to parse the part-of-speech (POS) tags of each word, filtering out a set of text tokens that includes special $<CLS>$ token of BERT (Devlin et al. 2018), nouns, adjectives, verbs, proper nouns, and numerals. These words are recognized as effective tokens $W$ that can provide distinct semantics and their contextual information. They are composed of primary words $W_{m}$ and their contexts $W_{c}$ , where $W = W_{m} \\cup W_{c}$ and $W_{m} \\cap W_{c} = \\emptyset$ . Then, we extract the primary noun from these effective words (e.g. \"car\" in \"partially damaged car\" shown in Figure 3) by employing a designed algorithm. It first generates a syntax tree, identifies the leftmost noun phrase (NP), and then finds the rightmost noun (NN) within that NP, which can be detailed in Algorithm 1 in the appendix.",
|
| 599 |
+
"bbox": [
|
| 600 |
+
81,
|
| 601 |
+
303,
|
| 602 |
+
478,
|
| 603 |
+
594
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 3
|
| 606 |
+
},
|
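The primary-word extraction just described (leftmost NP, then its rightmost NN) is specified in the paper's Algorithm 1; the following is only a rough sketch of that heuristic over an NLTK constituency tree, assuming a parse is already available (the paper does not name the parser it uses).

```python
from typing import Optional
from nltk import Tree

def primary_noun(tree: Tree) -> Optional[str]:
    # Leftmost noun phrase in a pre-order traversal of the syntax tree.
    np = next((st for st in tree.subtrees() if st.label() == "NP"), None)
    if np is None:
        return None
    # Rightmost noun (NN*) token within that NP.
    nouns = [tok for tok, pos in np.pos() if pos.startswith("NN")]
    return nouns[-1] if nouns else None

# primary_noun(Tree.fromstring("(S (NP (JJ damaged) (NN car)))"))  # -> "car"
```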
| 607 |
+
{
|
| 608 |
+
"type": "text",
|
| 609 |
+
"text": "As shown in the right part of Figure 4, we emphasize the effect of the primary word Grad-CAM from two perspectives: local spatial-level and global token-level augmentation. Different from the other contextual effective words $W_{c}$ , the attention map $\\mathbf{A}^{W_m}$ and Grad-CAM $\\mathbf{H}_m$ of the primary token holds the unique activated areas that probably indicate the correct localization of Grad-CAM shown in Figure 3. Therefore, to highlight and isolate the specific contribution of the primary word from the local spatial level, we compute the $\\mathrm{L}_2$ normalized differences, $\\mathbf{A}_{dif}$ between the main word activation map and the other context word activation maps, $\\mathbf{A}^{W_c}$ . The activation difference is further integrated with gradients from the main word $\\mathbf{G}^{W_m}$ , forming a spatial modulator to indicate the local spatial importance in the main word Grad-CAM, $\\mathbf{H}_m$ . Thus, we can obtain the local spatial-level enhanced Grad-CAM of the primary word, $\\mathbf{H}_l$ , as shown in Equation (3)",
|
| 610 |
+
"bbox": [
|
| 611 |
+
81,
|
| 612 |
+
595,
|
| 613 |
+
478,
|
| 614 |
+
830
|
| 615 |
+
],
|
| 616 |
+
"page_idx": 3
|
| 617 |
+
},
|
| 618 |
+
{
|
| 619 |
+
"type": "equation",
|
| 620 |
+
"text": "\n$$\n\\mathbf {A} _ {d i f} = \\frac {\\mathbf {A} ^ {W _ {m}} - \\mathbf {A} ^ {W _ {c}}}{\\left\\| \\mathbf {A} ^ {W _ {m}} - \\mathbf {A} ^ {W _ {c}} \\right\\| _ {2}}, \\tag {3a}\n$$\n",
|
| 621 |
+
"text_format": "latex",
|
| 622 |
+
"bbox": [
|
| 623 |
+
184,
|
| 624 |
+
830,
|
| 625 |
+
478,
|
| 626 |
+
866
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 3
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "equation",
|
| 632 |
+
"text": "\n$$\n\\mathbf {H} _ {l} = \\mathbf {A} _ {d i f} \\odot \\mathbf {G} ^ {W _ {m}} \\odot \\mathbf {H} _ {m}, \\tag {3b}\n$$\n",
|
| 633 |
+
"text_format": "latex",
|
| 634 |
+
"bbox": [
|
| 635 |
+
186,
|
| 636 |
+
868,
|
| 637 |
+
478,
|
| 638 |
+
886
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 3
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "where $\\mathbf{H}_m = \\mathbf{A}^{W_m}\\odot \\mathbf{G}^{W_m}$ following Equation (1). Broadcasting occurs when the dimensions do not match.",
|
| 645 |
+
"bbox": [
|
| 646 |
+
514,
|
| 647 |
+
286,
|
| 648 |
+
910,
|
| 649 |
+
315
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 3
|
| 652 |
+
},
|
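Read literally, Equation (3) amounts to a normalized difference followed by element-wise products. A short sketch follows, under the assumption that the context maps have already been reduced (e.g., averaged) to a single tensor so the shapes broadcast, as the text above notes.

```python
import torch

def local_augment(attn_m, attn_c, grad_m):
    """attn_m, attn_c, grad_m: (B, h, w) primary/context attention and clamped grads."""
    diff = attn_m - attn_c
    a_dif = diff / (diff.norm(p=2) + 1e-8)   # Eq. (3a): L2-normalized difference
    h_m = attn_m * grad_m                    # primary-word Grad-CAM, per Eq. (1)
    return a_dif * grad_m * h_m              # Eq. (3b): H_l
```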
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"text": "From the global aspect, we manually add the weight of the main word Grad-CAM $\\mathbf{H}_m$ along the token axis during mean operations, which provides additional enhanced focus on the primary token. Therefore, we can obtain the global token-level Grad-CAM $\\mathbf{H}_g$ by Equation (4)",
|
| 656 |
+
"bbox": [
|
| 657 |
+
514,
|
| 658 |
+
315,
|
| 659 |
+
911,
|
| 660 |
+
387
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 3
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "equation",
|
| 666 |
+
"text": "\n$$\nW ^ {\\prime} = W \\cup \\left\\{W _ {m} \\right\\} \\times N _ {c}, \\tag {4a}\n$$\n",
|
| 667 |
+
"text_format": "latex",
|
| 668 |
+
"bbox": [
|
| 669 |
+
627,
|
| 670 |
+
396,
|
| 671 |
+
911,
|
| 672 |
+
415
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 3
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "equation",
|
| 678 |
+
"text": "\n$$\n\\mathbf {H} _ {g} = \\mathbf {A} ^ {W ^ {\\prime}} \\odot \\mathbf {G} ^ {W ^ {\\prime}} \\tag {4b}\n$$\n",
|
| 679 |
+
"text_format": "latex",
|
| 680 |
+
"bbox": [
|
| 681 |
+
627,
|
| 682 |
+
417,
|
| 683 |
+
911,
|
| 684 |
+
440
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 3
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "text",
|
| 690 |
+
"text": "where $N_{c}$ is the number of context tokens and $\\{W_m\\} \\times N_c$ means repeating the main word for $N_{c}$ times. Finally, the resulting augmented Grad-CAM, $\\mathbf{H}_a$ , is the mean of concatenated local and global Grad-CAMs, $\\mathbf{H}_c$ , along the token axis, where $\\mathbf{H}_c = [\\mathbf{H}_g,\\mathbf{H}_l]$ . This map significantly improves the model's Grad-CAM localization accuracy, shown in PWEM of Figure 2 (a).",
|
| 691 |
+
"bbox": [
|
| 692 |
+
514,
|
| 693 |
+
448,
|
| 694 |
+
913,
|
| 695 |
+
546
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 3
|
| 698 |
+
},
|
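Equation (4) is simply token repetition before the usual Grad-CAM product: duplicating the primary word $N_c$ times along the token axis means it contributes as much as all context words combined once the token mean of Equation (2) is taken. A hedged sketch (the tensor layout here is an assumption):

```python
import torch

def global_augment(attn_w, grad_w, main_idx: int, n_ctx: int):
    """attn_w, grad_w: (|W|, h, w) maps for the effective tokens W."""
    rep_a = attn_w[main_idx:main_idx + 1].repeat(n_ctx, 1, 1)  # {W_m} x N_c copies
    rep_g = grad_w[main_idx:main_idx + 1].repeat(n_ctx, 1, 1)
    a = torch.cat([attn_w, rep_a], dim=0)                      # Eq. (4a): W'
    g = torch.cat([grad_w, rep_g], dim=0)
    return a * g                                               # Eq. (4b): H_g
```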
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "Iterative Grad-CAM Refinement Strategy",
|
| 702 |
+
"text_level": 1,
|
| 703 |
+
"bbox": [
|
| 704 |
+
516,
|
| 705 |
+
561,
|
| 706 |
+
841,
|
| 707 |
+
577
|
| 708 |
+
],
|
| 709 |
+
"page_idx": 3
|
| 710 |
+
},
|
| 711 |
+
{
|
| 712 |
+
"type": "text",
|
| 713 |
+
"text": "Masked Language Modeling (MLM) can be used for bidirectional image generative Transformers such as MasGIT (Chang et al. 2022). The iterative generative paradigm offers self-correction chances for the model to optimize step-by-step in the latent space. Inspired by this, we propose a novel iterative strategy of Grad-CAM to gradually steer the model's attention to the region that the model is not attentive to initially, which brings benefits from two sides. On the one hand, for the circumstances in which Grad-CAM correctly localizes the instance initially, the gradually refined Grad-CAM can be better gathered around the targeted instance region. On the other hand, for the first incorrect localization, the model can attend to other semantic instances to recheck the Grad-CAM prediction, especially for the positional phrase inputs. The overall approach of IGRS is illustrated in the left part of Figure 4.",
|
| 714 |
+
"bbox": [
|
| 715 |
+
514,
|
| 716 |
+
582,
|
| 717 |
+
913,
|
| 718 |
+
804
|
| 719 |
+
],
|
| 720 |
+
"page_idx": 3
|
| 721 |
+
},
|
| 722 |
+
{
|
| 723 |
+
"type": "text",
|
| 724 |
+
"text": "For simple notification, we use $H_{t}$ to represent the resultant $t$ -th iteration Grad-CAM from the PWEM $\\mathbf{H}_a$ . Equation (5) delineates the aggregation and refinement process of Grad-CAM representational updation, which entails the combination with Grad-CAM in the penultimate iteration step $(t - 1)$ , under the constraint of a zero initial condition",
|
| 725 |
+
"bbox": [
|
| 726 |
+
514,
|
| 727 |
+
805,
|
| 728 |
+
913,
|
| 729 |
+
888
|
| 730 |
+
],
|
| 731 |
+
"page_idx": 3
|
| 732 |
+
},
|
| 733 |
+
{
|
| 734 |
+
"type": "image",
|
| 735 |
+
"img_path": "images/fb0bd4e2766737da754962593ba077d92c5a197b3e2691bf41c9870f5b25bf93.jpg",
|
| 736 |
+
"image_caption": [
|
| 737 |
+
"Figure 4: The proposed IGRS (left) and PWEM (right). The mask $M_t'$ is the attention mask for cross-attention layers by dropping the most salient regions of Grad-CAM to zero. PWEM filters the meaningless tokens and augments the Grad-CAM representation from local and global aspects."
|
| 738 |
+
],
|
| 739 |
+
"image_footnote": [],
|
| 740 |
+
"bbox": [
|
| 741 |
+
138,
|
| 742 |
+
71,
|
| 743 |
+
861,
|
| 744 |
+
367
|
| 745 |
+
],
|
| 746 |
+
"page_idx": 4
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "text",
|
| 750 |
+
"text": "$H_0 = 0$",
|
| 751 |
+
"bbox": [
|
| 752 |
+
83,
|
| 753 |
+
455,
|
| 754 |
+
143,
|
| 755 |
+
469
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 4
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "equation",
|
| 761 |
+
"text": "\n$$\nH _ {t} ^ {\\prime} = \\lambda H _ {t - 1} ^ {\\prime} + (1 - \\lambda) \\sigma \\left(H _ {t}\\right), \\tag {5}\n$$\n",
|
| 762 |
+
"text_format": "latex",
|
| 763 |
+
"bbox": [
|
| 764 |
+
176,
|
| 765 |
+
469,
|
| 766 |
+
477,
|
| 767 |
+
488
|
| 768 |
+
],
|
| 769 |
+
"page_idx": 4
|
| 770 |
+
},
|
| 771 |
+
{
|
| 772 |
+
"type": "text",
|
| 773 |
+
"text": "where $H_{t-1}^{\\prime}$ and $H_{t}^{\\prime}$ are the resultant refined heatmaps from the $(t-1)$ -th and $t$ -th iterations, $\\sigma(.)$ is a sigmoid function to scale the value appropriately, and the hyperparameter $\\lambda$ is a balancing factor. To instruct the model to focus on the region previously not paid attention to, in each iteration, a binary attention mask $M_{t}$ would be generated from the refined Grad-CAM heatmap $H_{t}$ by dropping the most attentive region to 0, as shown in Equation (6).",
|
| 774 |
+
"bbox": [
|
| 775 |
+
81,
|
| 776 |
+
494,
|
| 777 |
+
478,
|
| 778 |
+
607
|
| 779 |
+
],
|
| 780 |
+
"page_idx": 4
|
| 781 |
+
},
|
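Equation (5) is an exponential-moving-average update over iterations with zero initialization; a one-line sketch, with names chosen here for illustration:

```python
import torch

def refine(h_prev: torch.Tensor, h_t: torch.Tensor, lam: float) -> torch.Tensor:
    # Eq. (5): H'_t = lam * H'_{t-1} + (1 - lam) * sigmoid(H_t), with H'_0 = 0
    return lam * h_prev + (1 - lam) * torch.sigmoid(h_t)
```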
| 782 |
+
{
|
| 783 |
+
"type": "equation",
|
| 784 |
+
"text": "\n$$\nM _ {t} = \\mathcal {P} \\left(H _ {t}, \\theta\\right), \\mathcal {P} (H, \\theta) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\sigma (H) \\geq \\theta \\\\ 1 & \\text {i f} \\sigma (H) < \\theta , \\end{array} \\right. \\tag {6}\n$$\n",
|
| 785 |
+
"text_format": "latex",
|
| 786 |
+
"bbox": [
|
| 787 |
+
122,
|
| 788 |
+
617,
|
| 789 |
+
477,
|
| 790 |
+
650
|
| 791 |
+
],
|
| 792 |
+
"page_idx": 4
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "text",
|
| 796 |
+
"text": "where $\\mathcal{P}(H,\\theta)$ represents the process of applying a sigmoid function to stretch the values and then thresholding the result at $\\theta$ to create a binary mask. The binary mask $M_{t}$ is then combined with the previous mask $M_{t - 1}^{\\prime}$ by the logical and operation, $M_t' = M_{t - 1}'\\wedge M_t$ , where $M_0$ is a tensor of ones. The $\\wedge$ ensures the model can expand to other regions regardless of the places previously focused. This attention binary mask will be fed into the cross-attention layer of a VLP to mask out the visual regions in embedding $v$ , ensuring the text token queries no longer pay attention to the zero regions within the mask.",
|
| 797 |
+
"bbox": [
|
| 798 |
+
81,
|
| 799 |
+
659,
|
| 800 |
+
478,
|
| 801 |
+
816
|
| 802 |
+
],
|
| 803 |
+
"page_idx": 4
|
| 804 |
+
},
|
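A sketch of the mask update in Equation (6) together with the logical-AND accumulation described above, assuming boolean tensors ($M_0$ is all ones, e.g. `torch.ones_like(h, dtype=torch.bool)`); $\theta = 0.5$ follows the paper's implementation details.

```python
import torch

def update_mask(h_t: torch.Tensor, m_prev: torch.Tensor, theta: float = 0.5):
    m_t = torch.sigmoid(h_t) < theta   # Eq. (6): 0 where salient, 1 elsewhere
    return m_prev & m_t                # M'_t = M'_{t-1} AND M_t: keep all drops so far
```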
| 805 |
+
{
|
| 806 |
+
"type": "text",
|
| 807 |
+
"text": "For an interactive algorithm, the stopping condition is essential. To make the iterative process more flexible, we introduce a dynamic stopping criterion based on the proposed soft ITM score at timestep $t$ , $S^{(t)}$ , which is calculated as the product of the ITM, from the VLP model and the relevance",
|
| 808 |
+
"bbox": [
|
| 809 |
+
81,
|
| 810 |
+
816,
|
| 811 |
+
480,
|
| 812 |
+
888
|
| 813 |
+
],
|
| 814 |
+
"page_idx": 4
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "text",
|
| 818 |
+
"text": "score, $S^{(t)} = ITM^{(t)} \\cdot R^{(t)}$ , where $R^{(t)}$ is defined by:",
|
| 819 |
+
"bbox": [
|
| 820 |
+
514,
|
| 821 |
+
453,
|
| 822 |
+
879,
|
| 823 |
+
470
|
| 824 |
+
],
|
| 825 |
+
"page_idx": 4
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "equation",
|
| 829 |
+
"text": "\n$$\nR ^ {(t)} = \\frac {\\sum_ {x \\in X} \\sum_ {y \\in Y} \\left(1 - \\widetilde {H} _ {t - 1} ^ {\\prime}\\right)}{X \\times Y}, \\tag {7}\n$$\n",
|
| 830 |
+
"text_format": "latex",
|
| 831 |
+
"bbox": [
|
| 832 |
+
596,
|
| 833 |
+
477,
|
| 834 |
+
911,
|
| 835 |
+
513
|
| 836 |
+
],
|
| 837 |
+
"page_idx": 4
|
| 838 |
+
},
|
| 839 |
+
{
|
| 840 |
+
"type": "text",
|
| 841 |
+
"text": "where $\\widetilde{H}_{t-1}^{\\prime}$ is the interpolated Grad-CAM heatmap of $H_{t-1}^{\\prime}$ with the same size as the original image with width $X$ and height $Y$ . This relevance score measures the average overlooked Grad-CAM intensity. A higher $R^{(t)}$ indicates that there are some regions less attentive to previously, guiding the model to focus on these overlooked areas in the next iteration. If the score for the current iteration $S^{(t)}$ is less than the score from the previous iteration $S^{(t-1)}$ , the iterative process is terminated. The total iterative times should not exceed $\\nu$ .",
|
| 842 |
+
"bbox": [
|
| 843 |
+
514,
|
| 844 |
+
521,
|
| 845 |
+
913,
|
| 846 |
+
650
|
| 847 |
+
],
|
| 848 |
+
"page_idx": 4
|
| 849 |
+
},
|
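The relevance score of Equation (7) is just the mean of $(1 - \text{heatmap})$ after upsampling to image resolution; combined with the ITM score it gives the stopping signal $S^{(t)}$. A sketch under those assumptions (the ITM head itself comes from the VLP and is not shown):

```python
import torch
import torch.nn.functional as F

def relevance(h_prev: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # Eq. (7): average overlooked intensity after bilinear upsampling to (Y, X).
    h_up = F.interpolate(h_prev[None, None], size=(height, width), mode="bilinear")[0, 0]
    return (1.0 - h_up).mean()

# stop when S_t = itm_t * relevance(...) < S_{t-1}, or after nu iterations
```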
| 850 |
+
{
|
| 851 |
+
"type": "text",
|
| 852 |
+
"text": "Selective Mask Proposal Network",
|
| 853 |
+
"text_level": 1,
|
| 854 |
+
"bbox": [
|
| 855 |
+
514,
|
| 856 |
+
661,
|
| 857 |
+
777,
|
| 858 |
+
678
|
| 859 |
+
],
|
| 860 |
+
"page_idx": 4
|
| 861 |
+
},
|
| 862 |
+
{
|
| 863 |
+
"type": "text",
|
| 864 |
+
"text": "Through the aforementioned steps, we can employ the Grad-CAM indication clue to instruct the Segmentors to predict the referred instance mask. For a given image, the mask proposal network would predict the $N_{b}$ masks but they can not autonomously choose which object mask users refer to by the language. Therefore, the selection module within the network is designed to select the mask indicated by the Grad-CAM, which divides the selection procedures into two phases: the filtering phase and the scoring phase.",
|
| 865 |
+
"bbox": [
|
| 866 |
+
514,
|
| 867 |
+
680,
|
| 868 |
+
911,
|
| 869 |
+
805
|
| 870 |
+
],
|
| 871 |
+
"page_idx": 4
|
| 872 |
+
},
|
| 873 |
+
{
|
| 874 |
+
"type": "text",
|
| 875 |
+
"text": "Assuming that the Grad-CAM has successfully localized the instance, the center point of Grad-CAM should be within the inner part of the object. Based on this hypothesis, the selection mechanism is initiated by a preliminary evaluation that involves two main criteria. First, we identify the set of coordinates, $\\mathcal{C}_{max}$ , where the heatmap reaches its peak",
|
| 876 |
+
"bbox": [
|
| 877 |
+
514,
|
| 878 |
+
805,
|
| 879 |
+
913,
|
| 880 |
+
888
|
| 881 |
+
],
|
| 882 |
+
"page_idx": 4
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"type": "table",
|
| 886 |
+
"img_path": "images/cfc38fafa9a250cc2b76e5e6e59bedc89bf6be3f65e9def8c36e2ae514b18ee7.jpg",
|
| 887 |
+
"table_caption": [],
|
| 888 |
+
"table_footnote": [],
|
| 889 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"4\">RefCOCO</td><td colspan=\"4\">RefCOCO+</td><td colspan=\"3\">RefCOCOg</td><td rowspan=\"2\">Average</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>test</td><td>avg.</td></tr><tr><td>Zero-shot methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>GL-CLIP (Yu, Seo, and Son 2023)</td><td>26.7</td><td>25.0</td><td>26.5</td><td>26.1</td><td>28.2</td><td>26.5</td><td>27.9</td><td>27.5</td><td>33.0</td><td>33.1</td><td>33.1</td><td>28.4</td></tr><tr><td>BSAP (Wang et al. 2024a)</td><td>27.3</td><td>27.0</td><td>27.1</td><td>27.1</td><td>28.7</td><td>27.8</td><td>28.3</td><td>28.3</td><td>34.5</td><td>34.5</td><td>34.5</td><td>29.4</td></tr><tr><td>Region token (Yu, Seo, and Son 2023)</td><td>23.4</td><td>22.1</td><td>24.6</td><td>23.4</td><td>24.5</td><td>22.6</td><td>25.4</td><td>24.2</td><td>27.6</td><td>27.3</td><td>27.5</td><td>24.7</td></tr><tr><td>SAM-CLIP (Ni et al. 2023)</td><td>26.3</td><td>25.8</td><td>26.4</td><td>26.2</td><td>25.7</td><td>28</td><td>26.8</td><td>26.8</td><td>38.8</td><td>38.9</td><td>38.9</td><td>29.6</td></tr><tr><td>Ref-Diff (Ni et al. 2023)</td><td>37.2</td><td>38.4</td><td>37.2</td><td>37.6</td><td>37.3</td><td>40.5</td><td>33</td><td>36.9</td><td>44</td><td>44.5</td><td>44.3</td><td>39.0</td></tr><tr><td>TAS (Suo, Zhu, and Yang 2023)</td><td>39.8</td><td>41.1</td><td>36.2</td><td>39.0</td><td>43.6</td><td>49.1</td><td>36.5</td><td>43.1</td><td>46.6</td><td>46.8</td><td>46.7</td><td>42.5</td></tr><tr><td>CaR (Sun et al. 2024)</td><td>33.6</td><td>35.4</td><td>30.5</td><td>33.0</td><td>34.2</td><td>36.0</td><td>31.0</td><td>33.7</td><td>36.7</td><td>36.6</td><td>36.7</td><td>34.3</td></tr><tr><td>Weakly-supervised methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>TSEG (Strudel, Laptev, and Schmid 2022)</td><td>25.4</td><td>-</td><td>-</td><td>-</td><td>22.0</td><td>-</td><td>-</td><td>-</td><td>22.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Chunk (Lee et al. 2023)</td><td>31.1</td><td>32.3</td><td>30.1</td><td>31.8</td><td>31.3</td><td>32.1</td><td>30.1</td><td>31.2</td><td>32.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>40.2</td><td>46.5</td><td>33.9</td><td>40.2</td><td>44.2</td><td>51.6</td><td>35.3</td><td>43.7</td><td>46.0</td><td>45.8</td><td>45.9</td><td>42.9</td></tr></table>",
|
| 890 |
+
"bbox": [
|
| 891 |
+
91,
|
| 892 |
+
66,
|
| 893 |
+
906,
|
| 894 |
+
253
|
| 895 |
+
],
|
| 896 |
+
"page_idx": 5
|
| 897 |
+
},
|
| 898 |
+
{
|
| 899 |
+
"type": "text",
|
| 900 |
+
"text": "values. Then, the $m$ -th candidate mask $B^{m}$ is examined to determine if it includes at least one activated pixel at any of these coordinates. Second, to ensure the quality of the masks, we apply a connected component labeling technique to constrain the number of connected components in each mask, ensuring that the number of these components does not exceed a predefined threshold of $\\kappa$ . The combined criteria for the preliminary evaluation are defined as follows:",
|
| 901 |
+
"bbox": [
|
| 902 |
+
81,
|
| 903 |
+
316,
|
| 904 |
+
478,
|
| 905 |
+
429
|
| 906 |
+
],
|
| 907 |
+
"page_idx": 5
|
| 908 |
+
},
|
| 909 |
+
{
|
| 910 |
+
"type": "equation",
|
| 911 |
+
"text": "\n$$\n\\begin{array}{l} \\mathcal {A} = \\left\\{m \\in N _ {b} \\mid B _ {(x, y)} ^ {m} \\neq 0, \\exists (x, y) \\in \\mathcal {C} _ {m a x}, \\right\\}, \\\\ \\mathcal {F} = \\left\\{m \\in N _ {b} \\mid g _ {c c} \\left(B ^ {m}\\right) \\leq \\kappa \\right\\}, \\tag {8} \\\\ \\mathcal {D} = \\mathcal {A} \\cap \\mathcal {F}. \\\\ \\end{array}\n$$\n",
|
| 912 |
+
"text_format": "latex",
|
| 913 |
+
"bbox": [
|
| 914 |
+
111,
|
| 915 |
+
438,
|
| 916 |
+
477,
|
| 917 |
+
497
|
| 918 |
+
],
|
| 919 |
+
"page_idx": 5
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"type": "text",
|
| 923 |
+
"text": "In the above equations, $g_{cc}$ denotes a function that quantifies the number of connected components within the $m$ -th candidate from total $N_b$ masks. The intersection of sets $\\mathcal{A}$ and $\\mathcal{F}$ , denoted as $\\mathcal{D}$ , yields the subset of candidate masks that fulfill both the activation and mask quality requirements. This evaluation process filers the irrelevant and empty masks to reduce the computational cost and enhance efficiency.",
|
| 924 |
+
"bbox": [
|
| 925 |
+
81,
|
| 926 |
+
507,
|
| 927 |
+
478,
|
| 928 |
+
604
|
| 929 |
+
],
|
| 930 |
+
"page_idx": 5
|
| 931 |
+
},
|
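A sketch of the preliminary filtering in Equation (8), using SciPy's connected-component labeling as a stand-in for $g_{cc}$ (the paper does not name its implementation); `masks` is assumed to be an (N_b, Y, X) boolean array and `cmax` the peak coordinates of the heatmap, with $\kappa = 12$ from the implementation details.

```python
import numpy as np
from scipy.ndimage import label

def filter_masks(masks: np.ndarray, cmax, kappa: int = 12):
    keep = []
    for m, mask in enumerate(masks):
        covers_peak = any(mask[y, x] for (y, x) in cmax)  # set A: covers a peak pixel
        _, n_cc = label(mask)                             # set F: component count
        if covers_peak and n_cc <= kappa:                 # D = A ∩ F
            keep.append(m)
    return keep
```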
| 932 |
+
{
|
| 933 |
+
"type": "text",
|
| 934 |
+
"text": "Subsequent to the preliminary filtering phase, we proceed to evaluate each remaining candidate mask through a weighted scoring mechanism that leverages the Grad-CAM heatmap. This involves computing an element-wise product-based score for each mask concerning the heatmap. We define the score for the $j$ -th candidate mask as $Z(j)$ from the set $\\mathcal{D}$ . The scoring process is formulated below:",
|
| 935 |
+
"bbox": [
|
| 936 |
+
81,
|
| 937 |
+
604,
|
| 938 |
+
478,
|
| 939 |
+
704
|
| 940 |
+
],
|
| 941 |
+
"page_idx": 5
|
| 942 |
+
},
|
| 943 |
+
{
|
| 944 |
+
"type": "equation",
|
| 945 |
+
"text": "\n$$\n\\begin{array}{l} Z (j) = \\sum_ {x \\in X} \\sum_ {y \\in Y} \\left(B _ {(x, y)} ^ {j} + B _ {(x, y)} ^ {j} \\odot \\widetilde {H} _ {(x, y)} ^ {\\prime}\\right) \\\\ \\hat {Z} (j) = \\frac {Z (j)}{\\sum_ {x \\in X} \\sum_ {y \\in Y} B _ {(x , y)} ^ {j}}, \\quad j \\in \\mathcal {D}. \\tag {9} \\\\ \\end{array}\n$$\n",
|
| 946 |
+
"text_format": "latex",
|
| 947 |
+
"bbox": [
|
| 948 |
+
132,
|
| 949 |
+
712,
|
| 950 |
+
477,
|
| 951 |
+
786
|
| 952 |
+
],
|
| 953 |
+
"page_idx": 5
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "text",
|
| 957 |
+
"text": "where $\\widetilde{H}_{(x,y)}^{\\prime}$ is the final output Grad-CAM of original image size. The final step in our selection process involves identifying the candidate mask with the maximum normalized score, $\\hat{Z} (j)$ , as the chosen segmentation output:",
|
| 958 |
+
"bbox": [
|
| 959 |
+
81,
|
| 960 |
+
795,
|
| 961 |
+
478,
|
| 962 |
+
858
|
| 963 |
+
],
|
| 964 |
+
"page_idx": 5
|
| 965 |
+
},
|
| 966 |
+
{
|
| 967 |
+
"type": "equation",
|
| 968 |
+
"text": "\n$$\nB _ {\\text {s e l e c t}} = \\arg \\max _ {j \\in \\mathcal {D}} \\hat {Z} (j). \\tag {10}\n$$\n",
|
| 969 |
+
"text_format": "latex",
|
| 970 |
+
"bbox": [
|
| 971 |
+
196,
|
| 972 |
+
867,
|
| 973 |
+
478,
|
| 974 |
+
891
|
| 975 |
+
],
|
| 976 |
+
"page_idx": 5
|
| 977 |
+
},
|
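Equations (9)-(10) reduce to scoring each surviving mask by 1 plus the mean heatmap value inside it, then taking the argmax; a numpy sketch with illustrative names:

```python
import numpy as np

def select_mask(masks: np.ndarray, heat: np.ndarray, keep) -> int:
    def score(j: int) -> float:
        b = masks[j].astype(float)
        z = (b + b * heat).sum()      # Eq. (9): numerator Z(j)
        return z / b.sum()            # Eq. (9): normalized score Z_hat(j)
    return max(keep, key=score)       # Eq. (10): argmax over D
```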
| 978 |
+
{
|
| 979 |
+
"type": "table",
|
| 980 |
+
"img_path": "images/662a60ce74c6a99abe9804791c5f545eb81f94e65901c30047c8e12648821458.jpg",
|
| 981 |
+
"table_caption": [
|
| 982 |
+
"Table 1: Comparison of different methods on different datasets. \"avg.\" denotes the mean performance across various splits within individual datasets, while the terminal \"Average\" column represents the composite mean derived from all dataset splits."
|
| 983 |
+
],
|
| 984 |
+
"table_footnote": [],
|
| 985 |
+
"table_body": "<table><tr><td>Method</td><td>Training dataset</td><td>All</td><td>Unseen</td></tr><tr><td rowspan=\"3\">CRIS</td><td>RefCOCO</td><td>15.5</td><td>13.8</td></tr><tr><td>RefCOCO+</td><td>16.3</td><td>14.6</td></tr><tr><td>RefCOCOg</td><td>16.2</td><td>13.9</td></tr><tr><td rowspan=\"3\">LAVT</td><td>RefCOCO</td><td>16.7</td><td>14.4</td></tr><tr><td>RefCOCO+</td><td>16.6</td><td>13.5</td></tr><tr><td>RefCOCOg</td><td>16.1</td><td>13.5</td></tr><tr><td>GL-CLIP</td><td>N/A</td><td>23.6</td><td>23.0</td></tr><tr><td>TAS</td><td>N/A</td><td>25.6</td><td>-</td></tr><tr><td>Ref-Diff</td><td>N/A</td><td>29.4</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>N/A</td><td>38.1</td><td>37.9</td></tr></table>",
|
| 986 |
+
"bbox": [
|
| 987 |
+
521,
|
| 988 |
+
315,
|
| 989 |
+
911,
|
| 990 |
+
479
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 5
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "Table 2: Comparison of oIoU on PhraseCut for different supervised and zero-shot methods.",
|
| 997 |
+
"bbox": [
|
| 998 |
+
514,
|
| 999 |
+
488,
|
| 1000 |
+
911,
|
| 1001 |
+
517
|
| 1002 |
+
],
|
| 1003 |
+
"page_idx": 5
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "text",
|
| 1007 |
+
"text": "This approach ensures that the selected mask aligns with the regions of interest highlighted by the Grad-CAM heatmap, thereby ensuring the precision and efficacy of RIS.",
|
| 1008 |
+
"bbox": [
|
| 1009 |
+
514,
|
| 1010 |
+
544,
|
| 1011 |
+
913,
|
| 1012 |
+
588
|
| 1013 |
+
],
|
| 1014 |
+
"page_idx": 5
|
| 1015 |
+
},
|
| 1016 |
+
{
|
| 1017 |
+
"type": "text",
|
| 1018 |
+
"text": "Experiments",
|
| 1019 |
+
"text_level": 1,
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
658,
|
| 1022 |
+
599,
|
| 1023 |
+
769,
|
| 1024 |
+
617
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 5
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "text",
|
| 1030 |
+
"text": "Experimental Settings",
|
| 1031 |
+
"text_level": 1,
|
| 1032 |
+
"bbox": [
|
| 1033 |
+
516,
|
| 1034 |
+
619,
|
| 1035 |
+
691,
|
| 1036 |
+
636
|
| 1037 |
+
],
|
| 1038 |
+
"page_idx": 5
|
| 1039 |
+
},
|
| 1040 |
+
{
|
| 1041 |
+
"type": "text",
|
| 1042 |
+
"text": "Datasets and metrics. We employ the RefCOCO (Nagaraja, Morariu, and Davis 2016), RefCOCO+ (Nagaraja, Morariu, and Davis 2016), RefCOCOg (Kazemzadeh et al. 2014; Mao et al. 2016), and PhraseCut datasets (Wu et al. 2020) for evaluating the proposed zero-shot methods. RefCOCO with shorter expressions (average 1.6 nouns, 3.6 words) contains massive positional phrases (50%), especially those with direct direction clues like \"left\" or \"right\". In contrast, RefCOCO+ focuses on the attribute phrases with the same average expression length. RefCOCOg is a more challenging benchmark that has longer phrases (average 2.8 nouns, 8.4 words) and complex expressions. To verify the effectiveness of the mode in out-of-domains, we adapt our model to the PhraseCut dataset which contains the additional 1271 categories in the test split based on 80 in COCO. Following (Sun et al. 2024; Yu, Seo, and Son 2023; Han et al. 2024), we utilize the mean Intersection over Union (mIoU) for RefCOCO series, a common metric for RIS. Following (Yu,",
|
| 1043 |
+
"bbox": [
|
| 1044 |
+
514,
|
| 1045 |
+
638,
|
| 1046 |
+
913,
|
| 1047 |
+
890
|
| 1048 |
+
],
|
| 1049 |
+
"page_idx": 5
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"type": "text",
|
| 1053 |
+
"text": "(a) Self-correction effect for positional phrases. the smaller bird at the left",
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
174,
|
| 1056 |
+
66,
|
| 1057 |
+
741,
|
| 1058 |
+
83
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 6
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "image",
|
| 1064 |
+
"img_path": "images/33ad422b9dc0f5a4dfa8a82c14215fb0d3e6bcb90fd01a6a6cf7fdf74e165f64.jpg",
|
| 1065 |
+
"image_caption": [
|
| 1066 |
+
"Ours"
|
| 1067 |
+
],
|
| 1068 |
+
"image_footnote": [],
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
196,
|
| 1071 |
+
85,
|
| 1072 |
+
823,
|
| 1073 |
+
140
|
| 1074 |
+
],
|
| 1075 |
+
"page_idx": 6
|
| 1076 |
+
},
|
| 1077 |
+
{
|
| 1078 |
+
"type": "text",
|
| 1079 |
+
"text": "(b) Strong robustness of out-domain phrases. the arm of the photo not being pictured",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
174,
|
| 1082 |
+
154,
|
| 1083 |
+
821,
|
| 1084 |
+
169
|
| 1085 |
+
],
|
| 1086 |
+
"page_idx": 6
|
| 1087 |
+
},
|
| 1088 |
+
{
|
| 1089 |
+
"type": "image",
|
| 1090 |
+
"img_path": "images/7355f2a9aea4db943bcff9793c4246216dc319c524e9c88adac5bd998fec19ce.jpg",
|
| 1091 |
+
"image_caption": [
|
| 1092 |
+
"Ours"
|
| 1093 |
+
],
|
| 1094 |
+
"image_footnote": [],
|
| 1095 |
+
"bbox": [
|
| 1096 |
+
207,
|
| 1097 |
+
170,
|
| 1098 |
+
815,
|
| 1099 |
+
246
|
| 1100 |
+
],
|
| 1101 |
+
"page_idx": 6
|
| 1102 |
+
},
|
| 1103 |
+
{
|
| 1104 |
+
"type": "text",
|
| 1105 |
+
"text": "(c) Strong robustness of out-domain categories. metal bridge",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
176,
|
| 1108 |
+
258,
|
| 1109 |
+
645,
|
| 1110 |
+
275
|
| 1111 |
+
],
|
| 1112 |
+
"page_idx": 6
|
| 1113 |
+
},
|
| 1114 |
+
{
|
| 1115 |
+
"type": "image",
|
| 1116 |
+
"img_path": "images/62247300e6057b2e01e5b801ce3c009f794b9b7e7c3002105e9cbe12caf491ce.jpg",
|
| 1117 |
+
"image_caption": [
|
| 1118 |
+
"Figure 5: The qualitative comparisons with GL-CLIP. (a) The self-correction effect is brought by our IGRS, especially for positional phrases. (b) For the unseen phrases like \"not\", our model shows better robustness. (c) shows the gathering effect of IteRPrimE with high confidence to select the whole mask instead of a part like GL-CLIP."
|
| 1119 |
+
],
|
| 1120 |
+
"image_footnote": [],
|
| 1121 |
+
"bbox": [
|
| 1122 |
+
199,
|
| 1123 |
+
277,
|
| 1124 |
+
823,
|
| 1125 |
+
359
|
| 1126 |
+
],
|
| 1127 |
+
"page_idx": 6
|
| 1128 |
+
},
|
| 1129 |
+
{
|
| 1130 |
+
"type": "table",
|
| 1131 |
+
"img_path": "images/127dfcbc8d22b221564e5d09b96a974ed38997c3db11f21970ceeca87f31b9cc.jpg",
|
| 1132 |
+
"table_caption": [],
|
| 1133 |
+
"table_footnote": [],
|
| 1134 |
+
"table_body": "<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td></tr><tr><td>Overall Mean</td><td>43.4</td><td>41.3</td></tr><tr><td>GVLP (Shen et al. 2024)</td><td>45.0</td><td>41.9</td></tr><tr><td>Global Augment</td><td>46.5</td><td>45.7</td></tr><tr><td>Local Augment</td><td>45.3</td><td>42.3</td></tr><tr><td>PWEM</td><td>46.5</td><td>45.8</td></tr></table>",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
83,
|
| 1137 |
+
436,
|
| 1138 |
+
480,
|
| 1139 |
+
520
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 6
|
| 1142 |
+
},
|
| 1143 |
+
{
|
| 1144 |
+
"type": "table",
|
| 1145 |
+
"img_path": "images/e57c458892a9cda27e85fb24c6860f113ccc31ddaa85396d6fbaed610b8de7bc.jpg",
|
| 1146 |
+
"table_caption": [
|
| 1147 |
+
"Table 3: Comparison of methods with different Grad-CAM generation methods on RefCOCO testA and RefCOCOg test datasets."
|
| 1148 |
+
],
|
| 1149 |
+
"table_footnote": [],
|
| 1150 |
+
"table_body": "<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td><td>RefCOCOg val</td></tr><tr><td>Mask image</td><td>46.3</td><td>45.3</td><td>45.2</td></tr><tr><td>Mask feature</td><td>46.5</td><td>45.8</td><td>46.0</td></tr></table>",
|
| 1151 |
+
"bbox": [
|
| 1152 |
+
84,
|
| 1153 |
+
592,
|
| 1154 |
+
483,
|
| 1155 |
+
635
|
| 1156 |
+
],
|
| 1157 |
+
"page_idx": 6
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "text",
|
| 1161 |
+
"text": "Table 4: Performance comparison of masking out the salient regions in the image level and feature level (attention mask).",
|
| 1162 |
+
"bbox": [
|
| 1163 |
+
81,
|
| 1164 |
+
643,
|
| 1165 |
+
478,
|
| 1166 |
+
672
|
| 1167 |
+
],
|
| 1168 |
+
"page_idx": 6
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "text",
|
| 1172 |
+
"text": "Seo, and Son 2023; Wu et al. 2020), we report the overall Intersection over Union (oIoU) for the PhraseCut dataset.",
|
| 1173 |
+
"bbox": [
|
| 1174 |
+
81,
|
| 1175 |
+
705,
|
| 1176 |
+
478,
|
| 1177 |
+
733
|
| 1178 |
+
],
|
| 1179 |
+
"page_idx": 6
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "text",
|
| 1183 |
+
"text": "Implementation details. We use the commonly used mask proposal network, Mask2Former (Cheng et al. 2022; Liang et al. 2023), to obtain 200 instance-level mask proposals. Following (Shen et al. 2024; Lee et al. 2023), we utilize the base model ALBEF to study the Grad-CAM for localization and it is generated in the 8th cross-attention layer. In processing the input text, a prefatory phrase \"there is a\" is appended. The hyperparameter balancing factor $\\lambda$ , upper connecting limit $\\kappa$ , iterative number $\\nu$ , and binarization threshold $\\theta$ are 0.8, 12, 3, and 0.5, respectively. All experiments are conducted on a 24 GB RTX 3090 GPU.",
|
| 1184 |
+
"bbox": [
|
| 1185 |
+
81,
|
| 1186 |
+
734,
|
| 1187 |
+
480,
|
| 1188 |
+
888
|
| 1189 |
+
],
|
| 1190 |
+
"page_idx": 6
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "text",
|
| 1194 |
+
"text": "Results",
|
| 1195 |
+
"text_level": 1,
|
| 1196 |
+
"bbox": [
|
| 1197 |
+
517,
|
| 1198 |
+
438,
|
| 1199 |
+
578,
|
| 1200 |
+
452
|
| 1201 |
+
],
|
| 1202 |
+
"page_idx": 6
|
| 1203 |
+
},
|
| 1204 |
+
{
|
| 1205 |
+
"type": "text",
|
| 1206 |
+
"text": "Main results. As shown in Table 1, IteRPrimE almost achieves the best performance on all three datasets, especially in the testA splits of RefCOCO and RefCOCO+. It outperforms the SOTA TAS method with a $0.4\\%$ average improvement. For all the splits of RefCOCO and RefCOCO+ rich in short positional phrases, our model obtains an average of $40.2\\%$ and $43.7\\%$ compared to the $39.0\\%$ and $43.1\\%$ of TAS, respectively. Therefore, our method is more robust to the positional information compared to the CLIP-based paradigms. However, the model may have the relatively weaker capability of complex expressions shown in RefCOCOg, which can be attributed to the data limitation and gap in the pertaining stage. Additionally, by using the additional captioner of BLIP2 (Li et al. 2023) and SAM (Kirillov et al. 2023), TAS maintains the best performance across some splits, especially for complex phrases, but it has the drawback of low throughput and heavy volumes.",
|
| 1207 |
+
"bbox": [
|
| 1208 |
+
514,
|
| 1209 |
+
458,
|
| 1210 |
+
913,
|
| 1211 |
+
694
|
| 1212 |
+
],
|
| 1213 |
+
"page_idx": 6
|
| 1214 |
+
},
|
| 1215 |
+
{
|
| 1216 |
+
"type": "text",
|
| 1217 |
+
"text": "Zero-shot evaluation on unseen domain. Notably, as shown in Table 2, our model has high capabilities of cross-domain zero-shot transfer compared to other zero-shot SOTA and the existing supervised methods CRIS (Wang et al. 2022b) and LAVT (Yang et al. 2022). IteRPrimE significantly outperforms both kinds of methods in the outdomain scenarios. Upon assessment within a subset of categories not present in the RefCOCO datasets (denoted as the \"Unseen\" column), our model shows the best robustness compared to the supervised methods with huge performance degradation. Notably, the underperformance of the TAS model on this dataset may be attributed to the predominance of complex outdoor scenes within the dataset. In such intricate environments, the reliance on an additional",
|
| 1218 |
+
"bbox": [
|
| 1219 |
+
514,
|
| 1220 |
+
694,
|
| 1221 |
+
913,
|
| 1222 |
+
888
|
| 1223 |
+
],
|
| 1224 |
+
"page_idx": 6
|
| 1225 |
+
},
|
| 1226 |
+
{
|
| 1227 |
+
"type": "table",
|
| 1228 |
+
"img_path": "images/f1730dcb5b7000ddef95e81fa9871d8ad0a8342a48b21fda9b40d56a89dbcf45.jpg",
|
| 1229 |
+
"table_caption": [],
|
| 1230 |
+
"table_footnote": [],
|
| 1231 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">RefCOCOg test</td><td colspan=\"3\">RefCOCO testA</td></tr><tr><td>Position</td><td>Others</td><td>Overall</td><td>Position</td><td>Others</td><td>Overall</td></tr><tr><td>GVLP w/o IGRS</td><td>33.0</td><td>43.6</td><td>41.3</td><td>34.7</td><td>53.2</td><td>44.7</td></tr><tr><td>GVLP w/IGRS</td><td>33.7</td><td>44.3</td><td>41.9</td><td>35.1</td><td>53.6</td><td>45.0</td></tr><tr><td>PWEM w/o IGRS</td><td>36.4</td><td>47.5</td><td>45.1</td><td>36.1</td><td>54.8</td><td>46.1</td></tr><tr><td>PWEM w/IGRS</td><td>37.4</td><td>48.2</td><td>45.8</td><td>36.5</td><td>55.0</td><td>46.5</td></tr></table>",
|
| 1232 |
+
"bbox": [
|
| 1233 |
+
88,
|
| 1234 |
+
66,
|
| 1235 |
+
475,
|
| 1236 |
+
132
|
| 1237 |
+
],
|
| 1238 |
+
"page_idx": 7
|
| 1239 |
+
},
|
| 1240 |
+
{
|
| 1241 |
+
"type": "text",
|
| 1242 |
+
"text": "Table 5: Ablation studies of the proposed PWEM and IGRS.",
|
| 1243 |
+
"bbox": [
|
| 1244 |
+
83,
|
| 1245 |
+
141,
|
| 1246 |
+
478,
|
| 1247 |
+
157
|
| 1248 |
+
],
|
| 1249 |
+
"page_idx": 7
|
| 1250 |
+
},
|
| 1251 |
+
{
|
| 1252 |
+
"type": "text",
|
| 1253 |
+
"text": "captioning model for annotation by TAS could potentially introduce greater noise, thereby compromising the model's performance. However, facing complex environmental contexts, our model's efficacy in localizing pertinent regions is attributed to its retention of spatial perception. Concurrently, the integration of IGRS and PWEM has further bolstered IteRPrimE's proficiency in addressing the complicated interrelationships among objects within the scene, thereby leading to this commendable performance.",
|
| 1254 |
+
"bbox": [
|
| 1255 |
+
81,
|
| 1256 |
+
185,
|
| 1257 |
+
478,
|
| 1258 |
+
310
|
| 1259 |
+
],
|
| 1260 |
+
"page_idx": 7
|
| 1261 |
+
},
|
| 1262 |
+
{
|
| 1263 |
+
"type": "text",
|
| 1264 |
+
"text": "Qualitative comparisons. Figure 5 shows the comparisons with GL-CLIP (Yu, Seo, and Son 2023). First, we demonstrate that our IGRS module possesses a self-corrective mechanism, the same answer as GL-CLIP initially before refining its predictions by revisiting initially overlooked regions. In Figure 5 (b), the scarcity of such negative phrases in the training set is offset by our model's robustness. Finally, we address the limited highlighted region of initial Grad-CAM representation by the IGRS, demonstrated in Figure 5 (c). The more gathering of the Grad-CAM, the more likelihood that the correct instance mask will be selected instead of the part.",
|
| 1265 |
+
"bbox": [
|
| 1266 |
+
81,
|
| 1267 |
+
311,
|
| 1268 |
+
480,
|
| 1269 |
+
478
|
| 1270 |
+
],
|
| 1271 |
+
"page_idx": 7
|
| 1272 |
+
},
|
| 1273 |
+
{
|
| 1274 |
+
"type": "text",
|
| 1275 |
+
"text": "Ablation Study",
|
| 1276 |
+
"text_level": 1,
|
| 1277 |
+
"bbox": [
|
| 1278 |
+
83,
|
| 1279 |
+
491,
|
| 1280 |
+
205,
|
| 1281 |
+
508
|
| 1282 |
+
],
|
| 1283 |
+
"page_idx": 7
|
| 1284 |
+
},
|
| 1285 |
+
{
|
| 1286 |
+
"type": "text",
|
| 1287 |
+
"text": "Effect of PWEM. According to Equation (2), the mean operation is essential for the generation of Grad-CAM and deeply influences the Grad-CAM representational accuracy. Therefore, Table 3 presents the results of the ablation study, examining the impact of various aggregation configurations for Grad-CAM generation. The \"Overall Mean\" is the direct mean of all the tokens' Grad-CAM, but the GVLP uses the selected effective tokens for averaging (Shen et al. 2024). The remaining is introduced before as shown in Equation (3) and Equation (4). Compared to the previous methods, the proposed PWEM can significantly improve the performances because it can save the examples that fail due to the weak complex semantic understanding between the main word and the other contexts. Additionally, global augmentation shows stronger potential than local because it could dominate the effect during aggregation.",
|
| 1288 |
+
"bbox": [
|
| 1289 |
+
81,
|
| 1290 |
+
512,
|
| 1291 |
+
478,
|
| 1292 |
+
734
|
| 1293 |
+
],
|
| 1294 |
+
"page_idx": 7
|
| 1295 |
+
},
|
| 1296 |
+
{
|
| 1297 |
+
"type": "text",
|
| 1298 |
+
"text": "Effect of mask position in IGRS. Table 4 evaluates the position that the binary mask $M$ applied. \"Mask image\" means adding the mask into the original image so that the indicated regions are masked out, similar to GL-CLIP. However, this can degrade the performance due to the absence of relative relationships of regions. Our method for attention masking in the cross-attention layer is more robust, with improvement on all three splits.",
|
| 1299 |
+
"bbox": [
|
| 1300 |
+
81,
|
| 1301 |
+
734,
|
| 1302 |
+
478,
|
| 1303 |
+
845
|
| 1304 |
+
],
|
| 1305 |
+
"page_idx": 7
|
| 1306 |
+
},
|
| 1307 |
+
{
|
| 1308 |
+
"type": "text",
|
| 1309 |
+
"text": "Effect of our proposed PWEM and IGRS. Table 5 evaluates the performance improvements achieved by integrating different modules within our methodology. The \"Posi",
|
| 1310 |
+
"bbox": [
|
| 1311 |
+
81,
|
| 1312 |
+
845,
|
| 1313 |
+
478,
|
| 1314 |
+
888
|
| 1315 |
+
],
|
| 1316 |
+
"page_idx": 7
|
| 1317 |
+
},
|
| 1318 |
+
{
|
| 1319 |
+
"type": "image",
|
| 1320 |
+
"img_path": "images/9a968c07ebd3a1d4714f8961c10a48897c0db9feae1cb77866c30eae1fecdf0a.jpg",
|
| 1321 |
+
"image_caption": [],
|
| 1322 |
+
"image_footnote": [],
|
| 1323 |
+
"bbox": [
|
| 1324 |
+
606,
|
| 1325 |
+
66,
|
| 1326 |
+
823,
|
| 1327 |
+
159
|
| 1328 |
+
],
|
| 1329 |
+
"page_idx": 7
|
| 1330 |
+
},
|
| 1331 |
+
{
|
| 1332 |
+
"type": "image",
|
| 1333 |
+
"img_path": "images/5c3f7f18776a848741c7e8c270b3fc983f1715b52e524e0eaac083f023701ca2.jpg",
|
| 1334 |
+
"image_caption": [
|
| 1335 |
+
"(a) Iteration times",
|
| 1336 |
+
"(b) $\\lambda$",
|
| 1337 |
+
"Figure 6: The line charts of two hyperparameters."
|
| 1338 |
+
],
|
| 1339 |
+
"image_footnote": [],
|
| 1340 |
+
"bbox": [
|
| 1341 |
+
612,
|
| 1342 |
+
172,
|
| 1343 |
+
821,
|
| 1344 |
+
265
|
| 1345 |
+
],
|
| 1346 |
+
"page_idx": 7
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "text",
|
| 1350 |
+
"text": "tion” category encompasses those test samples that explicitly feature positional expressions. Conversely, the “Others” category serves as the complement. These results demonstrate that our modules not only improve general performance but also enhance the model’s ability to manage complex semantic and spatial relations, particularly in positional contexts.",
|
| 1351 |
+
"bbox": [
|
| 1352 |
+
514,
|
| 1353 |
+
337,
|
| 1354 |
+
911,
|
| 1355 |
+
434
|
| 1356 |
+
],
|
| 1357 |
+
"page_idx": 7
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "text",
|
| 1361 |
+
"text": "Different assembly of iteration times and $\\lambda$ . Figure 6 presents the ablation study of two hyperparameters in IGRS, analyzing the effect of varying iteration times and $\\lambda$ on the RefCOCO testA dataset. Figure 6 (a) shows that as the number of iterations increases from 1 to 3, the metric improves, peaking at $46.5\\%$ . However, beyond three iterations, the performance change becomes minimal. Therefore, selecting 3 iterations is optimal for balancing performance and time efficiency. Figure 6 (b) presents another line chart analyzing the impact of $\\lambda$ in the Grad-CAM updation. The metric increases as $\\lambda$ is gradually raised from 0 to 0.2 while exceeding this point, performance declines with higher alpha values. Overall, the optimal value of $\\lambda$ is 0.2.",
|
| 1362 |
+
"bbox": [
|
| 1363 |
+
514,
|
| 1364 |
+
435,
|
| 1365 |
+
913,
|
| 1366 |
+
616
|
| 1367 |
+
],
|
| 1368 |
+
"page_idx": 7
|
| 1369 |
+
},
|
| 1370 |
+
{
|
| 1371 |
+
"type": "text",
|
| 1372 |
+
"text": "Conclusion",
|
| 1373 |
+
"text_level": 1,
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
665,
|
| 1376 |
+
631,
|
| 1377 |
+
764,
|
| 1378 |
+
646
|
| 1379 |
+
],
|
| 1380 |
+
"page_idx": 7
|
| 1381 |
+
},
|
| 1382 |
+
{
|
| 1383 |
+
"type": "text",
|
| 1384 |
+
"text": "This paper presents IteRPrimE, a novel framework for Zeroshot Referring Image Segmentation (RIS), addressing the limitations of previous methods in handling positional sensitivity and complex semantic relationships. By incorporating an Iterative Grad-CAM Refinement Strategy (IGRS) and a Primary Word Emphasis Module (PWEM), IteRPrimE enhances the model's ability to accurately focus on target regions and manage semantic nuances. Extensive experiments on RefCOCO $+ / + \\mathrm{g}$ and PhraseCut benchmarks demonstrate that IteRPrimE significantly outperforms previous state-of-the-art zero-shot methods, particularly in out-of-domain contexts. These findings highlight the framework's potential to advance zero-shot RIS by improving model sensitivity to positional and semantic details. Future research endeavors may seek to extend the Grad-CAM-guided RIS paradigm to encompass all segmentation tasks across varying levels of granularity with linguistic directives.",
|
| 1385 |
+
"bbox": [
|
| 1386 |
+
514,
|
| 1387 |
+
652,
|
| 1388 |
+
913,
|
| 1389 |
+
888
|
| 1390 |
+
],
|
| 1391 |
+
"page_idx": 7
|
| 1392 |
+
},
|
| 1393 |
+
{
|
| 1394 |
+
"type": "text",
|
| 1395 |
+
"text": "Acknowledgments",
|
| 1396 |
+
"text_level": 1,
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
202,
|
| 1399 |
+
66,
|
| 1400 |
+
359,
|
| 1401 |
+
83
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 8
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "text",
|
| 1407 |
+
"text": "This work was supported by Shenzhen Science and Technology Program under Grant CJGJZD20220517142402006.",
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
83,
|
| 1410 |
+
88,
|
| 1411 |
+
480,
|
| 1412 |
+
118
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 8
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "text",
|
| 1418 |
+
"text": "References",
|
| 1419 |
+
"text_level": 1,
|
| 1420 |
+
"bbox": [
|
| 1421 |
+
233,
|
| 1422 |
+
132,
|
| 1423 |
+
330,
|
| 1424 |
+
148
|
| 1425 |
+
],
|
| 1426 |
+
"page_idx": 8
|
| 1427 |
+
},
|
| 1428 |
+
{
|
| 1429 |
+
"type": "list",
|
| 1430 |
+
"sub_type": "ref_text",
|
| 1431 |
+
"list_items": [
|
| 1432 |
+
"Bai, S.; Liu, Y.; Han, Y.; Zhang, H.; and Tang, Y. 2024. Self-calibrated clip for training-free open-vocabulary segmentation. arXiv preprint arXiv:2411.15869.",
|
| 1433 |
+
"Chang, H.; Zhang, H.; Jiang, L.; Liu, C.; and Freeman, W. T. 2022. Maskgit: Masked generative image transformer. In CVPR, 11315-11325.",
|
| 1434 |
+
"Cheng, B.; Misra, I.; Schwing, A. G.; Kirillov, A.; and Girdhar, R. 2022. Masked-attention mask transformer for universal image segmentation. In CVPR, 1290–1299.",
|
| 1435 |
+
"Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.",
|
| 1436 |
+
"Ding, H.; Liu, C.; He, S.; Jiang, X.; and Loy, C. C. 2023. MeViS: A large-scale benchmark for video segmentation with motion expressions. In CVPR, 2694-2703.",
|
| 1437 |
+
"Ding, H.; Liu, C.; Wang, S.; and Jiang, X. 2021. Vision-language transformer and query generation for referring segmentation. In ICCV, 16321-16330.",
|
| 1438 |
+
"Ding, J.; Xue, N.; Xia, G.-S.; and Dai, D. 2022. Decoupling zero-shot semantic segmentation. In CVPR, 11583-11592.",
|
| 1439 |
+
"Han, K.; Liu, Y.; Liew, J. H.; Ding, H.; Liu, J.; Wang, Y.; Tang, Y.; Yang, Y.; Feng, J.; Zhao, Y.; et al. 2023. Global knowledge calibration for fast open-vocabulary segmentation. In CVPR, 797-807.",
|
| 1440 |
+
"Han, Z.; Zhu, F.; Lao, Q.; and Jiang, H. 2024. Zero-shot referring expression comprehension via structural similarity between images and captions. In CVPR, 14364-14374.",
|
| 1441 |
+
"He, S.; Guo, T.; Dai, T.; Qiao, R.; Wu, C.; Shu, X.; and Ren, B. 2022. VLMAE: Vision-language masked autoencoder. arXiv preprint arXiv:2208.09374.",
|
| 1442 |
+
"Jing, Y.; Kong, T.; Wang, W.; Wang, L.; Li, L.; and Tan, T. 2021. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 9858-9867.",
|
| 1443 |
+
"Kazemzadeh, S.; Ordonez, V.; Matten, M.; and Berg, T. 2014. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 787-798.",
|
| 1444 |
+
"Kim, N.; Kim, D.; Lan, C.; Zeng, W.; and Kwak, S. 2022. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 18145-18154.",
|
| 1445 |
+
"Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; et al. 2023. Segment anything. In ICCV, 4015-4026.",
|
| 1446 |
+
"Lai, X.; Tian, Z.; Chen, Y.; Li, Y.; Yuan, Y.; Liu, S.; and Jia, J. 2024. Lisa: Reasoning segmentation via large language model. In CVPR, 9579-9589.",
|
| 1447 |
+
"Lee, J.; Lee, S.; Nam, J.; Yu, S.; Do, J.; and Taghavi, T. 2023. Weakly supervised referring image segmentation with intra-chunk and inter-chunk consistency. In ICCV, 21870-21881."
|
| 1448 |
+
],
|
| 1449 |
+
"bbox": [
|
| 1450 |
+
84,
|
| 1451 |
+
152,
|
| 1452 |
+
480,
|
| 1453 |
+
888
|
| 1454 |
+
],
|
| 1455 |
+
"page_idx": 8
|
| 1456 |
+
},
|
| 1457 |
+
{
|
| 1458 |
+
"type": "list",
|
| 1459 |
+
"sub_type": "ref_text",
|
| 1460 |
+
"list_items": [
|
| 1461 |
+
"Li, C.; Xu, H.; Tian, J.; Wang, W.; Yan, M.; Bi, B.; Ye, J.; Chen, H.; Xu, G.; Cao, Z.; et al. 2022. mplug: Effective and efficient vision-language learning by cross-modal skip-connections. arXiv preprint arXiv:2205.12005.",
|
| 1462 |
+
"Li, J.; Li, D.; Savarese, S.; and Hoi, S. 2023. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 19730–19742. PMLR.",
|
| 1463 |
+
"Li, J.; Selvaraju, R.; Gotmare, A.; Joty, S.; Xiong, C.; and Hoi, S. C. H. 2021. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34: 9694-9705.",
|
| 1464 |
+
"Liang, F.; Wu, B.; Dai, X.; Li, K.; Zhao, Y.; Zhang, H.; Zhang, P.; Vajda, P.; and Marculescu, D. 2023. Open-vocabulary semantic segmentation with mask-adapted clip. In CVPR, 7061-7070.",
|
| 1465 |
+
"Liu, C.; Ding, H.; and Jiang, X. 2023. Gres: Generalized referring expression segmentation. In CVPR, 23592-23601.",
|
| 1466 |
+
"Liu, R.; Liu, C.; Bai, Y.; and Yuille, A. L. 2019. Clevr-ref+: Diagnosing visual reasoning with referring expressions. In CVPR, 4185-4194.",
|
| 1467 |
+
"Liu, Y.; Bai, S.; Li, G.; Wang, Y.; and Tang, Y. 2024a. Open-vocabulary segmentation with semantic-assisted calibration. In CVPR, 3491-3500.",
|
| 1468 |
+
"Liu, Y.; Zhang, C.; Wang, Y.; Wang, J.; Yang, Y.; and Tang, Y. 2024b. Universal segmentation at arbitrary granularity with language instruction. In CVPR, 3459-3469.",
|
| 1469 |
+
"Luo, J.; Khandelwal, S.; Sigal, L.; and Li, B. 2024a. Emergent Open-Vocabulary Semantic Segmentation from Off-the-shelf Vision-Language Models. In CVPR, 4029-4040.",
|
| 1470 |
+
"Luo, Z.; Xiao, Y.; Liu, Y.; Li, S.; Wang, Y.; Tang, Y.; Li, X.; and Yang, Y. 2024b. Soc: Semantic-assisted object cluster for referring video object segmentation. Advances in Neural Information Processing Systems, 36.",
|
| 1471 |
+
"Mao, J.; Huang, J.; Toshev, A.; Camburu, O.; Yuille, A. L.; and Murphy, K. 2016. Generation and comprehension of unambiguous object descriptions. In CVPR, 11-20.",
|
| 1472 |
+
"Nagaraja, V. K.; Morariu, V. I.; and Davis, L. S. 2016. Modeling context between objects for referring expression understanding. In ECCV, 792-807. Springer.",
|
| 1473 |
+
"Ni, M.; Zhang, Y.; Feng, K.; Li, X.; Guo, Y.; and Zuo, W. 2023. Ref-diff: Zero-shot referring image segmentation with generative models. arXiv preprint arXiv:2308.16777.",
|
| 1474 |
+
"Radford, A.; Kim, J. W.; Hallacy, C.; Ramesh, A.; Goh, G.; Agarwal, S.; Sastry, G.; Askell, A.; Mishkin, P.; Clark, J.; et al. 2021. Learning transferable visual models from natural language supervision. In ICML, 8748-8763. PMLR.",
|
| 1475 |
+
"Rombach, R.; Blattmann, A.; Lorenz, D.; Esser, P.; and Omer, B. 2022. High-resolution image synthesis with latent diffusion models. In CVPR, 10684-10695.",
|
| 1476 |
+
"Selvaraju, R. R.; Cogswell, M.; Das, A.; Vedantam, R.; Parikh, D.; and Batra, D. 2017. Grad-cam: Visual explanations from deep networks via gradient-based localization. In CVPR, 618-626."
|
| 1477 |
+
],
|
| 1478 |
+
"bbox": [
|
| 1479 |
+
517,
|
| 1480 |
+
68,
|
| 1481 |
+
911,
|
| 1482 |
+
888
|
| 1483 |
+
],
|
| 1484 |
+
"page_idx": 8
|
| 1485 |
+
},
|
| 1486 |
+
{
|
| 1487 |
+
"type": "list",
|
| 1488 |
+
"sub_type": "ref_text",
|
| 1489 |
+
"list_items": [
|
| 1490 |
+
"Shah, N. A.; VS, V.; and Patel, V. M. 2024. LQMFormer: Language-aware Query Mask Transformer for Referring Image Segmentation. In CVPR, 12903-12913.",
|
| 1491 |
+
"Shen, H.; Zhao, T.; Zhu, M.; and Yin, J. 2024. Ground-VLP: Harnessing Zero-Shot Visual Grounding from Vision-Language Pre-training and Open-Vocabulary Object Detection. In AAAI, volume 38, 4766-4775.",
|
| 1492 |
+
"Shin, G.; Xie, W.; and Albanie, S. 2022. Reco: Retrieve and co-segment for zero-shot transfer. Advances in Neural Information Processing Systems, 35: 33754-33767.",
|
| 1493 |
+
"Strudel, R.; Laptev, I.; and Schmid, C. 2022. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725.",
|
| 1494 |
+
"Sun, S.; Li, R.; Torr, P.; Gu, X.; and Li, S. 2024. Clip as rnn: Segment countless visual concepts without training endeavor. In CVPR, 13171-13182.",
|
| 1495 |
+
"Suo, Y.; Zhu, L.; and Yang, Y. 2023. Text augmented spatial-aware zero-shot referring image segmentation. EMNLP.",
|
| 1496 |
+
"Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. Advances in neural information processing systems, 30.",
|
| 1497 |
+
"Wang, H.; Zhan, Y.; Liu, L.; Ding, L.; Yang, Y.; and Yu, J. 2024a. Towards Alleviating Text-to-Image Retrieval Hallucination for CLIP in Zero-shot Learning. arXiv preprint arXiv:2402.18400.",
|
| 1498 |
+
"Wang, X.; Yu, Z.; De Mello, S.; Kautz, J.; Anandkumar, A.; Shen, C.; and Alvarez, J. M. 2022a. Freesolo: Learning to segment objects without annotations. In CVPR, 14176-14186.",
|
| 1499 |
+
"Wang, Y.; Zhao, R.; and Sun, Z. 2023. Efficient Remote Sensing Transformer for Coastline Detection with Sentinel-2 Satellite Imagery. In IGARSS, 5439-5442. IEEE.",
|
| 1500 |
+
"Wang, Y.; Zhao, R.; Wei, S.; Ni, J.; Wu, M.; Luo, Y.; and Luo, C. 2024b. Convolution Meets Transformer: Efficient Hybrid Transformer for Semantic Segmentation with Very High Resolution Imagery. In IGARSS 2024, 9688-9691. IEEE.",
|
| 1501 |
+
"Wang, Z.; Lu, Y.; Li, Q.; Tao, X.; Guo, Y.; Gong, M.; and Liu, T. 2022b. Cris: Clip-driven referring image segmentation. In CVPR, 11686-11695.",
|
| 1502 |
+
"Wu, C.; Lin, Z.; Cohen, S.; Bui, T.; and Maji, S. 2020. Phrasecut: Language-based image segmentation in the wild. In CVPR, 10216-10225.",
|
| 1503 |
+
"Xu, H.; Ye, Q.; Yan, M.; Shi, Y.; Ye, J.; Xu, Y.; Li, C.; Bi, B.; Qian, Q.; Wang, W.; et al. 2023a. mplug-2: A modularized multi-modal foundation model across text, image and video. In ICML, 38728-38748. PMLR.",
|
| 1504 |
+
"Xu, J.; De Mello, S.; Liu, S.; Byeon, W.; Breuel, T.; Kautz, J.; and Wang, X. 2022. Groupvit: Semantic segmentation emerges from text supervision. In CVPR, 18134-18144.",
|
| 1505 |
+
"Xu, X.; Wu, C.; Rosenman, S.; Lal, V.; Che, W.; and Duan, N. 2023b. Bridgetower: Building bridges between encoders in vision-language representation learning. In AAAI, volume 37, 10637-10647."
|
| 1506 |
+
],
|
| 1507 |
+
"bbox": [
|
| 1508 |
+
83,
|
| 1509 |
+
68,
|
| 1510 |
+
478,
|
| 1511 |
+
888
|
| 1512 |
+
],
|
| 1513 |
+
"page_idx": 9
|
| 1514 |
+
},
|
| 1515 |
+
{
|
| 1516 |
+
"type": "list",
|
| 1517 |
+
"sub_type": "ref_text",
|
| 1518 |
+
"list_items": [
|
| 1519 |
+
"Yang, Z.; Wang, J.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2022. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 18155-18165.",
|
| 1520 |
+
"Yang, Z.; Wang, J.; Ye, X.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2024. Language-aware vision transformer for referring segmentation. IEEE TPAMI.",
|
| 1521 |
+
"Yu, J.; Wang, Z.; Vasudevan, V.; Yeung, L.; Seyedhosseini, M.; and Wu, Y. 2022. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917.",
|
| 1522 |
+
"Yu, S.; Seo, P. H.; and Son, J. 2023. Zero-shot referring image segmentation with global-local context features. In CVPR, 19456-19465.",
|
| 1523 |
+
"Zhou, C.; Loy, C. C.; and Dai, B. 2022. Extract free dense labels from clip. In ECCV, 696-712. Springer.",
|
| 1524 |
+
"Zhu, C.; and Chen, L. 2024. A survey on open-vocabulary detection and segmentation: Past, present, and future. IEEE TPAMI."
|
| 1525 |
+
],
|
| 1526 |
+
"bbox": [
|
| 1527 |
+
517,
|
| 1528 |
+
68,
|
| 1529 |
+
913,
|
| 1530 |
+
333
|
| 1531 |
+
],
|
| 1532 |
+
"page_idx": 9
|
| 1533 |
+
}
|
| 1534 |
+
]
|
data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_model.json
ADDED
|
@@ -0,0 +1,2244 @@
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.266,
|
| 8 |
+
0.058,
|
| 9 |
+
0.701
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2503.00936v1 [cs.CV] 2 Mar 2025"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.096,
|
| 18 |
+
0.12,
|
| 19 |
+
0.9,
|
| 20 |
+
0.162
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "IteRPrime: Zero-shot Referring Image Segmentation with Iterative Grad-CAM Refinement and Primary Word Emphasis"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.23,
|
| 29 |
+
0.178,
|
| 30 |
+
0.772,
|
| 31 |
+
0.197
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Yuji Wang*, Jingchen Ni*, Yong Liu, Chun Yuan, Yansong Tang†"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.23,
|
| 40 |
+
0.202,
|
| 41 |
+
0.768,
|
| 42 |
+
0.231
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Shenzhen International Graduate School, Tsinghua University *{yuji-wan24, njc24} @ mails.tsinghua.edu.cn, †tang.yansong@sz.tsinghua.edu.cn"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "title",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.25,
|
| 51 |
+
0.274,
|
| 52 |
+
0.314,
|
| 53 |
+
0.287
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Abstract"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.1,
|
| 62 |
+
0.298,
|
| 63 |
+
0.462,
|
| 64 |
+
0.651
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Zero-shot Referring Image Segmentation (RIS) identifies the instance mask that best aligns with a specified referring expression without training and fine-tuning, significantly reducing the labor-intensive annotation process. Despite achieving commendable results, previous CLIP-based models have a critical drawback: the models exhibit a notable reduction in their capacity to discern relative spatial relationships of objects. This is because they generate all possible masks on an image and evaluate each masked region for similarity to the given expression, often resulting in decreased sensitivity to direct positional clues in text inputs. Moreover, most methods have weak abilities to manage relationships between primary words and their contexts, causing confusion and reduced accuracy in identifying the correct target region. To address these challenges, we propose IteRPrimE (Iterative Grad-CAM Refinement and Primary word Emphasis), which leverages a saliency heatmap through Grad-CAM from a Vision-Language Pre-trained (VLP) model for image-text matching. An iterative Grad-CAM refinement strategy is introduced to progressively enhance the model's focus on the target region and overcome positional insensitivity, creating a self-correcting effect. Additionally, we design the Primary Word Emphasis module to help the model handle complex semantic relations, enhancing its ability to attend to the intended object. Extensive experiments conducted on the RefCOCO/+/g, and PhraseCut benchmarks demonstrate that IteRPrimE outperforms previous SOTA zero-shot methods, particularly excelling in out-of-domain scenarios."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.084,
|
| 73 |
+
0.668,
|
| 74 |
+
0.434,
|
| 75 |
+
0.684
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Code — https://github.com/VoyageWang/IteRPrimE"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.227,
|
| 84 |
+
0.705,
|
| 85 |
+
0.337,
|
| 86 |
+
0.721
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "Introduction"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.082,
|
| 95 |
+
0.727,
|
| 96 |
+
0.48,
|
| 97 |
+
0.825
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Referring Image Segmentation (RIS) requires the model to generate a pixel-level referred object mask based on a textual description, extending the applicability to various tasks such as robot interaction and image editing (Yang et al. 2024, 2022; Liu et al. 2024b; Lai et al. 2024; Luo et al. 2024b). Different from standard semantic segmentation (Wang, Zhao, and Sun 2023; Wang et al. 2024b; Han"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.517,
|
| 106 |
+
0.273,
|
| 107 |
+
0.915,
|
| 108 |
+
0.538
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "et al. 2023; Luo et al. 2024a; Bai et al. 2024), RIS necessitates the differentiation of instances within the same category and their relationships with other objects or the scene, which requires high demands on the semantic understanding and spatial perception of the model. However, annotating exact pairs of images, descriptions, and ground-truth masks is both expensive and time-intensive, as the annotation of a query needs a grasp of diverse positional and attributive details within the image (Liu, Ding, and Jiang 2023; Ding et al. 2023; Liu et al. 2019). Recent weakly supervised RIS techniques (Strudel, Laptev, and Schmid 2022; Lee et al. 2023; Xu et al. 2022) have been introduced to mitigate these annotation challenges, yet they still depend on paired data for training purposes and have relatively poor performance. In contrast, a zero-shot approach holds greater value. Leveraging vision-language pre-trained (VLP) models such as CLIP (Radford et al. 2021), this method efficiently generalizes across diverse concepts and unseen categories without further training and fine-tuning."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.517,
|
| 117 |
+
0.54,
|
| 118 |
+
0.913,
|
| 119 |
+
0.873
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "Existing methodologies to harness the characteristics of being unnecessary to fit training data presented by zero-shot learning often employ a two-stage pipeline, shown in Figure 1 (a). As a discriminator between the images masked by the candidate masks and the expression, CLIP is used to select the instance mask whose similarity score is the highest (Sun et al. 2024; Yu, Seo, and Son 2023; Suo, Zhu, and Yang 2023; Ni et al. 2023). However, we observed that these methods always malfunctioned when encountering text inputs with positional information such as \"left\" and \"right\". Due to only a single instance contained in a masked image, the absence of relative spatial perception can be the inherent limitation of these CLIP-based paradigms. Previous pieces of literature alleviate this issue by injecting the human priors or bias that explicitly prompts the CLIP with the given direction clues (Ni et al. 2023; Suo, Zhu, and Yang 2023). To be more specific, they manually design spatial decaying weights from 1 to 0 in the directions consistent with text phrases to make the model aware of positional information, but it can not generalize the scenarios out of predefined directions such as \"next to\". Additionally, the domain shift for CLIP from the natural image to the masked image can also impact the segmentation performance (Liu et al. 2024a; Ding et al. 2022; Zhu and Chen 2024)."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.534,
|
| 128 |
+
0.875,
|
| 129 |
+
0.913,
|
| 130 |
+
0.89
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Some researchers (Lee et al. 2023) have leveraged Grad"
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "page_footnote",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.082,
|
| 139 |
+
0.836,
|
| 140 |
+
0.48,
|
| 141 |
+
0.89
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "\\*These authors contributed equally. \n+Corresponding Author. \nCopyright © 2025, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved."
|
| 145 |
+
}
|
| 146 |
+
],
|
| 147 |
+
[
|
| 148 |
+
{
|
| 149 |
+
"type": "image",
|
| 150 |
+
"bbox": [
|
| 151 |
+
0.137,
|
| 152 |
+
0.075,
|
| 153 |
+
0.874,
|
| 154 |
+
0.269
|
| 155 |
+
],
|
| 156 |
+
"angle": 0,
|
| 157 |
+
"content": null
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "image_caption",
|
| 161 |
+
"bbox": [
|
| 162 |
+
0.082,
|
| 163 |
+
0.281,
|
| 164 |
+
0.916,
|
| 165 |
+
0.34
|
| 166 |
+
],
|
| 167 |
+
"angle": 0,
|
| 168 |
+
"content": "Figure 1: (a) The general pipeline of CLIP-based methods. They lack the perception of spatial relative position due to the masked images. (b) The pipeline of our IteRPrimE with Iterative Grad-CAM Refinement Strategy and Primary Word Emphasis of \"bike\". (c) This is a comparative experiment of positional phrase accuracy between IteRPrimE and GL-CLIP on RefCOCO and RefCOCOg."
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.086,
|
| 174 |
+
0.365,
|
| 175 |
+
0.48,
|
| 176 |
+
0.642
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "CAM (Selvaraju et al. 2017) and created two specialized loss functions to attenuate the detrimental effects of positional phrases in weakly supervised settings. Although the losses are unsuitable for zero-shot scenarios, Grad-CAM can partially mitigate the deleterious effects associated with masked images. This is because the method maintains the integrity of the model's spatial perception capabilities by delineating the regions with the greatest attention in the original image for localization, shown in Figure 1 (b). Nevertheless, we still find two major problems by analyzing the occurrences and characteristics of Grad-CAM. First, Grad-CAM struggles to discriminate the semantic relations between different noun phrases, due to the lack of a stronger consideration of the primary word than other context words, shown in baseline predictions of Figure 2 (a). Specifically, the model's weak ability to effectively prioritize the main word in complex expressions undermines its overall performance. Second, Grad-CAM is limited to identifying only small areas of the referred object, which consequently results in selecting undesired instance masks."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.082,
|
| 185 |
+
0.654,
|
| 186 |
+
0.481,
|
| 187 |
+
0.892
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "To overcome these challenges, we propose a novel framework namely, IteRPrime (Iterative Grad-CAM Refinement and Primary word Emphasis) utilizing Grad-CAM for zero-shot RIS. First, we implement an iterative refinement strategy to enhance the representational accuracy and enlarge the indicated area of Grad-CAM, progressively improving the model's concentration on the target object with each cycle, shown in Figure 2 (b). Simultaneously, this strategy is particularly beneficial when the referring expression includes positional words, as it offers the model chances of self-correction at each iteration, shown in Figure 2 (c). Second, the Primary Word Emphasis Module (PWEM) plays a crucial role in enhancing the weak abilities to handle the complex semantic relationships between primary words and other contexts. This module is achieved by emphasizing the Grad-CAMs of the main word within the referring expression, from local and global aspects. Finally,"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.516,
|
| 196 |
+
0.366,
|
| 197 |
+
0.913,
|
| 198 |
+
0.477
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "a post-processing module is designed to select a high-quality, contiguous instance mask from a mask proposal network, which encapsulates the target object as indicated by Grad-CAM. By addressing the limitations, the IteRPrimE approach achieves superior performance over prior zero-shot state-of-the-art techniques, notably excelling in out-of-domain scenarios and exhibiting robust cross-domain transfer proficiency. Our main contributions include"
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.52,
|
| 207 |
+
0.481,
|
| 208 |
+
0.913,
|
| 209 |
+
0.51
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "1. To our best knowledge, we are the first to use Grad-CAM to instruct Segmentors for zero-shot RIS tasks."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.518,
|
| 218 |
+
0.513,
|
| 219 |
+
0.914,
|
| 220 |
+
0.569
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "2. We propose the Iterative Grad-CAM Refinement Strategy (IGRS) and Primary Word Emphasis Module (PWEM) to enhance the accuracy and representation of Grad-CAM for better localization, shown in Figure 2."
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.518,
|
| 229 |
+
0.572,
|
| 230 |
+
0.915,
|
| 231 |
+
0.656
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "3. Compared to the previous CLIP-based method, our method significantly outperforms it with inputs containing positional information, shown in Figure 1 (c). Additionally, the approach achieves a better performance on the four popular benchmarks, especially for the outdomain datasets."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "list",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.518,
|
| 240 |
+
0.481,
|
| 241 |
+
0.915,
|
| 242 |
+
0.656
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": null
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "title",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.651,
|
| 251 |
+
0.675,
|
| 252 |
+
0.78,
|
| 253 |
+
0.691
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "Related Works"
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.516,
|
| 262 |
+
0.695,
|
| 263 |
+
0.915,
|
| 264 |
+
0.891
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "Zero-shot referring image segmentation. For the fully-supervised setting, training a well-specified model for RIS needs massive paired text-visual annotations, which are sometimes not affordable and accessible (Shah, VS, and Patel 2024; Liu, Ding, and Jiang 2023; Yang et al. 2022; Wang et al. 2022b; Kim et al. 2022; Ding et al. 2021; Jing et al. 2021). Besides, these models have relatively weak ability in out-of-domain scenarios due to the limited data and a domain gap. Therefore, the zero-shot RIS methods are proposed as the alternative. Global- and local-CLIP (GL-CLIP) (Yu, Seo, and Son 2023) is the first proposed to segment the instance given the text input with zero-shot transfer. By interfacing with the mask proposal network FreeSOLO (Wang et al. 2022a), the approach leverages both global"
|
| 268 |
+
}
|
| 269 |
+
],
|
| 270 |
+
[
|
| 271 |
+
{
|
| 272 |
+
"type": "text",
|
| 273 |
+
"bbox": [
|
| 274 |
+
0.089,
|
| 275 |
+
0.068,
|
| 276 |
+
0.476,
|
| 277 |
+
0.08
|
| 278 |
+
],
|
| 279 |
+
"angle": 0,
|
| 280 |
+
"content": "(a) Expression: a man standing next to a young girl on a grassy hillside"
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"type": "image",
|
| 284 |
+
"bbox": [
|
| 285 |
+
0.103,
|
| 286 |
+
0.081,
|
| 287 |
+
0.195,
|
| 288 |
+
0.13
|
| 289 |
+
],
|
| 290 |
+
"angle": 0,
|
| 291 |
+
"content": null
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"type": "image",
|
| 295 |
+
"bbox": [
|
| 296 |
+
0.227,
|
| 297 |
+
0.081,
|
| 298 |
+
0.319,
|
| 299 |
+
0.13
|
| 300 |
+
],
|
| 301 |
+
"angle": 0,
|
| 302 |
+
"content": null
|
| 303 |
+
},
|
| 304 |
+
{
|
| 305 |
+
"type": "image",
|
| 306 |
+
"bbox": [
|
| 307 |
+
0.354,
|
| 308 |
+
0.081,
|
| 309 |
+
0.445,
|
| 310 |
+
0.13
|
| 311 |
+
],
|
| 312 |
+
"angle": 0,
|
| 313 |
+
"content": null
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"type": "image_caption",
|
| 317 |
+
"bbox": [
|
| 318 |
+
0.249,
|
| 319 |
+
0.13,
|
| 320 |
+
0.297,
|
| 321 |
+
0.138
|
| 322 |
+
],
|
| 323 |
+
"angle": 0,
|
| 324 |
+
"content": "Baseline"
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "image_caption",
|
| 328 |
+
"bbox": [
|
| 329 |
+
0.091,
|
| 330 |
+
0.138,
|
| 331 |
+
0.441,
|
| 332 |
+
0.148
|
| 333 |
+
],
|
| 334 |
+
"angle": 0,
|
| 335 |
+
"content": "(b) Expression: businessman posing in front of an airplane door"
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "image",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.108,
|
| 341 |
+
0.148,
|
| 342 |
+
0.185,
|
| 343 |
+
0.197
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": null
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "image_caption",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.187,
|
| 352 |
+
0.167,
|
| 353 |
+
0.232,
|
| 354 |
+
0.183
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "1st"
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "image_caption",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.186,
|
| 363 |
+
0.184,
|
| 364 |
+
0.235,
|
| 365 |
+
0.198
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "Iteration"
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "image",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.236,
|
| 374 |
+
0.149,
|
| 375 |
+
0.315,
|
| 376 |
+
0.198
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": null
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "image_caption",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.317,
|
| 385 |
+
0.167,
|
| 386 |
+
0.362,
|
| 387 |
+
0.183
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "2nd"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "image_caption",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.315,
|
| 396 |
+
0.185,
|
| 397 |
+
0.366,
|
| 398 |
+
0.198
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "Iteration"
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "image",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.366,
|
| 407 |
+
0.149,
|
| 408 |
+
0.444,
|
| 409 |
+
0.199
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": null
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "image",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.09,
|
| 418 |
+
0.2,
|
| 419 |
+
0.246,
|
| 420 |
+
0.277
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": null
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "image_caption",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.082,
|
| 429 |
+
0.288,
|
| 430 |
+
0.48,
|
| 431 |
+
0.399
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "Figure 2: (a) The weak ability of the baseline model to differentiate the semantic relationships between the primary word \"man\" and the other noun phrases colored green and orange. PWEM can make the model aware of the targeted instance referred to by the main word. (b) The IGRS facilitates the expansion of highlighted areas, surpassing the confined small regions. (c) IGRS offers the model chances of self-correction."
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.082,
|
| 440 |
+
0.43,
|
| 441 |
+
0.48,
|
| 442 |
+
0.693
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "and local textual-image similarity to enhance the discriminative capabilities of the CLIP model. Based on GL-CLIP, some researchers (Wang et al. 2024a) combine the original CLIP similarity score with their proposed Balanced Score with Auxiliary Prompts (BSAP), namely BSAP-H, to reduce the CLIP's text-to-image retrieval hallucination. Ref-Diff (Ni et al. 2023) demonstrates that the text-to-image generative model like Stable Diffusion (Rombach et al. 2022) can generate the intended mask from the cross-attention map, which has considerable performance. TAS (Suo, Zhu, and Yang 2023) mainly depends on another large captioner network BLIP2 (Li et al. 2023) to mine the negative text based on the previous mask proposal network plus discriminator paradigm, which achieves favorable performances. Additionally, SAM (Kirillov et al. 2023) is utilized for better segmentation accuracy. However, these CLIP-based methods struggle to segment the referred subject with positional-described text queries, due to the absence of spatial relationships in the masked image."
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "text",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.082,
|
| 451 |
+
0.695,
|
| 452 |
+
0.481,
|
| 453 |
+
0.89
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "Grad-CAM for localization. Grad-CAM (Selvaraju et al. 2017) is proposed to provide explainable clues indicating the regions the model pays attention to for the prediction head. In the context of the Image Text Matching (ITM) objective from any VLP (Li et al. 2022; Xu et al. 2023a,b), Grad-CAM enables the establishment of a modality mapping from the textual to the visual domain, specifically calibrated for the task of visual localization. Many works utilize it to localize the objects with the given text (Shen et al. 2024; Lee et al. 2023; Xu et al. 2022; He et al. 2022; Li et al. 2021). However, these approaches either generate a bounding box annotation or are employed within weakly supervised scenarios. Compared to approaches (Shin, Xie, and Albanie 2022; Zhou, Loy, and Dai 2022; Luo et al. 2024a)"
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.516,
|
| 462 |
+
0.07,
|
| 463 |
+
0.913,
|
| 464 |
+
0.195
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "that perform zero-shot open vocabulary semantic segmentation with Grad-CAM, we are the first to propose the Grad-CAM for zero-shot RIS to study its behaviors under longer and complex textual inputs instead of a single category noun. To address problems of lacking consideration between main words and the other, the PWEM is proposed to aggregate the Grad-CAM from local-spatial and global-token levels. Secondly, a novel iterative refinement strategy is employed to obtain a better representation of Grad-CAM step by step."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "title",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.657,
|
| 473 |
+
0.209,
|
| 474 |
+
0.774,
|
| 475 |
+
0.223
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "Preliminaries"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.516,
|
| 484 |
+
0.229,
|
| 485 |
+
0.913,
|
| 486 |
+
0.45
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "The generation of Grad-CAM is essential for harnessing it for RIS. Given an image-expression pair \\((I, E)\\), we can obtain their corresponding embeddings, \\(v\\) and \\(e\\), by the visual encoder \\(v = f_{I}(I)\\) and text encoder \\(e = f_{T}(E)\\), respectively. Then, for multimodal fusion, these two embeddings are fed to the cross-attention layers used to align the visual and textual information (Yu et al. 2022; Vaswani et al. 2017). The resultant attention activation maps, \\(\\mathbf{A}\\), can indicate the activated and recognized regions of \\(v\\) concerning each query textual token in \\(e\\). However, these indication clues are usually scattered and not densely distributed in the relevant regions. Thus, the gradients, \\(\\mathbf{G}\\) can be used to sharpen and dilute the effect of non-relevant regions in \\(\\mathbf{A}\\), where contribute less to the output objective, \\(y\\), like Image Text Matching (ITM). The result of this gradient-weighted dilution process is known as Grad-CAM, \\(\\mathbf{H}\\)."
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.517,
|
| 495 |
+
0.451,
|
| 496 |
+
0.913,
|
| 497 |
+
0.479
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "In the cross-attention layer, the Grad-CAM can be formulated by Equation (1)"
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "equation",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.668,
|
| 506 |
+
0.488,
|
| 507 |
+
0.912,
|
| 508 |
+
0.503
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "\\[\n\\mathbf {H} = \\mathbf {A} \\odot \\mathbf {G}, \\tag {1a}\n\\]"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "equation",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.622,
|
| 517 |
+
0.506,
|
| 518 |
+
0.913,
|
| 519 |
+
0.54
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "\\[\n\\mathbf {G} = \\operatorname {c l a m p} \\left(\\frac {\\partial y}{\\partial \\mathbf {A}}, 0, \\infty\\right), \\tag {1b}\n\\]"
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.517,
|
| 528 |
+
0.547,
|
| 529 |
+
0.913,
|
| 530 |
+
0.604
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "where clamp removes negative gradients, which often represent noise or irrelevant features. Finally, the Grad-CAM used to indicate the image regions, \\(\\mathbf{H}_f\\), is the mean over all the number of text tokens \\(|e|\\), as shown in Equation (2)"
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"type": "equation",
|
| 537 |
+
"bbox": [
|
| 538 |
+
0.579,
|
| 539 |
+
0.611,
|
| 540 |
+
0.913,
|
| 541 |
+
0.631
|
| 542 |
+
],
|
| 543 |
+
"angle": 0,
|
| 544 |
+
"content": "\\[\n\\mathbf {H} _ {f} = \\mathbb {E} _ {k} \\left(\\mathbf {H} ^ {k}\\right), k \\in | e |, \\mathbf {H} _ {f} \\in \\mathbb {R} ^ {B \\times h \\times w} \\tag {2}\n\\]"
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"type": "text",
|
| 548 |
+
"bbox": [
|
| 549 |
+
0.517,
|
| 550 |
+
0.639,
|
| 551 |
+
0.914,
|
| 552 |
+
0.71
|
| 553 |
+
],
|
| 554 |
+
"angle": 0,
|
| 555 |
+
"content": "where \\(\\mathbf{H}^k\\) denotes the Grad-CAM for the \\(k\\)-th text token, \\(B\\) is the batch size, and \\(h \\times w\\) is the size of visual latent space. This averaging process treats every word equally and ignores the importance of the primary word, thereby undermining the performance of RIS."
|
| 556 |
+
},
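The Grad-CAM formulation in Equations (1)-(2) maps directly onto a few tensor operations. Below is a minimal PyTorch sketch, assuming access to a cross-attention activation map `A` and a scalar ITM objective `y` from some VLP forward pass; the toy `y` at the bottom is a random stand-in, not the paper's model.

```python
import torch

def grad_cam_from_cross_attention(A: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Equations (1)-(2): H = A ⊙ clamp(∂y/∂A, 0, ∞), then mean over text tokens.

    A: cross-attention activations, shape (B, num_tokens, h, w), requires_grad.
    y: scalar image-text matching (ITM) objective computed from A.
    Returns H_f with shape (B, h, w).
    """
    # Eq. (1b): gradient of the ITM objective w.r.t. the attention map.
    (G,) = torch.autograd.grad(y, A, retain_graph=True)
    G = G.clamp(min=0)          # negative gradients are treated as noise
    H = A * G                   # Eq. (1a): element-wise gating of activations
    return H.mean(dim=1)        # Eq. (2): mean over the |e| text tokens

# Toy usage with random tensors standing in for a real VLP forward pass.
B, T, h, w = 1, 6, 24, 24
A = torch.rand(B, T, h, w, requires_grad=True)
y = (A.sum() * 0.01).tanh()     # placeholder ITM score; a real model returns its own
print(grad_cam_from_cross_attention(A, y).shape)  # torch.Size([1, 24, 24])
```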
|
| 557 |
+
{
|
| 558 |
+
"type": "title",
|
| 559 |
+
"bbox": [
|
| 560 |
+
0.68,
|
| 561 |
+
0.723,
|
| 562 |
+
0.75,
|
| 563 |
+
0.739
|
| 564 |
+
],
|
| 565 |
+
"angle": 0,
|
| 566 |
+
"content": "Method"
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "title",
|
| 570 |
+
"bbox": [
|
| 571 |
+
0.517,
|
| 572 |
+
0.745,
|
| 573 |
+
0.596,
|
| 574 |
+
0.758
|
| 575 |
+
],
|
| 576 |
+
"angle": 0,
|
| 577 |
+
"content": "Overview"
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"type": "text",
|
| 581 |
+
"bbox": [
|
| 582 |
+
0.516,
|
| 583 |
+
0.763,
|
| 584 |
+
0.914,
|
| 585 |
+
0.89
|
| 586 |
+
],
|
| 587 |
+
"angle": 0,
|
| 588 |
+
"content": "Figure 1 (b) demonstrates the entire workflow of our method for zero-shot RIS, IteRPrimE, which can be divided into two parts: an iterative Grad-CAM generator and a selective mask proposal network. First, the Grad-CAM generator is a VLP model with cross-attention layers. The proposed IGRS and PWEM are integrated into the generator. Finally, within the mask proposal network, a post-processing module is designed to select the candidate instance masks, ensuring the accurate and detailed localization of the target object."
|
| 589 |
+
}
|
| 590 |
+
],
|
| 591 |
+
[
|
| 592 |
+
{
|
| 593 |
+
"type": "image",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.16,
|
| 596 |
+
0.072,
|
| 597 |
+
0.315,
|
| 598 |
+
0.181
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": null
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "image_caption",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.224,
|
| 607 |
+
0.192,
|
| 608 |
+
0.246,
|
| 609 |
+
0.206
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "(a)"
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "image",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.318,
|
| 618 |
+
0.073,
|
| 619 |
+
0.442,
|
| 620 |
+
0.189
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": null
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "image_caption",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.373,
|
| 629 |
+
0.191,
|
| 630 |
+
0.396,
|
| 631 |
+
0.206
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "(b)"
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "image",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.443,
|
| 640 |
+
0.074,
|
| 641 |
+
0.568,
|
| 642 |
+
0.189
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": null
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "image_caption",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.502,
|
| 651 |
+
0.191,
|
| 652 |
+
0.523,
|
| 653 |
+
0.205
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "(c)"
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "image",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.568,
|
| 662 |
+
0.074,
|
| 663 |
+
0.689,
|
| 664 |
+
0.188
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": null
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "image_caption",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.628,
|
| 673 |
+
0.191,
|
| 674 |
+
0.65,
|
| 675 |
+
0.206
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "(d)"
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "image",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.691,
|
| 684 |
+
0.074,
|
| 685 |
+
0.847,
|
| 686 |
+
0.188
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": null
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "image_caption",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.766,
|
| 695 |
+
0.191,
|
| 696 |
+
0.787,
|
| 697 |
+
0.206
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": "(e)"
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "image_caption",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.082,
|
| 706 |
+
0.218,
|
| 707 |
+
0.915,
|
| 708 |
+
0.262
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "Figure 3: The Grad-CAMs and attention maps (AM) of \"partially damaged car\". Since the attention map (d) and Grad-CAM (e) of the primary word \"car\" both contain unique activation areas compared to the others, they can be harnessed from local-spatial and global-token perspectives to enhance the focus on the targeted regions, respectively."
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "title",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.084,
|
| 717 |
+
0.287,
|
| 718 |
+
0.345,
|
| 719 |
+
0.303
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "Primary Word Emphasis Module"
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "text",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.082,
|
| 728 |
+
0.304,
|
| 729 |
+
0.48,
|
| 730 |
+
0.595
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "The PWEM is an essential component of the IteRPrimE, designed to confront the challenge posed by the weak capability of Grad-CAM to manage the semantic relationships in input texts featuring multiple potential referred nouns. This module emphasizes the Grad-CAM of the primary word in the expression, thereby increasing the focus on the main word during the averaging operation. Specifically, we first use an NLP processing toolbox to parse the part-of-speech (POS) tags of each word, filtering out a set of text tokens that includes special \\(<CLS>\\) token of BERT (Devlin et al. 2018), nouns, adjectives, verbs, proper nouns, and numerals. These words are recognized as effective tokens \\(W\\) that can provide distinct semantics and their contextual information. They are composed of primary words \\(W_{m}\\) and their contexts \\(W_{c}\\), where \\(W = W_{m} \\cup W_{c}\\) and \\(W_{m} \\cap W_{c} = \\emptyset\\). Then, we extract the primary noun from these effective words (e.g. \"car\" in \"partially damaged car\" shown in Figure 3) by employing a designed algorithm. It first generates a syntax tree, identifies the leftmost noun phrase (NP), and then finds the rightmost noun (NN) within that NP, which can be detailed in Algorithm 1 in the appendix."
|
| 734 |
+
},
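The primary-word extraction step (leftmost NP, then its rightmost noun) can be approximated with off-the-shelf NLP tooling. The sketch below uses NLTK shallow chunking in place of the paper's Algorithm 1, which is not reproduced here; the chunk grammar is an assumption, not the authors' exact rule set.

```python
import nltk

# One-time downloads of tokenizer and POS-tagger models
# (newer NLTK versions may instead need the "*_eng" / "punkt_tab" variants).
nltk.download("punkt", quiet=True)
nltk.download("averaged_perceptron_tagger", quiet=True)

# Assumed shallow grammar: optional determiner, adjectives or participles, nouns.
NP_GRAMMAR = r"NP: {<DT>?<JJ.*|VBN|VBG>*<NN.*>+}"

def primary_word(expression: str) -> str:
    """Approximate the paper's rule: leftmost NP, then its rightmost noun (NN)."""
    tokens = nltk.word_tokenize(expression)
    tagged = nltk.pos_tag(tokens)
    tree = nltk.RegexpParser(NP_GRAMMAR).parse(tagged)
    for subtree in tree.subtrees(filter=lambda t: t.label() == "NP"):
        nouns = [word for word, tag in subtree.leaves() if tag.startswith("NN")]
        if nouns:
            return nouns[-1]   # rightmost noun of the leftmost NP
    return tokens[0]           # fallback for expressions with no detected NP

print(primary_word("partially damaged car"))                # -> "car"
print(primary_word("a man standing next to a young girl"))  # -> "man"
```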
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.082,
|
| 739 |
+
0.596,
|
| 740 |
+
0.48,
|
| 741 |
+
0.831
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "As shown in the right part of Figure 4, we emphasize the effect of the primary word Grad-CAM from two perspectives: local spatial-level and global token-level augmentation. Different from the other contextual effective words \\( W_{c} \\), the attention map \\( \\mathbf{A}^{W_m} \\) and Grad-CAM \\( \\mathbf{H}_m \\) of the primary token holds the unique activated areas that probably indicate the correct localization of Grad-CAM shown in Figure 3. Therefore, to highlight and isolate the specific contribution of the primary word from the local spatial level, we compute the \\( \\mathrm{L}_2 \\) normalized differences, \\( \\mathbf{A}_{dif} \\) between the main word activation map and the other context word activation maps, \\( \\mathbf{A}^{W_c} \\). The activation difference is further integrated with gradients from the main word \\( \\mathbf{G}^{W_m} \\), forming a spatial modulator to indicate the local spatial importance in the main word Grad-CAM, \\( \\mathbf{H}_m \\). Thus, we can obtain the local spatial-level enhanced Grad-CAM of the primary word, \\( \\mathbf{H}_l \\), as shown in Equation (3)"
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "equation",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.185,
|
| 750 |
+
0.832,
|
| 751 |
+
0.48,
|
| 752 |
+
0.867
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "\\[\n\\mathbf {A} _ {d i f} = \\frac {\\mathbf {A} ^ {W _ {m}} - \\mathbf {A} ^ {W _ {c}}}{\\left\\| \\mathbf {A} ^ {W _ {m}} - \\mathbf {A} ^ {W _ {c}} \\right\\| _ {2}}, \\tag {3a}\n\\]"
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "equation",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.187,
|
| 761 |
+
0.869,
|
| 762 |
+
0.479,
|
| 763 |
+
0.887
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "\\[\n\\mathbf {H} _ {l} = \\mathbf {A} _ {d i f} \\odot \\mathbf {G} ^ {W _ {m}} \\odot \\mathbf {H} _ {m}, \\tag {3b}\n\\]"
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.516,
|
| 772 |
+
0.287,
|
| 773 |
+
0.911,
|
| 774 |
+
0.316
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "where \\(\\mathbf{H}_m = \\mathbf{A}^{W_m}\\odot \\mathbf{G}^{W_m}\\) following Equation (1). Broadcasting occurs when the dimensions do not match."
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.516,
|
| 783 |
+
0.316,
|
| 784 |
+
0.913,
|
| 785 |
+
0.388
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "From the global aspect, we manually add the weight of the main word Grad-CAM \\(\\mathbf{H}_m\\) along the token axis during mean operations, which provides additional enhanced focus on the primary token. Therefore, we can obtain the global token-level Grad-CAM \\(\\mathbf{H}_g\\) by Equation (4)"
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "equation",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.628,
|
| 794 |
+
0.397,
|
| 795 |
+
0.913,
|
| 796 |
+
0.416
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "\\[\nW ^ {\\prime} = W \\cup \\left\\{W _ {m} \\right\\} \\times N _ {c}, \\tag {4a}\n\\]"
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "equation",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.628,
|
| 805 |
+
0.419,
|
| 806 |
+
0.913,
|
| 807 |
+
0.441
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "\\[\n\\mathbf {H} _ {g} = \\mathbf {A} ^ {W ^ {\\prime}} \\odot \\mathbf {G} ^ {W ^ {\\prime}} \\tag {4b}\n\\]"
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "text",
|
| 814 |
+
"bbox": [
|
| 815 |
+
0.516,
|
| 816 |
+
0.449,
|
| 817 |
+
0.914,
|
| 818 |
+
0.547
|
| 819 |
+
],
|
| 820 |
+
"angle": 0,
|
| 821 |
+
"content": "where \\(N_{c}\\) is the number of context tokens and \\(\\{W_m\\} \\times N_c\\) means repeating the main word for \\(N_{c}\\) times. Finally, the resulting augmented Grad-CAM, \\(\\mathbf{H}_a\\), is the mean of concatenated local and global Grad-CAMs, \\(\\mathbf{H}_c\\), along the token axis, where \\(\\mathbf{H}_c = [\\mathbf{H}_g,\\mathbf{H}_l]\\). This map significantly improves the model's Grad-CAM localization accuracy, shown in PWEM of Figure 2 (a)."
|
| 822 |
+
},
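Putting Equations (3)-(4) together, the local-spatial and global-token emphasis reduces to a handful of tensor operations. The following is a minimal sketch under assumed shapes (tokens first, then the spatial grid); `pwem` and its argument layout are illustrative names, not the authors' code.

```python
import torch

def pwem(A: torch.Tensor, G: torch.Tensor, main_idx: int) -> torch.Tensor:
    """Augmented Grad-CAM H_a from Eqs. (3)-(4): local + global emphasis.

    A, G: attention maps and clamped gradients over effective tokens, (T, h, w);
    `main_idx` indexes the primary word among the T tokens.
    """
    T = A.shape[0]
    ctx = [i for i in range(T) if i != main_idx]       # context-token indices
    A_m, G_m = A[main_idx], G[main_idx]
    H_m = A_m * G_m                                    # Eq. (1) for the main word

    # Local spatial level, Eq. (3): L2-normalized difference between the main-word
    # attention and each context-word attention gates the main-word Grad-CAM.
    diff = A_m.unsqueeze(0) - A[ctx]                   # (N_c, h, w)
    A_dif = diff / diff.flatten(1).norm(dim=1).clamp_min(1e-8).view(-1, 1, 1)
    H_l = A_dif * G_m * H_m                            # broadcasts the (h, w) maps

    # Global token level, Eq. (4): append N_c extra copies of the main-word token.
    N_c = len(ctx)
    H_g = torch.cat([A * G, H_m.expand(N_c, -1, -1)], dim=0)   # (T + N_c, h, w)

    # H_a: mean of concatenated local and global Grad-CAMs along the token axis.
    return torch.cat([H_g, H_l], dim=0).mean(dim=0)

# Toy usage: 4 effective tokens on a 24x24 latent grid, primary word at index 1.
A, G = torch.rand(4, 24, 24), torch.rand(4, 24, 24)
print(pwem(A, G, main_idx=1).shape)  # torch.Size([24, 24])
```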
|
| 823 |
+
{
|
| 824 |
+
"type": "title",
|
| 825 |
+
"bbox": [
|
| 826 |
+
0.517,
|
| 827 |
+
0.562,
|
| 828 |
+
0.842,
|
| 829 |
+
0.578
|
| 830 |
+
],
|
| 831 |
+
"angle": 0,
|
| 832 |
+
"content": "Iterative Grad-CAM Refinement Strategy"
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "text",
|
| 836 |
+
"bbox": [
|
| 837 |
+
0.515,
|
| 838 |
+
0.583,
|
| 839 |
+
0.914,
|
| 840 |
+
0.805
|
| 841 |
+
],
|
| 842 |
+
"angle": 0,
|
| 843 |
+
"content": "Masked Language Modeling (MLM) can be used for bidirectional image generative Transformers such as MasGIT (Chang et al. 2022). The iterative generative paradigm offers self-correction chances for the model to optimize step-by-step in the latent space. Inspired by this, we propose a novel iterative strategy of Grad-CAM to gradually steer the model's attention to the region that the model is not attentive to initially, which brings benefits from two sides. On the one hand, for the circumstances in which Grad-CAM correctly localizes the instance initially, the gradually refined Grad-CAM can be better gathered around the targeted instance region. On the other hand, for the first incorrect localization, the model can attend to other semantic instances to recheck the Grad-CAM prediction, especially for the positional phrase inputs. The overall approach of IGRS is illustrated in the left part of Figure 4."
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "text",
|
| 847 |
+
"bbox": [
|
| 848 |
+
0.516,
|
| 849 |
+
0.806,
|
| 850 |
+
0.914,
|
| 851 |
+
0.89
|
| 852 |
+
],
|
| 853 |
+
"angle": 0,
|
| 854 |
+
"content": "For simple notification, we use \\( H_{t} \\) to represent the resultant \\( t \\)-th iteration Grad-CAM from the PWEM \\( \\mathbf{H}_a \\). Equation (5) delineates the aggregation and refinement process of Grad-CAM representational updation, which entails the combination with Grad-CAM in the penultimate iteration step \\( (t - 1) \\), under the constraint of a zero initial condition"
|
| 855 |
+
}
|
| 856 |
+
],
|
| 857 |
+
[
|
| 858 |
+
{
|
| 859 |
+
"type": "image",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.14,
|
| 862 |
+
0.072,
|
| 863 |
+
0.862,
|
| 864 |
+
0.368
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": null
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "image_caption",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.082,
|
| 873 |
+
0.386,
|
| 874 |
+
0.915,
|
| 875 |
+
0.43
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "Figure 4: The proposed IGRS (left) and PWEM (right). The mask \\( M_t' \\) is the attention mask for cross-attention layers by dropping the most salient regions of Grad-CAM to zero. PWEM filters the meaningless tokens and augments the Grad-CAM representation from local and global aspects."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "text",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.084,
|
| 884 |
+
0.456,
|
| 885 |
+
0.144,
|
| 886 |
+
0.47
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": "\\(H_0 = 0\\)"
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "equation",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.178,
|
| 895 |
+
0.47,
|
| 896 |
+
0.478,
|
| 897 |
+
0.489
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "\\[\nH _ {t} ^ {\\prime} = \\lambda H _ {t - 1} ^ {\\prime} + (1 - \\lambda) \\sigma \\left(H _ {t}\\right), \\tag {5}\n\\]"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.082,
|
| 906 |
+
0.495,
|
| 907 |
+
0.48,
|
| 908 |
+
0.608
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "where \\( H_{t-1}^{\\prime} \\) and \\( H_{t}^{\\prime} \\) are the resultant refined heatmaps from the \\( (t-1) \\)-th and \\( t \\)-th iterations, \\( \\sigma(.) \\) is a sigmoid function to scale the value appropriately, and the hyperparameter \\( \\lambda \\) is a balancing factor. To instruct the model to focus on the region previously not paid attention to, in each iteration, a binary attention mask \\( M_{t} \\) would be generated from the refined Grad-CAM heatmap \\( H_{t} \\) by dropping the most attentive region to 0, as shown in Equation (6)."
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "equation",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.124,
|
| 917 |
+
0.618,
|
| 918 |
+
0.478,
|
| 919 |
+
0.651
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "\\[\nM _ {t} = \\mathcal {P} \\left(H _ {t}, \\theta\\right), \\mathcal {P} (H, \\theta) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} \\sigma (H) \\geq \\theta \\\\ 1 & \\text {i f} \\sigma (H) < \\theta , \\end{array} \\right. \\tag {6}\n\\]"
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "text",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.082,
|
| 928 |
+
0.66,
|
| 929 |
+
0.48,
|
| 930 |
+
0.817
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "where \\(\\mathcal{P}(H,\\theta)\\) represents the process of applying a sigmoid function to stretch the values and then thresholding the result at \\(\\theta\\) to create a binary mask. The binary mask \\(M_{t}\\) is then combined with the previous mask \\(M_{t - 1}^{\\prime}\\) by the logical and operation, \\(M_t' = M_{t - 1}'\\wedge M_t\\), where \\(M_0\\) is a tensor of ones. The \\(\\wedge\\) ensures the model can expand to other regions regardless of the places previously focused. This attention binary mask will be fed into the cross-attention layer of a VLP to mask out the visual regions in embedding \\(v\\), ensuring the text token queries no longer pay attention to the zero regions within the mask."
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.082,
|
| 939 |
+
0.818,
|
| 940 |
+
0.481,
|
| 941 |
+
0.89
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "For an interactive algorithm, the stopping condition is essential. To make the iterative process more flexible, we introduce a dynamic stopping criterion based on the proposed soft ITM score at timestep \\( t \\), \\( S^{(t)} \\), which is calculated as the product of the ITM, from the VLP model and the relevance"
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "text",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.516,
|
| 950 |
+
0.454,
|
| 951 |
+
0.88,
|
| 952 |
+
0.471
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "score, \\( S^{(t)} = ITM^{(t)} \\cdot R^{(t)} \\), where \\( R^{(t)} \\) is defined by:"
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "equation",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.598,
|
| 961 |
+
0.478,
|
| 962 |
+
0.913,
|
| 963 |
+
0.515
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "\\[\nR ^ {(t)} = \\frac {\\sum_ {x \\in X} \\sum_ {y \\in Y} \\left(1 - \\widetilde {H} _ {t - 1} ^ {\\prime}\\right)}{X \\times Y}, \\tag {7}\n\\]"
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "text",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.516,
|
| 972 |
+
0.522,
|
| 973 |
+
0.914,
|
| 974 |
+
0.651
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": "where \\(\\widetilde{H}_{t-1}^{\\prime}\\) is the interpolated Grad-CAM heatmap of \\(H_{t-1}^{\\prime}\\) with the same size as the original image with width \\(X\\) and height \\(Y\\). This relevance score measures the average overlooked Grad-CAM intensity. A higher \\(R^{(t)}\\) indicates that there are some regions less attentive to previously, guiding the model to focus on these overlooked areas in the next iteration. If the score for the current iteration \\(S^{(t)}\\) is less than the score from the previous iteration \\(S^{(t-1)}\\), the iterative process is terminated. The total iterative times should not exceed \\(\\nu\\)."
|
| 978 |
+
},
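The full IGRS loop, Equations (5)-(7) plus the dynamic stopping criterion, can be sketched as follows. The `vlp_step` callable is a placeholder for one masked forward/backward pass of the real VLP, and the relevance score is computed in heatmap space rather than at the interpolated image resolution; both are assumptions of this sketch.

```python
import torch

def igrs(vlp_step, shape, lam=0.5, theta=0.85, max_iters=3):
    """Iterative Grad-CAM refinement, Eqs. (5)-(7), with the dynamic stopping rule.

    vlp_step(mask) stands in for one masked forward/backward VLP pass and is
    assumed to return (H_t, itm_score): the PWEM Grad-CAM and a scalar ITM score.
    """
    h, w = shape
    H_ref = torch.zeros(h, w)            # H'_0 = 0 (zero initial condition)
    M_prime = torch.ones(h, w)           # M'_0 is a tensor of ones
    prev_score = -float("inf")
    for t in range(1, max_iters + 1):
        H_t, itm = vlp_step(M_prime)
        R = (1.0 - H_ref).mean().item()  # Eq. (7): average overlooked intensity
        score = itm * R                  # soft ITM score S^(t)
        if score < prev_score:           # stop when the score no longer improves
            break
        prev_score = score
        H_ref = lam * H_ref + (1 - lam) * torch.sigmoid(H_t)   # Eq. (5)
        M_t = (torch.sigmoid(H_t) < theta).float()             # Eq. (6)
        M_prime = M_prime * M_t          # logical AND with the previous mask
    return H_ref

# Toy usage with a random stand-in for the VLP.
fake_vlp = lambda mask: (torch.randn(24, 24) * mask, torch.rand(()).item())
print(igrs(fake_vlp, (24, 24)).shape)  # torch.Size([24, 24])
```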
{ "type": "title", "bbox": [0.516, 0.662, 0.779, 0.679], "angle": 0, "content": "Selective Mask Proposal Network" },
{ "type": "text", "bbox": [0.516, 0.681, 0.913, 0.806], "angle": 0, "content": "Through the aforementioned steps, we can employ the Grad-CAM indication clue to instruct the segmentors to predict the referred instance mask. For a given image, the mask proposal network predicts \\( N_{b} \\) masks, but it cannot autonomously determine which object mask the user refers to with the language. Therefore, the selection module within the network is designed to select the mask indicated by the Grad-CAM; it divides the selection procedure into two phases: a filtering phase and a scoring phase." },
{ "type": "text", "bbox": [0.516, 0.806, 0.914, 0.89], "angle": 0, "content": "Assuming that the Grad-CAM has successfully localized the instance, the center point of the Grad-CAM should lie within the inner part of the object. Based on this hypothesis, the selection mechanism begins with a preliminary evaluation that involves two main criteria. First, we identify the set of coordinates, \\( \\mathcal{C}_{max} \\), where the heatmap reaches its peak" }
],
[
{ "type": "table", "bbox": [0.093, 0.067, 0.907, 0.254], "angle": 0, "content": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"4\">RefCOCO</td><td colspan=\"4\">RefCOCO+</td><td colspan=\"3\">RefCOCOg</td><td rowspan=\"2\">Average</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>test</td><td>avg.</td></tr><tr><td>Zero-shot methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>GL-CLIP (Yu, Seo, and Son 2023)</td><td>26.7</td><td>25.0</td><td>26.5</td><td>26.1</td><td>28.2</td><td>26.5</td><td>27.9</td><td>27.5</td><td>33.0</td><td>33.1</td><td>33.1</td><td>28.4</td></tr><tr><td>BSAP (Wang et al. 2024a)</td><td>27.3</td><td>27.0</td><td>27.1</td><td>27.1</td><td>28.7</td><td>27.8</td><td>28.3</td><td>28.3</td><td>34.5</td><td>34.5</td><td>34.5</td><td>29.4</td></tr><tr><td>Region token (Yu, Seo, and Son 2023)</td><td>23.4</td><td>22.1</td><td>24.6</td><td>23.4</td><td>24.5</td><td>22.6</td><td>25.4</td><td>24.2</td><td>27.6</td><td>27.3</td><td>27.5</td><td>24.7</td></tr><tr><td>SAM-CLIP (Ni et al. 2023)</td><td>26.3</td><td>25.8</td><td>26.4</td><td>26.2</td><td>25.7</td><td>28</td><td>26.8</td><td>26.8</td><td>38.8</td><td>38.9</td><td>38.9</td><td>29.6</td></tr><tr><td>Ref-Diff (Ni et al. 2023)</td><td>37.2</td><td>38.4</td><td>37.2</td><td>37.6</td><td>37.3</td><td>40.5</td><td>33</td><td>36.9</td><td>44</td><td>44.5</td><td>44.3</td><td>39.0</td></tr><tr><td>TAS (Suo, Zhu, and Yang 2023)</td><td>39.8</td><td>41.1</td><td>36.2</td><td>39.0</td><td>43.6</td><td>49.1</td><td>36.5</td><td>43.1</td><td>46.6</td><td>46.8</td><td>46.7</td><td>42.5</td></tr><tr><td>CaR (Sun et al. 2024)</td><td>33.6</td><td>35.4</td><td>30.5</td><td>33.0</td><td>34.2</td><td>36.0</td><td>31.0</td><td>33.7</td><td>36.7</td><td>36.6</td><td>36.7</td><td>34.3</td></tr><tr><td>Weakly-supervised methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>TSEG (Strudel, Laptev, and Schmid 2022)</td><td>25.4</td><td>-</td><td>-</td><td>-</td><td>22.0</td><td>-</td><td>-</td><td>-</td><td>22.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Chunk (Lee et al. 2023)</td><td>31.1</td><td>32.3</td><td>30.1</td><td>31.8</td><td>31.3</td><td>32.1</td><td>30.1</td><td>31.2</td><td>32.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>40.2</td><td>46.5</td><td>33.9</td><td>40.2</td><td>44.2</td><td>51.6</td><td>35.3</td><td>43.7</td><td>46.0</td><td>45.8</td><td>45.9</td><td>42.9</td></tr></table>" },
{ "type": "table_caption", "bbox": [0.082, 0.262, 0.913, 0.293], "angle": 0, "content": "Table 1: Comparison of different methods on different datasets. \"avg.\" denotes the mean performance across the splits within an individual dataset, while the final \"Average\" column is the composite mean over all dataset splits." },
{ "type": "text", "bbox": [0.082, 0.318, 0.48, 0.43], "angle": 0, "content": "values. Then, the \\(m\\)-th candidate mask \\(B^{m}\\) is examined to determine whether it includes at least one activated pixel at any of these coordinates. Second, to ensure the quality of the masks, we apply a connected-component labeling technique and require that the number of connected components in each mask does not exceed a predefined threshold \\(\\kappa\\). The combined criteria for the preliminary evaluation are defined as follows:" },
{ "type": "equation", "bbox": [0.112, 0.439, 0.478, 0.498], "angle": 0, "content": "\\[\n\\begin{array}{l} \\mathcal{A} = \\left\\{ m \\in N_{b} \\mid B_{(x,y)}^{m} \\neq 0, \\; \\exists (x,y) \\in \\mathcal{C}_{max} \\right\\}, \\\\ \\mathcal{F} = \\left\\{ m \\in N_{b} \\mid g_{cc}\\left(B^{m}\\right) \\leq \\kappa \\right\\}, \\tag{8} \\\\ \\mathcal{D} = \\mathcal{A} \\cap \\mathcal{F}. \\end{array}\n\\]" },
{ "type": "text", "bbox": [0.082, 0.508, 0.48, 0.606], "angle": 0, "content": "In the above equations, \\( g_{cc} \\) denotes a function that counts the connected components within the \\( m \\)-th of the \\( N_b \\) candidate masks. The intersection of sets \\( \\mathcal{A} \\) and \\( \\mathcal{F} \\), denoted \\( \\mathcal{D} \\), yields the subset of candidate masks that fulfill both the activation and the mask quality requirements. This evaluation process filters out irrelevant and empty masks to reduce the computational cost and enhance efficiency." },
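A minimal sketch of this filtering phase using SciPy's connected-component labeling; the array layout and the (row, col) indexing convention for the peak coordinates are assumptions.

```python
import numpy as np
from scipy.ndimage import label  # connected-component labeling

def filter_candidates(masks, c_max, kappa=12):
    # masks: (N_b, H, W) binary candidate masks; c_max: list of (row, col)
    # peak coordinates of the Grad-CAM heatmap. Eq. (8): set A keeps masks
    # activated at some peak, set F bounds the component count, D = A ∩ F.
    kept = []
    for m, mask in enumerate(masks):
        hits_peak = any(mask[r, c] != 0 for (r, c) in c_max)
        _, n_components = label(mask)  # g_cc(B^m)
        if hits_peak and n_components <= kappa:
            kept.append(m)
    return kept
```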
{ "type": "text", "bbox": [0.082, 0.606, 0.48, 0.705], "angle": 0, "content": "Subsequent to the preliminary filtering phase, we evaluate each remaining candidate mask through a weighted scoring mechanism that leverages the Grad-CAM heatmap. This involves computing an element-wise product-based score for each mask with respect to the heatmap. We define the score of the \\( j \\)-th candidate mask from the set \\( \\mathcal{D} \\) as \\( Z(j) \\). The scoring process is formulated below:" },
{ "type": "equation", "bbox": [0.133, 0.713, 0.478, 0.787], "angle": 0, "content": "\\[\n\\begin{array}{l} Z(j) = \\sum_{x \\in X} \\sum_{y \\in Y} \\left(B_{(x,y)}^{j} + B_{(x,y)}^{j} \\odot \\widetilde{H}_{(x,y)}^{\\prime}\\right), \\\\ \\hat{Z}(j) = \\frac{Z(j)}{\\sum_{x \\in X} \\sum_{y \\in Y} B_{(x,y)}^{j}}, \\quad j \\in \\mathcal{D}. \\tag{9} \\end{array}\n\\]" },
{ "type": "text", "bbox": [0.082, 0.796, 0.48, 0.859], "angle": 0, "content": "where \\(\\widetilde{H}_{(x,y)}^{\\prime}\\) is the final output Grad-CAM at the original image size. The final step in our selection process identifies the candidate mask with the maximum normalized score, \\(\\hat{Z}(j)\\), as the chosen segmentation output:" },
{ "type": "equation", "bbox": [0.197, 0.868, 0.48, 0.892], "angle": 0, "content": "\\[\nB_{\\text{select}} = \\arg \\max_{j \\in \\mathcal{D}} \\hat{Z}(j). \\tag{10}\n\\]" },
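The scoring and final selection of Equations (9)-(10) reduce to a few lines of NumPy; this sketch assumes binary masks and a [0, 1] heatmap already resized to the image.

```python
import numpy as np

def select_mask(masks, heatmap, candidates):
    # masks: (N_b, H, W) binary candidate masks; heatmap: (H, W) Grad-CAM in
    # [0, 1] resized to the image; candidates: indices surviving the filter (D).
    best_j, best_score = None, -np.inf
    for j in candidates:
        b = masks[j].astype(np.float32)
        area = b.sum()
        if area == 0:
            continue  # guard; empty masks were already filtered out
        z = (b + b * heatmap).sum()  # Z(j): mask area plus Grad-CAM mass inside it
        z_hat = z / area             # Eq. (9): normalize by mask area
        if z_hat > best_score:
            best_j, best_score = j, z_hat
    return best_j                    # Eq. (10): B_select = argmax of Z_hat
```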
{ "type": "table", "bbox": [0.522, 0.316, 0.912, 0.48], "angle": 0, "content": "<table><tr><td>Method</td><td>Training dataset</td><td>All</td><td>Unseen</td></tr><tr><td rowspan=\"3\">CRIS</td><td>RefCOCO</td><td>15.5</td><td>13.8</td></tr><tr><td>RefCOCO+</td><td>16.3</td><td>14.6</td></tr><tr><td>RefCOCOg</td><td>16.2</td><td>13.9</td></tr><tr><td rowspan=\"3\">LAVT</td><td>RefCOCO</td><td>16.7</td><td>14.4</td></tr><tr><td>RefCOCO+</td><td>16.6</td><td>13.5</td></tr><tr><td>RefCOCOg</td><td>16.1</td><td>13.5</td></tr><tr><td>GL-CLIP</td><td>N/A</td><td>23.6</td><td>23.0</td></tr><tr><td>TAS</td><td>N/A</td><td>25.6</td><td>-</td></tr><tr><td>Ref-Diff</td><td>N/A</td><td>29.4</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>N/A</td><td>38.1</td><td>37.9</td></tr></table>" },
{ "type": "table_caption", "bbox": [0.516, 0.489, 0.913, 0.518], "angle": 0, "content": "Table 2: Comparison of oIoU on PhraseCut for different supervised and zero-shot methods." },
{ "type": "text", "bbox": [0.516, 0.545, 0.914, 0.589], "angle": 0, "content": "This approach ensures that the selected mask aligns with the regions of interest highlighted by the Grad-CAM heatmap, thereby supporting the precision and efficacy of RIS." },
{ "type": "title", "bbox": [0.66, 0.601, 0.771, 0.618], "angle": 0, "content": "Experiments" },
{ "type": "title", "bbox": [0.517, 0.621, 0.692, 0.637], "angle": 0, "content": "Experimental Settings" },
{ "type": "text", "bbox": [0.515, 0.639, 0.915, 0.891], "angle": 0, "content": "Datasets and metrics. We employ the RefCOCO (Nagaraja, Morariu, and Davis 2016), RefCOCO+ (Nagaraja, Morariu, and Davis 2016), RefCOCOg (Kazemzadeh et al. 2014; Mao et al. 2016), and PhraseCut (Wu et al. 2020) datasets to evaluate the proposed zero-shot method. RefCOCO, with shorter expressions (on average 1.6 nouns, 3.6 words), contains many positional phrases (50%), especially those with direct direction clues like \"left\" or \"right\". In contrast, RefCOCO+ focuses on attribute phrases with the same average expression length. RefCOCOg is a more challenging benchmark with longer phrases (on average 2.8 nouns, 8.4 words) and complex expressions. To verify the effectiveness of the model in out-of-domain settings, we adapt it to the PhraseCut dataset, whose test split contains 1271 additional categories beyond the 80 in COCO. Following (Sun et al. 2024; Yu, Seo, and Son 2023; Han et al. 2024), we utilize the mean Intersection over Union (mIoU) for the RefCOCO series, a common metric for RIS. Following (Yu," }
],
[
{ "type": "text", "bbox": [0.175, 0.068, 0.743, 0.084], "angle": 0, "content": "(a) Self-correction effect for positional phrases. the smaller bird at the left" },
{ "type": "image", "bbox": [0.197, 0.087, 0.825, 0.141], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [0.44, 0.143, 0.478, 0.156], "angle": 0, "content": "Ours" },
{ "type": "text", "bbox": [0.175, 0.155, 0.823, 0.17], "angle": 0, "content": "(b) Strong robustness of out-domain phrases. the arm of the photo not being pictured" },
{ "type": "image", "bbox": [0.208, 0.171, 0.816, 0.247], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [0.44, 0.248, 0.478, 0.26], "angle": 0, "content": "Ours" },
{ "type": "text", "bbox": [0.177, 0.26, 0.646, 0.276], "angle": 0, "content": "(c) Strong robustness of out-domain categories. metal bridge" },
{ "type": "image", "bbox": [0.2, 0.278, 0.825, 0.36], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [0.082, 0.37, 0.916, 0.414], "angle": 0, "content": "Figure 5: Qualitative comparisons with GL-CLIP. (a) The self-correction effect brought by our IGRS, especially for positional phrases. (b) For unseen phrases like \"not\", our model shows better robustness. (c) shows the gathering effect of IteRPrimE, which selects the whole mask with high confidence instead of a part, unlike GL-CLIP." },
{ "type": "table", "bbox": [0.084, 0.437, 0.482, 0.521], "angle": 0, "content": "<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td></tr><tr><td>Overall Mean</td><td>43.4</td><td>41.3</td></tr><tr><td>GVLP (Shen et al. 2024)</td><td>45.0</td><td>41.9</td></tr><tr><td>Global Augment</td><td>46.5</td><td>45.7</td></tr><tr><td>Local Augment</td><td>45.3</td><td>42.3</td></tr><tr><td>PWEM</td><td>46.5</td><td>45.8</td></tr></table>" },
{ "type": "table_caption", "bbox": [0.083, 0.53, 0.48, 0.573], "angle": 0, "content": "Table 3: Comparison of methods with different Grad-CAM generation methods on the RefCOCO testA and RefCOCOg test datasets." },
{ "type": "table", "bbox": [0.085, 0.593, 0.485, 0.636], "angle": 0, "content": "<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td><td>RefCOCOg val</td></tr><tr><td>Mask image</td><td>46.3</td><td>45.3</td><td>45.2</td></tr><tr><td>Mask feature</td><td>46.5</td><td>45.8</td><td>46.0</td></tr></table>" },
{ "type": "table_caption", "bbox": [0.083, 0.644, 0.48, 0.674], "angle": 0, "content": "Table 4: Performance comparison of masking out the salient regions at the image level and at the feature level (attention mask)." },
{ "type": "text", "bbox": [0.083, 0.706, 0.48, 0.734], "angle": 0, "content": "Seo, and Son 2023; Wu et al. 2020), we report the overall Intersection over Union (oIoU) for the PhraseCut dataset." },
{ "type": "text", "bbox": [0.082, 0.736, 0.481, 0.889], "angle": 0, "content": "Implementation details. We use the widely adopted mask proposal network Mask2Former (Cheng et al. 2022; Liang et al. 2023) to obtain 200 instance-level mask proposals. Following (Shen et al. 2024; Lee et al. 2023), we utilize the base ALBEF model for Grad-CAM localization, and the Grad-CAM is generated from the 8th cross-attention layer. When processing the input text, the prefatory phrase \"there is a\" is prepended. The balancing factor \\(\\lambda\\), connected-component upper limit \\(\\kappa\\), iteration number \\(\\nu\\), and binarization threshold \\(\\theta\\) are set to 0.8, 12, 3, and 0.5, respectively. All experiments are conducted on a 24 GB RTX 3090 GPU." },
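Restating those settings as a small configuration sketch; the key names are illustrative, not identifiers from the authors' code.

```python
# Reported hyperparameters of the pipeline, gathered in one place.
CONFIG = {
    "num_mask_proposals": 200,   # N_b instance masks from Mask2Former
    "grad_cam_layer": 8,         # ALBEF cross-attention layer used for Grad-CAM
    "text_prefix": "there is a",
    "lambda": 0.8,               # heatmap-update balancing factor
    "kappa": 12,                 # max connected components per candidate mask
    "nu": 3,                     # max refinement iterations
    "theta": 0.5,                # binarization threshold
}
```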
{ "type": "title", "bbox": [0.518, 0.439, 0.58, 0.453], "angle": 0, "content": "Results" },
{ "type": "text", "bbox": [0.515, 0.459, 0.914, 0.695], "angle": 0, "content": "Main results. As shown in Table 1, IteRPrimE achieves the best performance on almost all splits of the three datasets, especially the testA splits of RefCOCO and RefCOCO+. It outperforms the SOTA method TAS by a \\(0.4\\%\\) average improvement. Across the splits of RefCOCO and RefCOCO+, which are rich in short positional phrases, our model obtains averages of \\(40.2\\%\\) and \\(43.7\\%\\) compared to \\(39.0\\%\\) and \\(43.1\\%\\) for TAS, respectively. Our method is therefore more robust to positional information than the CLIP-based paradigms. However, the model is relatively weaker on the complex expressions of RefCOCOg, which can be attributed to data limitations and the domain gap in the pre-training stage. Additionally, by using the additional captioner BLIP2 (Li et al. 2023) and SAM (Kirillov et al. 2023), TAS maintains the best performance on some splits, especially for complex phrases, but it suffers from low throughput and a heavy model footprint." },
{ "type": "text", "bbox": [0.515, 0.695, 0.915, 0.89], "angle": 0, "content": "Zero-shot evaluation on an unseen domain. Notably, as shown in Table 2, our model has a strong capability for cross-domain zero-shot transfer compared to other zero-shot SOTA methods and the existing supervised methods CRIS (Wang et al. 2022b) and LAVT (Yang et al. 2022). IteRPrimE significantly outperforms both kinds of methods in out-of-domain scenarios. When assessed on the subset of categories not present in the RefCOCO datasets (the \"Unseen\" column), our model shows the best robustness, whereas the supervised methods suffer severe performance degradation. Notably, the underperformance of TAS on this dataset may be attributed to the predominance of complex outdoor scenes within it. In such intricate environments, the reliance on an additional" }
],
[
{ "type": "table", "bbox": [0.089, 0.067, 0.477, 0.133], "angle": 0, "content": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">RefCOCOg test</td><td colspan=\"3\">RefCOCO testA</td></tr><tr><td>Position</td><td>Others</td><td>Overall</td><td>Position</td><td>Others</td><td>Overall</td></tr><tr><td>GVLP w/o IGRS</td><td>33.0</td><td>43.6</td><td>41.3</td><td>34.7</td><td>53.2</td><td>44.7</td></tr><tr><td>GVLP w/ IGRS</td><td>33.7</td><td>44.3</td><td>41.9</td><td>35.1</td><td>53.6</td><td>45.0</td></tr><tr><td>PWEM w/o IGRS</td><td>36.4</td><td>47.5</td><td>45.1</td><td>36.1</td><td>54.8</td><td>46.1</td></tr><tr><td>PWEM w/ IGRS</td><td>37.4</td><td>48.2</td><td>45.8</td><td>36.5</td><td>55.0</td><td>46.5</td></tr></table>" },
{ "type": "table_caption", "bbox": [0.084, 0.142, 0.479, 0.158], "angle": 0, "content": "Table 5: Ablation studies of the proposed PWEM and IGRS." },
{ "type": "text", "bbox": [0.082, 0.186, 0.48, 0.311], "angle": 0, "content": "captioning model for annotation by TAS could introduce greater noise, thereby compromising performance. In contrast, even in complex environmental contexts, our model localizes pertinent regions effectively because it retains spatial perception. Concurrently, the integration of IGRS and PWEM further bolsters IteRPrimE's ability to handle the complicated interrelationships among objects within the scene, leading to this commendable performance." },
{ "type": "text", "bbox": [0.082, 0.312, 0.481, 0.479], "angle": 0, "content": "Qualitative comparisons. Figure 5 shows comparisons with GL-CLIP (Yu, Seo, and Son 2023). First, we demonstrate that our IGRS module possesses a self-corrective mechanism: it initially produces the same answer as GL-CLIP before refining its prediction by revisiting initially overlooked regions. In Figure 5 (b), the scarcity of such negative phrases in the training set is offset by our model's robustness. Finally, we address the limited highlighted region of the initial Grad-CAM representation with the IGRS, as demonstrated in Figure 5 (c). The more concentrated the Grad-CAM, the more likely the correct instance mask is selected rather than a part of it." },
{ "type": "title", "bbox": [0.084, 0.492, 0.206, 0.509], "angle": 0, "content": "Ablation Study" },
{ "type": "text", "bbox": [0.082, 0.513, 0.48, 0.735], "angle": 0, "content": "Effect of PWEM. According to Equation (2), the mean operation is essential to the generation of Grad-CAM and deeply influences its representational accuracy. Table 3 therefore presents an ablation study examining the impact of various aggregation configurations for Grad-CAM generation. \"Overall Mean\" is the direct mean of all tokens' Grad-CAMs, whereas GVLP averages only selected effective tokens (Shen et al. 2024). The remaining configurations were introduced earlier, in Equation (3) and Equation (4). Compared to the previous methods, the proposed PWEM significantly improves performance because it recovers examples that fail due to weak understanding of the complex semantics between the main word and its context. Additionally, global augmentation shows stronger potential than local augmentation because it can dominate the effect during aggregation." },
{ "type": "text", "bbox": [0.082, 0.736, 0.48, 0.846], "angle": 0, "content": "Effect of mask position in IGRS. Table 4 evaluates where the binary mask \\( M \\) is applied. \"Mask image\" means applying the mask to the original image so that the indicated regions are masked out, similar to GL-CLIP. However, this can degrade performance due to the loss of relative relationships between regions. Our attention masking in the cross-attention layer is more robust, with improvements on all three splits." },
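A minimal single-head sketch of this feature-level masking, where the IGRS mask suppresses visual patches inside cross-attention rather than blacking out image pixels; the shapes, the scaling, and the assumption that at least one patch stays unmasked are illustrative, not the authors' exact implementation.

```python
import torch

def masked_cross_attention(q, k, v, patch_mask):
    # q: (T, d) text-token queries; k, v: (P, d) visual patch embeddings;
    # patch_mask: (P,) binary tensor, 0 = patch suppressed by the IGRS mask.
    attn = (q @ k.T) / k.shape[-1] ** 0.5          # (T, P) attention logits
    attn = attn.masked_fill(patch_mask == 0, float("-inf"))
    return torch.softmax(attn, dim=-1) @ v         # queries ignore masked patches
```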
{ "type": "text", "bbox": [0.082, 0.847, 0.48, 0.89], "angle": 0, "content": "Effect of our proposed PWEM and IGRS. Table 5 evaluates the performance improvements achieved by integrating different modules within our methodology. The \"Posi" },
{ "type": "image", "bbox": [0.607, 0.068, 0.824, 0.16], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [0.648, 0.161, 0.78, 0.174], "angle": 0, "content": "(a) Iteration times" },
{ "type": "image", "bbox": [0.613, 0.174, 0.822, 0.266], "angle": 0, "content": null },
{ "type": "image_caption", "bbox": [0.699, 0.268, 0.735, 0.282], "angle": 0, "content": "(b) \\(\\lambda\\)" },
{ "type": "image_caption", "bbox": [0.551, 0.294, 0.878, 0.31], "angle": 0, "content": "Figure 6: Line charts for the two hyperparameters." },
{ "type": "text", "bbox": [0.515, 0.338, 0.913, 0.435], "angle": 0, "content": "tion” category encompasses those test samples that explicitly feature positional expressions. Conversely, the “Others” category serves as its complement. These results demonstrate that our modules not only improve general performance but also enhance the model's ability to manage complex semantic and spatial relations, particularly in positional contexts." },
{ "type": "text", "bbox": [0.515, 0.436, 0.914, 0.617], "angle": 0, "content": "Different assemblies of iteration times and \\(\\lambda\\). Figure 6 presents an ablation study of two hyperparameters in IGRS, analyzing the effect of varying the iteration count and \\(\\lambda\\) on the RefCOCO testA dataset. Figure 6 (a) shows that as the number of iterations increases from 1 to 3, the metric improves, peaking at \\(46.5\\%\\). Beyond three iterations, the performance change is minimal, so selecting 3 iterations is optimal for balancing performance and time efficiency. Figure 6 (b) presents another line chart analyzing the impact of \\(\\lambda\\) in the Grad-CAM update. The metric increases as \\(\\lambda\\) is gradually raised from 0 to 0.2; beyond this point, performance declines with higher \\(\\lambda\\) values. Overall, the optimal value of \\(\\lambda\\) is 0.2." },
{ "type": "title", "bbox": [0.666, 0.632, 0.765, 0.647], "angle": 0, "content": "Conclusion" },
{ "type": "text", "bbox": [0.515, 0.653, 0.915, 0.89], "angle": 0, "content": "This paper presents IteRPrimE, a novel framework for zero-shot Referring Image Segmentation (RIS) that addresses the limitations of previous methods in handling positional sensitivity and complex semantic relationships. By incorporating an Iterative Grad-CAM Refinement Strategy (IGRS) and a Primary Word Emphasis Module (PWEM), IteRPrimE enhances the model's ability to focus accurately on target regions and manage semantic nuances. Extensive experiments on the RefCOCO/+/g and PhraseCut benchmarks demonstrate that IteRPrimE significantly outperforms previous state-of-the-art zero-shot methods, particularly in out-of-domain contexts. These findings highlight the framework's potential to advance zero-shot RIS by improving model sensitivity to positional and semantic details. Future research may extend the Grad-CAM-guided RIS paradigm to segmentation tasks at all levels of granularity with linguistic directives." }
],
[
{ "type": "title", "bbox": [0.204, 0.068, 0.36, 0.084], "angle": 0, "content": "Acknowledgments" },
{ "type": "text", "bbox": [0.084, 0.089, 0.481, 0.119], "angle": 0, "content": "This work was supported by the Shenzhen Science and Technology Program under Grant CJGJZD20220517142402006." },
{ "type": "title", "bbox": [0.235, 0.133, 0.331, 0.149], "angle": 0, "content": "References" },
{ "type": "ref_text", "bbox": [0.085, 0.154, 0.48, 0.196], "angle": 0, "content": "Bai, S.; Liu, Y.; Han, Y.; Zhang, H.; and Tang, Y. 2024. Self-calibrated clip for training-free open-vocabulary segmentation. arXiv preprint arXiv:2411.15869." },
{ "type": "ref_text", "bbox": [0.085, 0.2, 0.482, 0.242], "angle": 0, "content": "Chang, H.; Zhang, H.; Jiang, L.; Liu, C.; and Freeman, W. T. 2022. Maskgit: Masked generative image transformer. In CVPR, 11315-11325." },
{ "type": "ref_text", "bbox": [0.085, 0.246, 0.48, 0.289], "angle": 0, "content": "Cheng, B.; Misra, I.; Schwing, A. G.; Kirillov, A.; and Girdhar, R. 2022. Masked-attention mask transformer for universal image segmentation. In CVPR, 1290-1299." },
{ "type": "ref_text", "bbox": [0.085, 0.293, 0.48, 0.335], "angle": 0, "content": "Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805." },
{ "type": "ref_text", "bbox": [0.085, 0.339, 0.48, 0.381], "angle": 0, "content": "Ding, H.; Liu, C.; He, S.; Jiang, X.; and Loy, C. C. 2023. MeViS: A large-scale benchmark for video segmentation with motion expressions. In CVPR, 2694-2703." },
{ "type": "ref_text", "bbox": [0.085, 0.385, 0.48, 0.427], "angle": 0, "content": "Ding, H.; Liu, C.; Wang, S.; and Jiang, X. 2021. Vision-language transformer and query generation for referring segmentation. In ICCV, 16321-16330." },
{ "type": "ref_text", "bbox": [0.085, 0.431, 0.48, 0.46], "angle": 0, "content": "Ding, J.; Xue, N.; Xia, G.-S.; and Dai, D. 2022. Decoupling zero-shot semantic segmentation. In CVPR, 11583-11592." },
{ "type": "ref_text", "bbox": [0.085, 0.464, 0.48, 0.52], "angle": 0, "content": "Han, K.; Liu, Y.; Liew, J. H.; Ding, H.; Liu, J.; Wang, Y.; Tang, Y.; Yang, Y.; Feng, J.; Zhao, Y.; et al. 2023. Global knowledge calibration for fast open-vocabulary segmentation. In CVPR, 797-807." },
{ "type": "ref_text", "bbox": [0.085, 0.524, 0.48, 0.567], "angle": 0, "content": "Han, Z.; Zhu, F.; Lao, Q.; and Jiang, H. 2024. Zero-shot referring expression comprehension via structural similarity between images and captions. In CVPR, 14364-14374." },
{ "type": "ref_text", "bbox": [0.085, 0.57, 0.48, 0.613], "angle": 0, "content": "He, S.; Guo, T.; Dai, T.; Qiao, R.; Wu, C.; Shu, X.; and Ren, B. 2022. VLMAE: Vision-language masked autoencoder. arXiv preprint arXiv:2208.09374." },
{ "type": "ref_text", "bbox": [0.085, 0.616, 0.48, 0.658], "angle": 0, "content": "Jing, Y.; Kong, T.; Wang, W.; Wang, L.; Li, L.; and Tan, T. 2021. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 9858-9867." },
{ "type": "ref_text", "bbox": [0.085, 0.662, 0.48, 0.704], "angle": 0, "content": "Kazemzadeh, S.; Ordonez, V.; Matten, M.; and Berg, T. 2014. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 787-798." },
{ "type": "ref_text", "bbox": [0.085, 0.709, 0.48, 0.75], "angle": 0, "content": "Kim, N.; Kim, D.; Lan, C.; Zeng, W.; and Kwak, S. 2022. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 18145-18154." },
{ "type": "ref_text", "bbox": [0.085, 0.755, 0.48, 0.797], "angle": 0, "content": "Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; et al. 2023. Segment anything. In ICCV, 4015-4026." },
{ "type": "ref_text", "bbox": [0.085, 0.801, 0.48, 0.843], "angle": 0, "content": "Lai, X.; Tian, Z.; Chen, Y.; Li, Y.; Yuan, Y.; Liu, S.; and Jia, J. 2024. Lisa: Reasoning segmentation via large language model. In CVPR, 9579-9589." },
{ "type": "ref_text", "bbox": [0.085, 0.847, 0.48, 0.89], "angle": 0, "content": "Lee, J.; Lee, S.; Nam, J.; Yu, S.; Do, J.; and Taghavi, T. 2023. Weakly supervised referring image segmentation with intra-chunk and inter-chunk consistency. In ICCV, 21870-21881." },
{ "type": "list", "bbox": [0.085, 0.154, 0.482, 0.89], "angle": 0, "content": null },
{ "type": "ref_text", "bbox": [0.518, 0.069, 0.913, 0.126], "angle": 0, "content": "Li, C.; Xu, H.; Tian, J.; Wang, W.; Yan, M.; Bi, B.; Ye, J.; Chen, H.; Xu, G.; Cao, Z.; et al. 2022. mplug: Effective and efficient vision-language learning by cross-modal skip-connections. arXiv preprint arXiv:2205.12005." },
{ "type": "ref_text", "bbox": [0.518, 0.129, 0.913, 0.183], "angle": 0, "content": "Li, J.; Li, D.; Savarese, S.; and Hoi, S. 2023. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 19730-19742. PMLR." },
{ "type": "ref_text", "bbox": [0.518, 0.188, 0.913, 0.257], "angle": 0, "content": "Li, J.; Selvaraju, R.; Gotmare, A.; Joty, S.; Xiong, C.; and Hoi, S. C. H. 2021. Align before fuse: Vision and language representation learning with momentum distillation. Advances in Neural Information Processing Systems, 34: 9694-9705." },
{ "type": "ref_text", "bbox": [0.518, 0.261, 0.913, 0.316], "angle": 0, "content": "Liang, F.; Wu, B.; Dai, X.; Li, K.; Zhao, Y.; Zhang, H.; Zhang, P.; Vajda, P.; and Marculescu, D. 2023. Open-vocabulary semantic segmentation with mask-adapted clip. In CVPR, 7061-7070." },
{ "type": "ref_text", "bbox": [0.518, 0.32, 0.913, 0.349], "angle": 0, "content": "Liu, C.; Ding, H.; and Jiang, X. 2023. Gres: Generalized referring expression segmentation. In CVPR, 23592-23601." },
{ "type": "ref_text", "bbox": [0.518, 0.352, 0.913, 0.394], "angle": 0, "content": "Liu, R.; Liu, C.; Bai, Y.; and Yuille, A. L. 2019. Clevr-ref+: Diagnosing visual reasoning with referring expressions. In CVPR, 4185-4194." },
{ "type": "ref_text", "bbox": [0.518, 0.397, 0.913, 0.439], "angle": 0, "content": "Liu, Y.; Bai, S.; Li, G.; Wang, Y.; and Tang, Y. 2024a. Open-vocabulary segmentation with semantic-assisted calibration. In CVPR, 3491-3500." },
{ "type": "ref_text", "bbox": [0.518, 0.443, 0.913, 0.486], "angle": 0, "content": "Liu, Y.; Zhang, C.; Wang, Y.; Wang, J.; Yang, Y.; and Tang, Y. 2024b. Universal segmentation at arbitrary granularity with language instruction. In CVPR, 3459-3469." },
{ "type": "ref_text", "bbox": [0.518, 0.489, 0.913, 0.531], "angle": 0, "content": "Luo, J.; Khandelwal, S.; Sigal, L.; and Li, B. 2024a. Emergent Open-Vocabulary Semantic Segmentation from Off-the-shelf Vision-Language Models. In CVPR, 4029-4040." },
{ "type": "ref_text", "bbox": [0.518, 0.533, 0.913, 0.59], "angle": 0, "content": "Luo, Z.; Xiao, Y.; Liu, Y.; Li, S.; Wang, Y.; Tang, Y.; Li, X.; and Yang, Y. 2024b. Soc: Semantic-assisted object cluster for referring video object segmentation. Advances in Neural Information Processing Systems, 36." },
{ "type": "ref_text", "bbox": [0.518, 0.593, 0.913, 0.636], "angle": 0, "content": "Mao, J.; Huang, J.; Toshev, A.; Camburu, O.; Yuille, A. L.; and Murphy, K. 2016. Generation and comprehension of unambiguous object descriptions. In CVPR, 11-20." },
{ "type": "ref_text", "bbox": [0.518, 0.639, 0.913, 0.681], "angle": 0, "content": "Nagaraja, V. K.; Morariu, V. I.; and Davis, L. S. 2016. Modeling context between objects for referring expression understanding. In ECCV, 792-807. Springer." },
{ "type": "ref_text", "bbox": [0.518, 0.684, 0.913, 0.726], "angle": 0, "content": "Ni, M.; Zhang, Y.; Feng, K.; Li, X.; Guo, Y.; and Zuo, W. 2023. Ref-diff: Zero-shot referring image segmentation with generative models. arXiv preprint arXiv:2308.16777." },
{ "type": "ref_text", "bbox": [0.518, 0.729, 0.913, 0.785], "angle": 0, "content": "Radford, A.; Kim, J. W.; Hallacy, C.; Ramesh, A.; Goh, G.; Agarwal, S.; Sastry, G.; Askell, A.; Mishkin, P.; Clark, J.; et al. 2021. Learning transferable visual models from natural language supervision. In ICML, 8748-8763. PMLR." },
{ "type": "ref_text", "bbox": [0.518, 0.788, 0.913, 0.83], "angle": 0, "content": "Rombach, R.; Blattmann, A.; Lorenz, D.; Esser, P.; and Ommer, B. 2022. High-resolution image synthesis with latent diffusion models. In CVPR, 10684-10695." },
{ "type": "ref_text", "bbox": [0.518, 0.834, 0.913, 0.889], "angle": 0, "content": "Selvaraju, R. R.; Cogswell, M.; Das, A.; Vedantam, R.; Parikh, D.; and Batra, D. 2017. Grad-cam: Visual explanations from deep networks via gradient-based localization. In CVPR, 618-626." },
{ "type": "list", "bbox": [0.518, 0.069, 0.913, 0.889], "angle": 0, "content": null }
],
[
{ "type": "ref_text", "bbox": [0.084, 0.069, 0.48, 0.111], "angle": 0, "content": "Shah, N. A.; VS, V.; and Patel, V. M. 2024. LQMFormer: Language-aware Query Mask Transformer for Referring Image Segmentation. In CVPR, 12903-12913." },
{ "type": "ref_text", "bbox": [0.085, 0.114, 0.48, 0.17], "angle": 0, "content": "Shen, H.; Zhao, T.; Zhu, M.; and Yin, J. 2024. Ground-VLP: Harnessing Zero-Shot Visual Grounding from Vision-Language Pre-training and Open-Vocabulary Object Detection. In AAAI, volume 38, 4766-4775." },
{ "type": "ref_text", "bbox": [0.085, 0.173, 0.48, 0.216], "angle": 0, "content": "Shin, G.; Xie, W.; and Albanie, S. 2022. Reco: Retrieve and co-segment for zero-shot transfer. Advances in Neural Information Processing Systems, 35: 33754-33767." },
{ "type": "ref_text", "bbox": [0.084, 0.219, 0.48, 0.262], "angle": 0, "content": "Strudel, R.; Laptev, I.; and Schmid, C. 2022. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725." },
{ "type": "ref_text", "bbox": [0.084, 0.265, 0.48, 0.307], "angle": 0, "content": "Sun, S.; Li, R.; Torr, P.; Gu, X.; and Li, S. 2024. Clip as rnn: Segment countless visual concepts without training endeavor. In CVPR, 13171-13182." },
{ "type": "ref_text", "bbox": [0.085, 0.31, 0.48, 0.339], "angle": 0, "content": "Suo, Y.; Zhu, L.; and Yang, Y. 2023. Text augmented spatial-aware zero-shot referring image segmentation. EMNLP." },
{ "type": "ref_text", "bbox": [0.085, 0.342, 0.48, 0.398], "angle": 0, "content": "Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. Advances in Neural Information Processing Systems, 30." },
{ "type": "ref_text", "bbox": [0.085, 0.401, 0.48, 0.457], "angle": 0, "content": "Wang, H.; Zhan, Y.; Liu, L.; Ding, L.; Yang, Y.; and Yu, J. 2024a. Towards Alleviating Text-to-Image Retrieval Hallucination for CLIP in Zero-shot Learning. arXiv preprint arXiv:2402.18400." },
{ "type": "ref_text", "bbox": [0.085, 0.46, 0.48, 0.515], "angle": 0, "content": "Wang, X.; Yu, Z.; De Mello, S.; Kautz, J.; Anandkumar, A.; Shen, C.; and Alvarez, J. M. 2022a. Freesolo: Learning to segment objects without annotations. In CVPR, 14176-14186." },
{ "type": "ref_text", "bbox": [0.085, 0.519, 0.48, 0.562], "angle": 0, "content": "Wang, Y.; Zhao, R.; and Sun, Z. 2023. Efficient Remote Sensing Transformer for Coastline Detection with Sentinel-2 Satellite Imagery. In IGARSS, 5439-5442. IEEE." },
{ "type": "ref_text", "bbox": [0.085, 0.565, 0.48, 0.634], "angle": 0, "content": "Wang, Y.; Zhao, R.; Wei, S.; Ni, J.; Wu, M.; Luo, Y.; and Luo, C. 2024b. Convolution Meets Transformer: Efficient Hybrid Transformer for Semantic Segmentation with Very High Resolution Imagery. In IGARSS 2024, 9688-9691. IEEE." },
{ "type": "ref_text", "bbox": [0.085, 0.638, 0.48, 0.68], "angle": 0, "content": "Wang, Z.; Lu, Y.; Li, Q.; Tao, X.; Guo, Y.; Gong, M.; and Liu, T. 2022b. Cris: Clip-driven referring image segmentation. In CVPR, 11686-11695." },
{ "type": "ref_text", "bbox": [0.085, 0.683, 0.48, 0.725], "angle": 0, "content": "Wu, C.; Lin, Z.; Cohen, S.; Bui, T.; and Maji, S. 2020. Phrasecut: Language-based image segmentation in the wild. In CVPR, 10216-10225." },
{ "type": "ref_text", "bbox": [0.085, 0.728, 0.48, 0.784], "angle": 0, "content": "Xu, H.; Ye, Q.; Yan, M.; Shi, Y.; Ye, J.; Xu, Y.; Li, C.; Bi, B.; Qian, Q.; Wang, W.; et al. 2023a. mplug-2: A modularized multi-modal foundation model across text, image and video. In ICML, 38728-38748. PMLR." },
{ "type": "ref_text", "bbox": [0.085, 0.788, 0.48, 0.83], "angle": 0, "content": "Xu, J.; De Mello, S.; Liu, S.; Byeon, W.; Breuel, T.; Kautz, J.; and Wang, X. 2022. Groupvit: Semantic segmentation emerges from text supervision. In CVPR, 18134-18144." },
{ "type": "ref_text", "bbox": [0.085, 0.834, 0.48, 0.889], "angle": 0, "content": "Xu, X.; Wu, C.; Rosenman, S.; Lal, V.; Che, W.; and Duan, N. 2023b. Bridgetower: Building bridges between encoders in vision-language representation learning. In AAAI, volume 37, 10637-10647." },
{ "type": "list", "bbox": [0.084, 0.069, 0.48, 0.889], "angle": 0, "content": null },
{ "type": "ref_text", "bbox": [0.518, 0.069, 0.913, 0.111], "angle": 0, "content": "Yang, Z.; Wang, J.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2022. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 18155-18165." },
{ "type": "ref_text", "bbox": [0.518, 0.114, 0.913, 0.156], "angle": 0, "content": "Yang, Z.; Wang, J.; Ye, X.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2024. Language-aware vision transformer for referring segmentation. IEEE TPAMI." },
{ "type": "ref_text", "bbox": [0.519, 0.159, 0.914, 0.213], "angle": 0, "content": "Yu, J.; Wang, Z.; Vasudevan, V.; Yeung, L.; Seyedhosseini, M.; and Wu, Y. 2022. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917." },
{ "type": "ref_text", "bbox": [0.519, 0.217, 0.914, 0.259], "angle": 0, "content": "Yu, S.; Seo, P. H.; and Son, J. 2023. Zero-shot referring image segmentation with global-local context features. In CVPR, 19456-19465." },
{ "type": "ref_text", "bbox": [0.519, 0.262, 0.914, 0.29], "angle": 0, "content": "Zhou, C.; Loy, C. C.; and Dai, B. 2022. Extract free dense labels from clip. In ECCV, 696-712. Springer." },
{ "type": "ref_text", "bbox": [0.519, 0.293, 0.914, 0.334], "angle": 0, "content": "Zhu, C.; and Chen, L. 2024. A survey on open-vocabulary detection and segmentation: Past, present, and future. IEEE TPAMI." },
{ "type": "list", "bbox": [0.518, 0.069, 0.914, 0.334], "angle": 0, "content": null }
]
]
data/2025/2503_00xxx/2503.00936/ee8e137b-6353-4aea-9f54-e9f3f0a4de81_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29e85949b5431ab53b1df52378353c41b2970c64bbbab1b54ae1686006bdf591
size 4619713
data/2025/2503_00xxx/2503.00936/full.md ADDED
@@ -0,0 +1,342 @@
# IteRPrime: Zero-shot Referring Image Segmentation with Iterative Grad-CAM Refinement and Primary Word Emphasis
|
| 2 |
+
|
| 3 |
+
Yuji Wang*, Jingchen Ni*, Yong Liu, Chun Yuan, Yansong Tang†
|
| 4 |
+
|
| 5 |
+
Shenzhen International Graduate School, Tsinghua University *{yuji-wan24, njc24} @ mails.tsinghua.edu.cn, †tang.yansong@sz.tsinghua.edu.cn

# Abstract

Zero-shot Referring Image Segmentation (RIS) identifies the instance mask that best aligns with a specified referring expression without training or fine-tuning, significantly reducing the labor-intensive annotation process. Despite achieving commendable results, previous CLIP-based models have a critical drawback: they exhibit a notably reduced capacity to discern the relative spatial relationships of objects. This is because they generate all possible masks on an image and evaluate each masked region for similarity to the given expression, which often decreases sensitivity to direct positional cues in the text input. Moreover, most methods are weak at modeling the relationships between primary words and their contexts, causing confusion and reduced accuracy in identifying the correct target region. To address these challenges, we propose IteRPrimE (Iterative Grad-CAM Refinement and Primary word Emphasis), which leverages a saliency heatmap produced by Grad-CAM from a Vision-Language Pre-trained (VLP) model trained for image-text matching. An iterative Grad-CAM refinement strategy is introduced to progressively enhance the model's focus on the target region and overcome positional insensitivity, creating a self-correcting effect. Additionally, we design the Primary Word Emphasis module to help the model handle complex semantic relations, enhancing its ability to attend to the intended object. Extensive experiments conducted on the RefCOCO/+/g and PhraseCut benchmarks demonstrate that IteRPrimE outperforms previous SOTA zero-shot methods, particularly excelling in out-of-domain scenarios.

Code — https://github.com/VoyageWang/IteRPrimE

# Introduction

Referring Image Segmentation (RIS) requires the model to generate a pixel-level mask of the referred object based on a textual description, extending its applicability to various tasks such as robot interaction and image editing (Yang et al. 2024, 2022; Liu et al. 2024b; Lai et al. 2024; Luo et al. 2024b). Different from standard semantic segmentation (Wang, Zhao, and Sun 2023; Wang et al. 2024b; Han et al. 2023; Luo et al. 2024a; Bai et al. 2024), RIS necessitates differentiating instances within the same category and their relationships with other objects or the scene, which places high demands on the semantic understanding and spatial perception of the model. However, annotating exact pairs of images, descriptions, and ground-truth masks is both expensive and time-intensive, as annotating a query requires a grasp of diverse positional and attributive details within the image (Liu, Ding, and Jiang 2023; Ding et al. 2023; Liu et al. 2019). Recent weakly supervised RIS techniques (Strudel, Laptev, and Schmid 2022; Lee et al. 2023; Xu et al. 2022) have been introduced to mitigate these annotation challenges, yet they still depend on paired data for training and achieve relatively poor performance. In contrast, a zero-shot approach holds greater value. Leveraging vision-language pre-trained (VLP) models such as CLIP (Radford et al. 2021), this approach generalizes efficiently across diverse concepts and unseen categories without further training or fine-tuning.

Existing methodologies that harness zero-shot learning's freedom from fitting training data often employ a two-stage pipeline, shown in Figure 1 (a). Acting as a discriminator between the images masked by candidate masks and the expression, CLIP is used to select the instance mask with the highest similarity score (Sun et al. 2024; Yu, Seo, and Son 2023; Suo, Zhu, and Yang 2023; Ni et al. 2023). However, we observed that these methods often fail when encountering text inputs with positional information such as "left" and "right". Because a masked image contains only a single instance, the absence of relative spatial perception is an inherent limitation of these CLIP-based paradigms. Previous works alleviate this issue by injecting human priors that explicitly prompt CLIP with the given direction cues (Ni et al. 2023; Suo, Zhu, and Yang 2023). More specifically, they manually design spatial decaying weights from 1 to 0 along the directions indicated by the text phrases to make the model aware of positional information, but this cannot generalize to scenarios outside the predefined directions, such as "next to". Additionally, the domain shift for CLIP from natural images to masked images can also impact segmentation performance (Liu et al. 2024a; Ding et al. 2022; Zhu and Chen 2024).



Figure 1: (a) The general pipeline of CLIP-based methods. They lack the perception of relative spatial position due to the masked images. (b) The pipeline of our IteRPrimE with the Iterative Grad-CAM Refinement Strategy and Primary Word Emphasis of "bike". (c) A comparative experiment on positional-phrase accuracy between IteRPrimE and GL-CLIP on RefCOCO and RefCOCOg.

Some researchers (Lee et al. 2023) have leveraged Grad-CAM (Selvaraju et al. 2017) and created two specialized loss functions to attenuate the detrimental effects of positional phrases in weakly supervised settings. Although the losses are unsuitable for zero-shot scenarios, Grad-CAM can partially mitigate the deleterious effects associated with masked images. This is because the method preserves the model's spatial perception by delineating the most attended regions in the original image for localization, as shown in Figure 1 (b). Nevertheless, by analyzing the occurrences and characteristics of Grad-CAM, we still find two major problems. First, Grad-CAM struggles to discriminate the semantic relations between different noun phrases because it does not weigh the primary word more strongly than the other context words, as shown in the baseline predictions of Figure 2 (a). Specifically, the model's inability to effectively prioritize the main word in complex expressions undermines its overall performance. Second, Grad-CAM tends to identify only small areas of the referred object, which consequently results in selecting undesired instance masks.

To overcome these challenges, we propose a novel framework, IteRPrimE (Iterative Grad-CAM Refinement and Primary word Emphasis), utilizing Grad-CAM for zero-shot RIS. First, we implement an iterative refinement strategy to enhance the representational accuracy and enlarge the indicated area of Grad-CAM, progressively improving the model's concentration on the target object with each cycle, as shown in Figure 2 (b). This strategy is particularly beneficial when the referring expression includes positional words, as it offers the model chances for self-correction at each iteration, as shown in Figure 2 (c). Second, the Primary Word Emphasis Module (PWEM) plays a crucial role in strengthening the model's handling of the complex semantic relationships between primary words and their contexts. This module works by emphasizing the Grad-CAMs of the main word within the referring expression, from local and global perspectives. Finally, a post-processing module is designed to select a high-quality, contiguous instance mask from a mask proposal network that encapsulates the target object indicated by Grad-CAM. By addressing these limitations, IteRPrimE achieves superior performance over prior zero-shot state-of-the-art techniques, notably excelling in out-of-domain scenarios and exhibiting robust cross-domain transfer. Our main contributions include:

1. To the best of our knowledge, we are the first to use Grad-CAM to instruct segmentors for zero-shot RIS tasks.
2. We propose the Iterative Grad-CAM Refinement Strategy (IGRS) and Primary Word Emphasis Module (PWEM) to enhance the accuracy and representation of Grad-CAM for better localization, as shown in Figure 2.
3. Compared to the previous CLIP-based method, our method significantly outperforms it on inputs containing positional information, as shown in Figure 1 (c). Additionally, the approach achieves better performance on four popular benchmarks, especially on the out-of-domain dataset.

# Related Works

Zero-shot referring image segmentation. In the fully-supervised setting, training a well-specified model for RIS needs massive paired text-visual annotations, which are not always affordable or accessible (Shah, VS, and Patel 2024; Liu, Ding, and Jiang 2023; Yang et al. 2022; Wang et al. 2022b; Kim et al. 2022; Ding et al. 2021; Jing et al. 2021). Besides, these models are relatively weak in out-of-domain scenarios due to limited data and the domain gap. Therefore, zero-shot RIS methods have been proposed as an alternative. Global- and local-CLIP (GL-CLIP) (Yu, Seo, and Son 2023) was the first method proposed to segment the instance given the text input with zero-shot transfer. Interfacing with the mask proposal network FreeSOLO (Wang et al. 2022a), the approach leverages both global and local textual-image similarity to enhance the discriminative capabilities of the CLIP model. Building on GL-CLIP, some researchers (Wang et al. 2024a) combine the original CLIP similarity score with their proposed Balanced Score with Auxiliary Prompts (BSAP), namely BSAP-H, to reduce CLIP's text-to-image retrieval hallucination. Ref-Diff (Ni et al. 2023) demonstrates that a text-to-image generative model like Stable Diffusion (Rombach et al. 2022) can generate the intended mask from the cross-attention map, achieving considerable performance. TAS (Suo, Zhu, and Yang 2023) mainly depends on another large captioner network, BLIP2 (Li et al. 2023), to mine negative text on top of the mask-proposal-plus-discriminator paradigm, which achieves favorable performance. Additionally, SAM (Kirillov et al. 2023) is utilized for better segmentation accuracy. However, these CLIP-based methods struggle to segment the referred subject given positionally described text queries, due to the absence of spatial relationships in the masked image.

(a) Expression: a man standing next to a young girl on a grassy hillside


Baseline


(b) Expression: businessman posing in front of an airplane door
1st Iteration

2nd Iteration



Figure 2: (a) The weak ability of the baseline model to differentiate the semantic relationships between the primary word "man" and the other noun phrases colored green and orange. PWEM can make the model aware of the targeted instance referred to by the main word. (b) The IGRS facilitates the expansion of highlighted areas, surpassing the initially confined small regions. (c) IGRS offers the model chances for self-correction.

Grad-CAM for localization. Grad-CAM (Selvaraju et al. 2017) was proposed to provide explainable clues indicating the regions the model attends to for its prediction. In the context of the Image-Text Matching (ITM) objective of any VLP (Li et al. 2022; Xu et al. 2023a,b), Grad-CAM enables a modality mapping from the textual to the visual domain, specifically calibrated for visual localization. Many works utilize it to localize objects given text (Shen et al. 2024; Lee et al. 2023; Xu et al. 2022; He et al. 2022; Li et al. 2021). However, these approaches either generate a bounding-box annotation or are employed in weakly supervised scenarios. Compared to approaches (Shin, Xie, and Albanie 2022; Zhou, Loy, and Dai 2022; Luo et al. 2024a) that perform zero-shot open-vocabulary semantic segmentation with Grad-CAM, we are the first to apply Grad-CAM to zero-shot RIS and to study its behavior under longer, more complex textual inputs instead of a single category noun. To address the lack of differentiation between the main word and the other words, the PWEM is proposed to aggregate Grad-CAM at the local-spatial and global-token levels. Second, a novel iterative refinement strategy is employed to obtain a better Grad-CAM representation step by step.

# Preliminaries

The generation of Grad-CAM is essential for harnessing it for RIS. Given an image-expression pair $(I, E)$, we can obtain the corresponding embeddings, $v$ and $e$, from the visual encoder, $v = f_{I}(I)$, and the text encoder, $e = f_{T}(E)$, respectively. For multimodal fusion, these two embeddings are fed to cross-attention layers that align the visual and textual information (Yu et al. 2022; Vaswani et al. 2017). The resultant attention activation maps, $\mathbf{A}$, indicate the activated and recognized regions of $v$ with respect to each query textual token in $e$. However, these indication clues are usually scattered rather than densely distributed over the relevant regions. Thus, the gradients, $\mathbf{G}$, can be used to sharpen $\mathbf{A}$ and dilute the effect of non-relevant regions, which contribute less to the output objective $y$, such as Image-Text Matching (ITM). The result of this gradient-weighted dilution process is known as Grad-CAM, $\mathbf{H}$.

In the cross-attention layer, the Grad-CAM can be formulated by Equation (1):

$$
\mathbf{H} = \mathbf{A} \odot \mathbf{G}, \tag{1a}
$$

$$
\mathbf{G} = \operatorname{clamp}\left(\frac{\partial y}{\partial \mathbf{A}}, 0, \infty\right), \tag{1b}
$$

where clamp removes negative gradients, which often represent noise or irrelevant features. Finally, the Grad-CAM used to indicate the image regions, $\mathbf{H}_f$, is the mean over all $|e|$ text tokens, as shown in Equation (2):

$$
\mathbf{H}_{f} = \mathbb{E}_{k}\left(\mathbf{H}^{k}\right), \quad k \in \{1, \dots, |e|\}, \quad \mathbf{H}_{f} \in \mathbb{R}^{B \times h \times w}, \tag{2}
$$

where $\mathbf{H}^k$ denotes the Grad-CAM for the $k$-th text token, $B$ is the batch size, and $h \times w$ is the spatial size of the visual latent space. This averaging process treats every word equally and ignores the importance of the primary word, thereby undermining RIS performance.
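
To make Equations (1)-(2) concrete, the following is a minimal PyTorch sketch, assuming access to a cross-attention map that participates in the autograd graph and to the scalar ITM logit; the function and variable names are illustrative, not part of any released implementation.

```python
import torch

def grad_cam_from_cross_attention(attn, itm_logit):
    """Minimal Grad-CAM per Eqs. (1)-(2). `attn` has shape
    (B, num_text_tokens, h*w) and requires grad; `itm_logit` is the
    scalar image-text matching score the gradients flow from."""
    grads = torch.autograd.grad(itm_logit, attn, retain_graph=True)[0]
    grads = grads.clamp(min=0)          # Eq. (1b): drop negative gradients
    H = attn * grads                    # Eq. (1a): element-wise weighting
    H_f = H.mean(dim=1)                 # Eq. (2): average over text tokens
    return H_f                          # (B, h*w); reshape to (B, h, w)
```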

# Method

# Overview

Figure 1 (b) illustrates the entire workflow of our zero-shot RIS method, IteRPrimE, which can be divided into two parts: an iterative Grad-CAM generator and a selective mask proposal network. The Grad-CAM generator is a VLP model with cross-attention layers, into which the proposed IGRS and PWEM are integrated. Within the mask proposal network, a post-processing module is designed to select candidate instance masks, ensuring accurate and detailed localization of the target object.

 (a)
 (b)
 (c)
 (d)
 (e)

Figure 3: The Grad-CAMs and attention maps (AM) of "partially damaged car". Since the attention map (d) and Grad-CAM (e) of the primary word "car" both contain unique activation areas compared to the others, they can be harnessed from local-spatial and global-token perspectives, respectively, to enhance the focus on the targeted regions.

# Primary Word Emphasis Module

The PWEM is an essential component of IteRPrimE, designed to confront the challenge posed by Grad-CAM's weak capability to manage the semantic relationships in input texts featuring multiple potential referred nouns. This module emphasizes the Grad-CAM of the primary word in the expression, thereby increasing the focus on the main word during the averaging operation. Specifically, we first use an NLP toolbox to parse the part-of-speech (POS) tags of each word, filtering out a set of text tokens that includes the special [CLS] token of BERT (Devlin et al. 2018), nouns, adjectives, verbs, proper nouns, and numerals. These words are recognized as effective tokens $W$ that provide distinct semantics and contextual information. They are composed of primary words $W_{m}$ and their contexts $W_{c}$, where $W = W_{m} \cup W_{c}$ and $W_{m} \cap W_{c} = \emptyset$. Then, we extract the primary noun from these effective words (e.g., "car" in "partially damaged car", shown in Figure 3) with a designed algorithm: it first generates a syntax tree, identifies the leftmost noun phrase (NP), and then finds the rightmost noun (NN) within that NP, as detailed in Algorithm 1 in the appendix.
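
As a rough illustration of this extraction step, the sketch below approximates Algorithm 1 using spaCy's noun chunks, a dependency-based stand-in for the constituency parse the paper describes; the pipeline name is an assumption and the behavior may differ from the authors' parser.

```python
import spacy

nlp = spacy.load("en_core_web_sm")  # small English pipeline (assumed available)

def extract_primary_noun(expression: str) -> str:
    """Approximation of Algorithm 1: take the leftmost noun phrase,
    then return its rightmost noun."""
    doc = nlp(expression)
    chunks = list(doc.noun_chunks)            # leftmost NP comes first
    if not chunks:
        return ""
    first_np = chunks[0]
    nouns = [t for t in first_np if t.pos_ in ("NOUN", "PROPN")]
    return nouns[-1].text if nouns else first_np.root.text

print(extract_primary_noun("partially damaged car"))  # -> "car"
```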

As shown in the right part of Figure 4, we emphasize the effect of the primary word's Grad-CAM from two perspectives: local spatial-level and global token-level augmentation. Unlike the other contextual effective words $W_{c}$, the attention map $\mathbf{A}^{W_m}$ and Grad-CAM $\mathbf{H}_m$ of the primary token hold unique activated areas that likely indicate the correct Grad-CAM localization, as shown in Figure 3. Therefore, to highlight and isolate the specific contribution of the primary word at the local spatial level, we compute the $\mathrm{L}_2$-normalized difference, $\mathbf{A}_{dif}$, between the main-word activation map and the other context-word activation maps, $\mathbf{A}^{W_c}$. The activation difference is further integrated with the gradients of the main word, $\mathbf{G}^{W_m}$, forming a spatial modulator that indicates the local spatial importance within the main-word Grad-CAM, $\mathbf{H}_m$. Thus, we obtain the local spatial-level enhanced Grad-CAM of the primary word, $\mathbf{H}_l$, as shown in Equation (3):

$$
\mathbf{A}_{dif} = \frac{\mathbf{A}^{W_{m}} - \mathbf{A}^{W_{c}}}{\left\| \mathbf{A}^{W_{m}} - \mathbf{A}^{W_{c}} \right\|_{2}}, \tag{3a}
$$

$$
\mathbf{H}_{l} = \mathbf{A}_{dif} \odot \mathbf{G}^{W_{m}} \odot \mathbf{H}_{m}, \tag{3b}
$$

where $\mathbf{H}_m = \mathbf{A}^{W_m} \odot \mathbf{G}^{W_m}$ following Equation (1). Broadcasting occurs when the dimensions do not match.

From the global perspective, we manually increase the weight of the main-word Grad-CAM $\mathbf{H}_m$ along the token axis during the mean operation, which provides additional focus on the primary token. We thus obtain the global token-level Grad-CAM $\mathbf{H}_g$ by Equation (4):

$$
W^{\prime} = W \cup \left\{W_{m}\right\} \times N_{c}, \tag{4a}
$$

$$
\mathbf{H}_{g} = \mathbf{A}^{W^{\prime}} \odot \mathbf{G}^{W^{\prime}}, \tag{4b}
$$

where $N_{c}$ is the number of context tokens and $\{W_m\} \times N_c$ means repeating the main word $N_{c}$ times. Finally, the resulting augmented Grad-CAM, $\mathbf{H}_a$, is the mean of the concatenated local and global Grad-CAMs, $\mathbf{H}_c = [\mathbf{H}_g, \mathbf{H}_l]$, along the token axis. This map significantly improves the model's Grad-CAM localization accuracy, as shown in the PWEM row of Figure 2 (a).
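
A minimal sketch of how the local (Eq. 3) and global (Eq. 4) augmentations could be aggregated, assuming per-token attention maps and clamped gradients of shape (T, h, w); all names, shapes, and the epsilon guard are illustrative rather than taken from the paper's code.

```python
import torch

def pwem_aggregate(A, G, main_idx, ctx_idx):
    """Sketch of Eqs. (3)-(4). A, G: (T, h, w) attention maps and
    clamped gradients for the T effective tokens; `main_idx` selects
    the primary word, `ctx_idx` the context words."""
    A_m, G_m = A[main_idx], G[main_idx]             # primary-word maps
    H_m = A_m * G_m                                  # Eq. (1) for the main word
    # Local spatial-level augmentation, Eq. (3)
    diff = A_m.unsqueeze(0) - A[ctx_idx]             # broadcast over contexts
    A_dif = diff / diff.flatten(1).norm(dim=1).view(-1, 1, 1).clamp(min=1e-6)
    H_l = A_dif * G_m * H_m                          # (N_c, h, w), broadcast
    # Global token-level augmentation, Eq. (4): repeat the main word N_c times
    N_c = len(ctx_idx)
    H_g = torch.cat([A * G, H_m.expand(N_c, -1, -1)], dim=0)
    # Final augmented Grad-CAM H_a: mean over the token axis of [H_g, H_l]
    return torch.cat([H_g, H_l], dim=0).mean(dim=0)
```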

# Iterative Grad-CAM Refinement Strategy

Masked Language Modeling (MLM) has inspired bidirectional generative image Transformers such as MaskGIT (Chang et al. 2022), whose iterative generative paradigm offers the model chances for self-correction, optimizing step by step in the latent space. Inspired by this, we propose a novel iterative Grad-CAM strategy that gradually steers the model's attention to regions it was not attentive to initially, which brings two benefits. On the one hand, when Grad-CAM localizes the instance correctly at the start, the gradually refined Grad-CAM gathers more tightly around the targeted instance region. On the other hand, when the first localization is incorrect, the model can attend to other semantic instances and recheck the Grad-CAM prediction, which is especially helpful for positional-phrase inputs. The overall IGRS approach is illustrated in the left part of Figure 4.



Figure 4: The proposed IGRS (left) and PWEM (right). The mask $M_t'$ is the attention mask for the cross-attention layers, obtained by dropping the most salient regions of the Grad-CAM to zero. PWEM filters out meaningless tokens and augments the Grad-CAM representation from local and global perspectives.

For notational simplicity, we use $H_{t}$ to denote the $t$-th iteration Grad-CAM produced by the PWEM, $\mathbf{H}_a$. Equation (5) delineates the aggregation and refinement of the Grad-CAM representation, combining it with the Grad-CAM from the previous iteration step $(t-1)$ under the zero initial condition $H_0' = 0$:

$$
H_{t}^{\prime} = \lambda H_{t-1}^{\prime} + (1 - \lambda)\, \sigma\left(H_{t}\right), \tag{5}
$$

where $H_{t-1}^{\prime}$ and $H_{t}^{\prime}$ are the refined heatmaps from the $(t-1)$-th and $t$-th iterations, $\sigma(\cdot)$ is a sigmoid function that scales the values appropriately, and the hyperparameter $\lambda$ is a balancing factor. To instruct the model to focus on regions it previously ignored, in each iteration a binary attention mask $M_{t}$ is generated from the refined Grad-CAM heatmap $H_{t}$ by dropping the most attended region to 0, as shown in Equation (6):

$$
M_{t} = \mathcal{P}\left(H_{t}, \theta\right), \quad \mathcal{P}(H, \theta) = \begin{cases} 0 & \text{if } \sigma(H) \geq \theta \\ 1 & \text{if } \sigma(H) < \theta, \end{cases} \tag{6}
$$

where $\mathcal{P}(H, \theta)$ applies a sigmoid to stretch the values and then thresholds the result at $\theta$ to create a binary mask. The binary mask $M_{t}$ is then combined with the previous mask $M_{t-1}^{\prime}$ by the logical AND operation, $M_t' = M_{t-1}' \wedge M_t$, where $M_0'$ is a tensor of ones. The $\wedge$ ensures the model expands to new regions regardless of the places previously focused on. This binary attention mask is fed into the cross-attention layer of the VLP to mask out the corresponding visual regions in the embedding $v$, so that the text token queries no longer attend to the zeroed regions of the mask.

For an iterative algorithm, the stopping condition is essential. To make the iterative process more flexible, we introduce a dynamic stopping criterion based on a proposed soft ITM score at step $t$, $S^{(t)} = \mathrm{ITM}^{(t)} \cdot R^{(t)}$, the product of the ITM score from the VLP model and a relevance score $R^{(t)}$ defined by

$$
R^{(t)} = \frac{\sum_{x \in X} \sum_{y \in Y} \left(1 - \widetilde{H}_{t-1}^{\prime}(x, y)\right)}{X \times Y}, \tag{7}
$$

where $\widetilde{H}_{t-1}^{\prime}$ is $H_{t-1}^{\prime}$ interpolated to the original image size, with width $X$ and height $Y$. This relevance score measures the average overlooked Grad-CAM intensity. A higher $R^{(t)}$ indicates that some regions received little attention previously, guiding the model to focus on these overlooked areas in the next iteration. If the score of the current iteration, $S^{(t)}$, is less than the score of the previous iteration, $S^{(t-1)}$, the iterative process terminates. The total number of iterations is capped at $\nu$.
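
Putting Equations (5)-(7) together, one possible shape of the IGRS loop is sketched below; `generate_pwem_cam` and `itm_score` are hypothetical wrappers around the VLP forward pass, and the interpolation of the heatmap to image size is omitted for brevity.

```python
import torch

def igrs(generate_pwem_cam, itm_score, lam=0.2, theta=0.5, max_iters=3):
    """Sketch of the IGRS loop (Eqs. 5-7). `generate_pwem_cam(mask)` is
    assumed to return the PWEM Grad-CAM H_t of shape (h, w) under the
    binary attention mask `mask`; `itm_score(mask)` returns the scalar
    ITM score."""
    H_ref, S_prev, mask = None, None, None
    for t in range(max_iters):
        H_t = generate_pwem_cam(mask)
        if mask is None:                        # M_0' is a tensor of ones
            mask = torch.ones_like(H_t)
            H_ref = torch.zeros_like(H_t)       # zero initial condition H_0'
        R = (1 - H_ref).mean().item()           # Eq. (7): relevance score
        S = itm_score(mask) * R                 # soft ITM score S^(t)
        if S_prev is not None and S < S_prev:   # dynamic stopping criterion
            break
        S_prev = S
        H_ref = lam * H_ref + (1 - lam) * torch.sigmoid(H_t)   # Eq. (5)
        M_t = (torch.sigmoid(H_t) < theta).float()             # Eq. (6)
        mask = mask * M_t                       # logical AND with M'_{t-1}
    return H_ref
```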

# Selective Mask Proposal Network

Through the aforementioned steps, we can employ the Grad-CAM indication clue to instruct the segmentor to predict the referred instance mask. For a given image, the mask proposal network predicts $N_{b}$ masks, but it cannot autonomously determine which object mask the user refers to with the language. Therefore, a selection module within the network is designed to select the mask indicated by the Grad-CAM; it divides the selection procedure into two phases: a filtering phase and a scoring phase.

Assuming that the Grad-CAM has successfully localized the instance, the center point of the Grad-CAM should lie within the inner part of the object. Based on this hypothesis, the selection mechanism starts with a preliminary evaluation involving two main criteria. First, we identify the set of coordinates, $\mathcal{C}_{max}$, where the heatmap reaches its peak values. Then, the $m$-th candidate mask $B^{m}$ is examined to determine whether it includes at least one activated pixel at any of these coordinates. Second, to ensure mask quality, we apply a connected-component labeling technique to constrain the number of connected components in each mask, ensuring that it does not exceed a predefined threshold $\kappa$. The combined criteria for the preliminary evaluation are defined as follows:

$$
\begin{array}{l} \mathcal{A} = \left\{ m \in N_{b} \mid B_{(x, y)}^{m} \neq 0, \ \exists (x, y) \in \mathcal{C}_{max} \right\}, \\ \mathcal{F} = \left\{ m \in N_{b} \mid g_{cc}\left(B^{m}\right) \leq \kappa \right\}, \\ \mathcal{D} = \mathcal{A} \cap \mathcal{F}. \end{array} \tag{8}
$$

In the above equations, $g_{cc}$ denotes a function that counts the connected components of the $m$-th candidate among the $N_b$ masks. The intersection of sets $\mathcal{A}$ and $\mathcal{F}$, denoted $\mathcal{D}$, yields the subset of candidate masks that fulfill both the activation and mask-quality requirements. This evaluation filters out irrelevant and empty masks to reduce computational cost and enhance efficiency.
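
A small sketch of this filtering phase (Eq. 8), using `scipy.ndimage.label` for connected-component counting; the array shapes and helper name are illustrative.

```python
import numpy as np
from scipy import ndimage

def filter_masks(masks, heatmap, kappa=12):
    """Sketch of Eq. (8). masks: (N_b, H, W) binary arrays; heatmap:
    (H, W) Grad-CAM at image resolution. Returns indices of candidates
    that cover a peak coordinate and have <= kappa connected components."""
    peaks = np.argwhere(heatmap == heatmap.max())      # the set C_max
    keep = []
    for m, B in enumerate(masks):
        covers_peak = any(B[x, y] for x, y in peaks)   # criterion for A
        _, n_cc = ndimage.label(B)                     # criterion for F
        if covers_peak and n_cc <= kappa:
            keep.append(m)
    return keep                                        # the set D
```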

After the preliminary filtering phase, we evaluate each remaining candidate mask with a weighted scoring mechanism that leverages the Grad-CAM heatmap, computing an element-wise-product-based score for each mask against the heatmap. We define the score of the $j$-th candidate mask from the set $\mathcal{D}$ as $Z(j)$. The scoring process is formulated as

$$
\begin{array}{l} Z(j) = \sum_{x \in X} \sum_{y \in Y} \left( B_{(x, y)}^{j} + B_{(x, y)}^{j} \odot \widetilde{H}_{(x, y)}^{\prime} \right), \\ \hat{Z}(j) = \frac{Z(j)}{\sum_{x \in X} \sum_{y \in Y} B_{(x, y)}^{j}}, \quad j \in \mathcal{D}, \end{array} \tag{9}
$$

where $\widetilde{H}_{(x, y)}^{\prime}$ is the final output Grad-CAM at the original image size. The final step of our selection process identifies the candidate mask with the maximum normalized score, $\hat{Z}(j)$, as the chosen segmentation output:

$$
B_{\text{select}} = \arg \max_{j \in \mathcal{D}} \hat{Z}(j). \tag{10}
$$

This approach ensures that the selected mask aligns with the regions of interest highlighted by the Grad-CAM heatmap, thereby ensuring the precision and efficacy of RIS.

<table><tr><td rowspan="2">Methods</td><td colspan="4">RefCOCO</td><td colspan="4">RefCOCO+</td><td colspan="3">RefCOCOg</td><td rowspan="2">Average</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>testA</td><td>testB</td><td>avg.</td><td>val</td><td>test</td><td>avg.</td></tr><tr><td>Zero-shot methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>GL-CLIP (Yu, Seo, and Son 2023)</td><td>26.7</td><td>25.0</td><td>26.5</td><td>26.1</td><td>28.2</td><td>26.5</td><td>27.9</td><td>27.5</td><td>33.0</td><td>33.1</td><td>33.1</td><td>28.4</td></tr><tr><td>BSAP (Wang et al. 2024a)</td><td>27.3</td><td>27.0</td><td>27.1</td><td>27.1</td><td>28.7</td><td>27.8</td><td>28.3</td><td>28.3</td><td>34.5</td><td>34.5</td><td>34.5</td><td>29.4</td></tr><tr><td>Region token (Yu, Seo, and Son 2023)</td><td>23.4</td><td>22.1</td><td>24.6</td><td>23.4</td><td>24.5</td><td>22.6</td><td>25.4</td><td>24.2</td><td>27.6</td><td>27.3</td><td>27.5</td><td>24.7</td></tr><tr><td>SAM-CLIP (Ni et al. 2023)</td><td>26.3</td><td>25.8</td><td>26.4</td><td>26.2</td><td>25.7</td><td>28.0</td><td>26.8</td><td>26.8</td><td>38.8</td><td>38.9</td><td>38.9</td><td>29.6</td></tr><tr><td>Ref-Diff (Ni et al. 2023)</td><td>37.2</td><td>38.4</td><td>37.2</td><td>37.6</td><td>37.3</td><td>40.5</td><td>33.0</td><td>36.9</td><td>44.0</td><td>44.5</td><td>44.3</td><td>39.0</td></tr><tr><td>TAS (Suo, Zhu, and Yang 2023)</td><td>39.8</td><td>41.1</td><td>36.2</td><td>39.0</td><td>43.6</td><td>49.1</td><td>36.5</td><td>43.1</td><td>46.6</td><td>46.8</td><td>46.7</td><td>42.5</td></tr><tr><td>CaR (Sun et al. 2024)</td><td>33.6</td><td>35.4</td><td>30.5</td><td>33.0</td><td>34.2</td><td>36.0</td><td>31.0</td><td>33.7</td><td>36.7</td><td>36.6</td><td>36.7</td><td>34.3</td></tr><tr><td>Weakly-supervised methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>TSEG (Strudel, Laptev, and Schmid 2022)</td><td>25.4</td><td>-</td><td>-</td><td>-</td><td>22.0</td><td>-</td><td>-</td><td>-</td><td>22.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Chunk (Lee et al. 2023)</td><td>31.1</td><td>32.3</td><td>30.1</td><td>31.8</td><td>31.3</td><td>32.1</td><td>30.1</td><td>31.2</td><td>32.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>40.2</td><td>46.5</td><td>33.9</td><td>40.2</td><td>44.2</td><td>51.6</td><td>35.3</td><td>43.7</td><td>46.0</td><td>45.8</td><td>45.9</td><td>42.9</td></tr></table>

Table 1: Comparison of different methods on different datasets. "avg." denotes the mean performance across the splits within an individual dataset, while the final "Average" column is the composite mean over all dataset splits.

<table><tr><td>Method</td><td>Training dataset</td><td>All</td><td>Unseen</td></tr><tr><td rowspan="3">CRIS</td><td>RefCOCO</td><td>15.5</td><td>13.8</td></tr><tr><td>RefCOCO+</td><td>16.3</td><td>14.6</td></tr><tr><td>RefCOCOg</td><td>16.2</td><td>13.9</td></tr><tr><td rowspan="3">LAVT</td><td>RefCOCO</td><td>16.7</td><td>14.4</td></tr><tr><td>RefCOCO+</td><td>16.6</td><td>13.5</td></tr><tr><td>RefCOCOg</td><td>16.1</td><td>13.5</td></tr><tr><td>GL-CLIP</td><td>N/A</td><td>23.6</td><td>23.0</td></tr><tr><td>TAS</td><td>N/A</td><td>25.6</td><td>-</td></tr><tr><td>Ref-Diff</td><td>N/A</td><td>29.4</td><td>-</td></tr><tr><td>IteRPrimE (ours)</td><td>N/A</td><td>38.1</td><td>37.9</td></tr></table>

Table 2: Comparison of oIoU on PhraseCut for different supervised and zero-shot methods.

# Experiments

# Experimental Settings

Datasets and metrics. We employ the RefCOCO (Nagaraja, Morariu, and Davis 2016), RefCOCO+ (Nagaraja, Morariu, and Davis 2016), RefCOCOg (Kazemzadeh et al. 2014; Mao et al. 2016), and PhraseCut (Wu et al. 2020) datasets to evaluate the proposed zero-shot method. RefCOCO, with shorter expressions (on average 1.6 nouns and 3.6 words), contains many positional phrases (50%), especially those with direct direction cues like "left" or "right". In contrast, RefCOCO+ focuses on attribute phrases with the same average expression length. RefCOCOg is a more challenging benchmark with longer phrases (on average 2.8 nouns and 8.4 words) and complex expressions. To verify the effectiveness of the model out of domain, we adapt it to the PhraseCut dataset, whose test split contains 1271 additional categories beyond the 80 in COCO. Following (Sun et al. 2024; Yu, Seo, and Son 2023; Han et al. 2024), we use the mean Intersection over Union (mIoU), a common RIS metric, for the RefCOCO series. Following (Yu, Seo, and Son 2023; Wu et al. 2020), we report the overall Intersection over Union (oIoU) for the PhraseCut dataset.

(a) Self-correction effect for positional phrases: "the smaller bird at the left"

(b) Strong robustness to out-of-domain phrases: "the arm of the photo not being pictured"

(c) Strong robustness to out-of-domain categories: "metal bridge"


Figure 5: Qualitative comparisons with GL-CLIP. (a) The self-correction effect brought by our IGRS, especially for positional phrases. (b) For unseen phrases like "not", our model shows better robustness. (c) The gathering effect of IteRPrimE, which selects the whole mask with high confidence instead of a part, unlike GL-CLIP.

<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td></tr><tr><td>Overall Mean</td><td>43.4</td><td>41.3</td></tr><tr><td>GVLP (Shen et al. 2024)</td><td>45.0</td><td>41.9</td></tr><tr><td>Global Augment</td><td>46.5</td><td>45.7</td></tr><tr><td>Local Augment</td><td>45.3</td><td>42.3</td></tr><tr><td>PWEM</td><td>46.5</td><td>45.8</td></tr></table>

Table 3: Comparison of different Grad-CAM generation methods on the RefCOCO testA and RefCOCOg test datasets.

<table><tr><td>Method</td><td>RefCOCO testA</td><td>RefCOCOg test</td><td>RefCOCOg val</td></tr><tr><td>Mask image</td><td>46.3</td><td>45.3</td><td>45.2</td></tr><tr><td>Mask feature</td><td>46.5</td><td>45.8</td><td>46.0</td></tr></table>

Table 4: Performance comparison of masking out the salient regions at the image level versus the feature level (attention mask).

Implementation details. We use the widely adopted mask proposal network Mask2Former (Cheng et al. 2022; Liang et al. 2023) to obtain 200 instance-level mask proposals. Following (Shen et al. 2024; Lee et al. 2023), we use the base ALBEF model to study Grad-CAM for localization; the Grad-CAM is generated in the 8th cross-attention layer. In processing the input text, the prefatory phrase "there is a" is prepended. The balancing factor $\lambda$, connected-component limit $\kappa$, iteration cap $\nu$, and binarization threshold $\theta$ are 0.8, 12, 3, and 0.5, respectively. All experiments are conducted on a 24 GB RTX 3090 GPU.

# Results

Main results. As shown in Table 1, IteRPrimE achieves the best performance on almost all splits of the three datasets, especially the testA splits of RefCOCO and RefCOCO+, outperforming the SOTA TAS method by a 0.4% average improvement. Across all splits of RefCOCO and RefCOCO+, which are rich in short positional phrases, our model obtains averages of 40.2% and 43.7%, compared to 39.0% and 43.1% for TAS, respectively. Our method is therefore more robust to positional information than the CLIP-based paradigms. However, the model may have a relatively weaker capability on complex expressions, as shown on RefCOCOg, which can be attributed to data limitations and the gap in the pre-training stage. Additionally, by using the additional BLIP2 captioner (Li et al. 2023) and SAM (Kirillov et al. 2023), TAS maintains the best performance on some splits, especially for complex phrases, but it has the drawbacks of low throughput and heavy model volume.

Zero-shot evaluation on unseen domains. Notably, as shown in Table 2, our model has a strong cross-domain zero-shot transfer capability compared to other zero-shot SOTA methods and the existing supervised methods CRIS (Wang et al. 2022b) and LAVT (Yang et al. 2022). IteRPrimE significantly outperforms both kinds of methods in out-of-domain scenarios. When assessed on the subset of categories not present in the RefCOCO datasets (the "Unseen" column), our model shows the best robustness, whereas the supervised methods suffer huge performance degradation. The underperformance of TAS on this dataset may be attributed to the predominance of complex outdoor scenes: in such intricate environments, TAS's reliance on an additional captioning model for annotation can introduce greater noise, thereby compromising performance. Facing complex environmental contexts, our model's efficacy in localizing pertinent regions stems from its retention of spatial perception. Concurrently, the integration of IGRS and PWEM further bolsters IteRPrimE's proficiency in handling the complicated interrelationships among objects within the scene, leading to this commendable performance.

<table><tr><td rowspan="2">Method</td><td colspan="3">RefCOCOg test</td><td colspan="3">RefCOCO testA</td></tr><tr><td>Position</td><td>Others</td><td>Overall</td><td>Position</td><td>Others</td><td>Overall</td></tr><tr><td>GVLP w/o IGRS</td><td>33.0</td><td>43.6</td><td>41.3</td><td>34.7</td><td>53.2</td><td>44.7</td></tr><tr><td>GVLP w/ IGRS</td><td>33.7</td><td>44.3</td><td>41.9</td><td>35.1</td><td>53.6</td><td>45.0</td></tr><tr><td>PWEM w/o IGRS</td><td>36.4</td><td>47.5</td><td>45.1</td><td>36.1</td><td>54.8</td><td>46.1</td></tr><tr><td>PWEM w/ IGRS</td><td>37.4</td><td>48.2</td><td>45.8</td><td>36.5</td><td>55.0</td><td>46.5</td></tr></table>

Table 5: Ablation studies of the proposed PWEM and IGRS.

Qualitative comparisons. Figure 5 compares our method with GL-CLIP (Yu, Seo, and Son 2023). First, we demonstrate that our IGRS module possesses a self-corrective mechanism: it initially gives the same answer as GL-CLIP, then refines its prediction by revisiting initially overlooked regions. In Figure 5 (b), the scarcity of such negative phrases in the pre-training data is offset by our model's robustness. Finally, IGRS addresses the limited highlighted region of the initial Grad-CAM representation, as demonstrated in Figure 5 (c): the more the Grad-CAM gathers, the more likely the correct whole-instance mask is selected instead of a part.

# Ablation Study

Effect of PWEM. According to Equation (2), the mean operation is essential for Grad-CAM generation and deeply influences the representational accuracy of the Grad-CAM. Table 3 therefore presents an ablation study examining the impact of various aggregation configurations for Grad-CAM generation. "Overall Mean" is the direct mean over all tokens' Grad-CAMs, whereas GVLP averages over the selected effective tokens (Shen et al. 2024). The remaining rows correspond to Equation (3) and Equation (4) introduced above. Compared to the previous methods, the proposed PWEM significantly improves performance because it rescues examples that fail due to weak understanding of the complex semantics between the main word and its contexts. Additionally, global augmentation shows stronger potential than local augmentation because it can dominate the effect during aggregation.

Effect of mask position in IGRS. Table 4 evaluates where the binary mask $M$ is applied. "Mask image" means applying the mask to the original image so that the indicated regions are masked out, similar to GL-CLIP. However, this degrades performance due to the loss of relative relationships between regions. Our attention masking in the cross-attention layer is more robust, improving results on all three splits.

Effect of our proposed PWEM and IGRS. Table 5 evaluates the performance improvements achieved by integrating the different modules of our methodology. The "Position" category comprises the test samples that explicitly feature positional expressions, and the "Others" category is its complement. These results demonstrate that our modules not only improve general performance but also enhance the model's ability to manage complex semantic and spatial relations, particularly in positional contexts.

 (a) Iteration times
 (b) $\lambda$

Figure 6: Line charts of the two hyperparameters.

Different assemblies of iteration count and $\lambda$. Figure 6 presents an ablation of the two IGRS hyperparameters, analyzing the effect of varying the iteration count and $\lambda$ on the RefCOCO testA dataset. Figure 6 (a) shows that as the number of iterations increases from 1 to 3, the metric improves, peaking at 46.5%; beyond three iterations, the change is minimal. Selecting 3 iterations is therefore optimal for balancing performance and time efficiency. Figure 6 (b) analyzes the impact of $\lambda$ in the Grad-CAM update. The metric increases as $\lambda$ is raised from 0 to 0.2, while beyond this point performance declines with higher $\lambda$ values. Overall, the optimal value of $\lambda$ is 0.2.

# Conclusion

This paper presents IteRPrimE, a novel framework for zero-shot Referring Image Segmentation (RIS) that addresses the limitations of previous methods in handling positional sensitivity and complex semantic relationships. By incorporating an Iterative Grad-CAM Refinement Strategy (IGRS) and a Primary Word Emphasis Module (PWEM), IteRPrimE enhances the model's ability to focus accurately on target regions and manage semantic nuances. Extensive experiments on the RefCOCO/+/g and PhraseCut benchmarks demonstrate that IteRPrimE significantly outperforms previous state-of-the-art zero-shot methods, particularly in out-of-domain contexts. These findings highlight the framework's potential to advance zero-shot RIS by improving model sensitivity to positional and semantic details. Future research may extend the Grad-CAM-guided RIS paradigm to segmentation tasks at all levels of granularity with linguistic directives.

# Acknowledgments

This work was supported by the Shenzhen Science and Technology Program under Grant CJGJZD20220517142402006.

# References

Bai, S.; Liu, Y.; Han, Y.; Zhang, H.; and Tang, Y. 2024. Self-calibrated clip for training-free open-vocabulary segmentation. arXiv preprint arXiv:2411.15869.
Chang, H.; Zhang, H.; Jiang, L.; Liu, C.; and Freeman, W. T. 2022. Maskgit: Masked generative image transformer. In CVPR, 11315-11325.
Cheng, B.; Misra, I.; Schwing, A. G.; Kirillov, A.; and Girdhar, R. 2022. Masked-attention mask transformer for universal image segmentation. In CVPR, 1290-1299.
Devlin, J.; Chang, M.-W.; Lee, K.; and Toutanova, K. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.
Ding, H.; Liu, C.; He, S.; Jiang, X.; and Loy, C. C. 2023. MeViS: A large-scale benchmark for video segmentation with motion expressions. In ICCV, 2694-2703.
Ding, H.; Liu, C.; Wang, S.; and Jiang, X. 2021. Vision-language transformer and query generation for referring segmentation. In ICCV, 16321-16330.
Ding, J.; Xue, N.; Xia, G.-S.; and Dai, D. 2022. Decoupling zero-shot semantic segmentation. In CVPR, 11583-11592.
Han, K.; Liu, Y.; Liew, J. H.; Ding, H.; Liu, J.; Wang, Y.; Tang, Y.; Yang, Y.; Feng, J.; Zhao, Y.; et al. 2023. Global knowledge calibration for fast open-vocabulary segmentation. In CVPR, 797-807.
Han, Z.; Zhu, F.; Lao, Q.; and Jiang, H. 2024. Zero-shot referring expression comprehension via structural similarity between images and captions. In CVPR, 14364-14374.
He, S.; Guo, T.; Dai, T.; Qiao, R.; Wu, C.; Shu, X.; and Ren, B. 2022. VLMAE: Vision-language masked autoencoder. arXiv preprint arXiv:2208.09374.
Jing, Y.; Kong, T.; Wang, W.; Wang, L.; Li, L.; and Tan, T. 2021. Locate then segment: A strong pipeline for referring image segmentation. In CVPR, 9858-9867.
Kazemzadeh, S.; Ordonez, V.; Matten, M.; and Berg, T. 2014. Referitgame: Referring to objects in photographs of natural scenes. In EMNLP, 787-798.
Kim, N.; Kim, D.; Lan, C.; Zeng, W.; and Kwak, S. 2022. Restr: Convolution-free referring image segmentation using transformers. In CVPR, 18145-18154.
Kirillov, A.; Mintun, E.; Ravi, N.; Mao, H.; Rolland, C.; Gustafson, L.; Xiao, T.; Whitehead, S.; Berg, A. C.; Lo, W.-Y.; et al. 2023. Segment anything. In ICCV, 4015-4026.
Lai, X.; Tian, Z.; Chen, Y.; Li, Y.; Yuan, Y.; Liu, S.; and Jia, J. 2024. Lisa: Reasoning segmentation via large language model. In CVPR, 9579-9589.
Lee, J.; Lee, S.; Nam, J.; Yu, S.; Do, J.; and Taghavi, T. 2023. Weakly supervised referring image segmentation with intra-chunk and inter-chunk consistency. In ICCV, 21870-21881.
Li, C.; Xu, H.; Tian, J.; Wang, W.; Yan, M.; Bi, B.; Ye, J.; Chen, H.; Xu, G.; Cao, Z.; et al. 2022. mplug: Effective and efficient vision-language learning by cross-modal skip-connections. arXiv preprint arXiv:2205.12005.
Li, J.; Li, D.; Savarese, S.; and Hoi, S. 2023. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, 19730-19742. PMLR.
Li, J.; Selvaraju, R.; Gotmare, A.; Joty, S.; Xiong, C.; and Hoi, S. C. H. 2021. Align before fuse: Vision and language representation learning with momentum distillation. Advances in Neural Information Processing Systems, 34: 9694-9705.
Liang, F.; Wu, B.; Dai, X.; Li, K.; Zhao, Y.; Zhang, H.; Zhang, P.; Vajda, P.; and Marculescu, D. 2023. Open-vocabulary semantic segmentation with mask-adapted clip. In CVPR, 7061-7070.
Liu, C.; Ding, H.; and Jiang, X. 2023. Gres: Generalized referring expression segmentation. In CVPR, 23592-23601.
Liu, R.; Liu, C.; Bai, Y.; and Yuille, A. L. 2019. Clevr-ref+: Diagnosing visual reasoning with referring expressions. In CVPR, 4185-4194.
Liu, Y.; Bai, S.; Li, G.; Wang, Y.; and Tang, Y. 2024a. Open-vocabulary segmentation with semantic-assisted calibration. In CVPR, 3491-3500.
Liu, Y.; Zhang, C.; Wang, Y.; Wang, J.; Yang, Y.; and Tang, Y. 2024b. Universal segmentation at arbitrary granularity with language instruction. In CVPR, 3459-3469.
Luo, J.; Khandelwal, S.; Sigal, L.; and Li, B. 2024a. Emergent Open-Vocabulary Semantic Segmentation from Off-the-shelf Vision-Language Models. In CVPR, 4029-4040.
Luo, Z.; Xiao, Y.; Liu, Y.; Li, S.; Wang, Y.; Tang, Y.; Li, X.; and Yang, Y. 2024b. Soc: Semantic-assisted object cluster for referring video object segmentation. Advances in Neural Information Processing Systems, 36.
Mao, J.; Huang, J.; Toshev, A.; Camburu, O.; Yuille, A. L.; and Murphy, K. 2016. Generation and comprehension of unambiguous object descriptions. In CVPR, 11-20.
Nagaraja, V. K.; Morariu, V. I.; and Davis, L. S. 2016. Modeling context between objects for referring expression understanding. In ECCV, 792-807. Springer.
Ni, M.; Zhang, Y.; Feng, K.; Li, X.; Guo, Y.; and Zuo, W. 2023. Ref-diff: Zero-shot referring image segmentation with generative models. arXiv preprint arXiv:2308.16777.
Radford, A.; Kim, J. W.; Hallacy, C.; Ramesh, A.; Goh, G.; Agarwal, S.; Sastry, G.; Askell, A.; Mishkin, P.; Clark, J.; et al. 2021. Learning transferable visual models from natural language supervision. In ICML, 8748-8763. PMLR.
Rombach, R.; Blattmann, A.; Lorenz, D.; Esser, P.; and Ommer, B. 2022. High-resolution image synthesis with latent diffusion models. In CVPR, 10684-10695.
Selvaraju, R. R.; Cogswell, M.; Das, A.; Vedantam, R.; Parikh, D.; and Batra, D. 2017. Grad-cam: Visual explanations from deep networks via gradient-based localization. In ICCV, 618-626.
Shah, N. A.; VS, V.; and Patel, V. M. 2024. LQMFormer: Language-aware Query Mask Transformer for Referring Image Segmentation. In CVPR, 12903-12913.
Shen, H.; Zhao, T.; Zhu, M.; and Yin, J. 2024. Ground-VLP: Harnessing Zero-Shot Visual Grounding from Vision-Language Pre-training and Open-Vocabulary Object Detection. In AAAI, volume 38, 4766-4775.
Shin, G.; Xie, W.; and Albanie, S. 2022. Reco: Retrieve and co-segment for zero-shot transfer. Advances in Neural Information Processing Systems, 35: 33754-33767.
Strudel, R.; Laptev, I.; and Schmid, C. 2022. Weakly-supervised segmentation of referring expressions. arXiv preprint arXiv:2205.04725.
Sun, S.; Li, R.; Torr, P.; Gu, X.; and Li, S. 2024. Clip as rnn: Segment countless visual concepts without training endeavor. In CVPR, 13171-13182.
Suo, Y.; Zhu, L.; and Yang, Y. 2023. Text augmented spatial-aware zero-shot referring image segmentation. In EMNLP.
Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. Advances in Neural Information Processing Systems, 30.
Wang, H.; Zhan, Y.; Liu, L.; Ding, L.; Yang, Y.; and Yu, J. 2024a. Towards Alleviating Text-to-Image Retrieval Hallucination for CLIP in Zero-shot Learning. arXiv preprint arXiv:2402.18400.
Wang, X.; Yu, Z.; De Mello, S.; Kautz, J.; Anandkumar, A.; Shen, C.; and Alvarez, J. M. 2022a. Freesolo: Learning to segment objects without annotations. In CVPR, 14176-14186.
Wang, Y.; Zhao, R.; and Sun, Z. 2023. Efficient Remote Sensing Transformer for Coastline Detection with Sentinel-2 Satellite Imagery. In IGARSS, 5439-5442. IEEE.
Wang, Y.; Zhao, R.; Wei, S.; Ni, J.; Wu, M.; Luo, Y.; and Luo, C. 2024b. Convolution Meets Transformer: Efficient Hybrid Transformer for Semantic Segmentation with Very High Resolution Imagery. In IGARSS 2024, 9688-9691. IEEE.
Wang, Z.; Lu, Y.; Li, Q.; Tao, X.; Guo, Y.; Gong, M.; and Liu, T. 2022b. Cris: Clip-driven referring image segmentation. In CVPR, 11686-11695.
Wu, C.; Lin, Z.; Cohen, S.; Bui, T.; and Maji, S. 2020. Phrasecut: Language-based image segmentation in the wild. In CVPR, 10216-10225.
Xu, H.; Ye, Q.; Yan, M.; Shi, Y.; Ye, J.; Xu, Y.; Li, C.; Bi, B.; Qian, Q.; Wang, W.; et al. 2023a. mplug-2: A modularized multi-modal foundation model across text, image and video. In ICML, 38728-38748. PMLR.
Xu, J.; De Mello, S.; Liu, S.; Byeon, W.; Breuel, T.; Kautz, J.; and Wang, X. 2022. Groupvit: Semantic segmentation emerges from text supervision. In CVPR, 18134-18144.
Xu, X.; Wu, C.; Rosenman, S.; Lal, V.; Che, W.; and Duan, N. 2023b. Bridgetower: Building bridges between encoders in vision-language representation learning. In AAAI, volume 37, 10637-10647.
Yang, Z.; Wang, J.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2022. Lavt: Language-aware vision transformer for referring image segmentation. In CVPR, 18155-18165.
Yang, Z.; Wang, J.; Ye, X.; Tang, Y.; Chen, K.; Zhao, H.; and Torr, P. H. 2024. Language-aware vision transformer for referring segmentation. IEEE TPAMI.
Yu, J.; Wang, Z.; Vasudevan, V.; Yeung, L.; Seyedhosseini, M.; and Wu, Y. 2022. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917.
Yu, S.; Seo, P. H.; and Son, J. 2023. Zero-shot referring image segmentation with global-local context features. In CVPR, 19456-19465.
Zhou, C.; Loy, C. C.; and Dai, B. 2022. Extract free dense labels from clip. In ECCV, 696-712. Springer.
Zhu, C.; and Chen, L. 2024. A survey on open-vocabulary detection and segmentation: Past, present, and future. IEEE TPAMI.

data/2025/2503_00xxx/2503.00936/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9f908d0a7e22e7d12964d4bf771d6acba3c68bc0bbd4b7036ae94ff36bbd6fb
size 656571

data/2025/2503_00xxx/2503.00936/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_00xxx/2503.00986/6e80f082-67b0-4f10-b55b-09a86bd4f660_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b3e5f8b6eb69006d12794d0b0cf0030b84769e281f4495f78cd98861b6487f4c
size 3159885

data/2025/2503_00xxx/2503.00986/full.md
ADDED
@@ -0,0 +1,439 @@

# MODELING FINE-GRAINED HAND-OBJECT DYNAMICS FOR EGOCENTRIC VIDEO REPRESENTATION LEARNING

Baoqi Pei$^{1,2*}$, Yifei Huang$^{2,3*}$, Jilan Xu$^{2,4}$, Guo Chen$^{5}$, Yuping He$^{5}$, Lijin Yang$^{3}$, Yali Wang$^{2,6}$, Weidi Xie$^{2,7}$, Yu Qiao$^{2}$, Fei Wu$^{1}$, Limin Wang$^{2,5}$

<sup>1</sup>Zhejiang University, <sup>2</sup>Shanghai Artificial Intelligence Laboratory, <sup>3</sup>The University of Tokyo, <sup>4</sup>Fudan University, <sup>5</sup>Nanjing University, <sup>6</sup>SIAT, <sup>7</sup>Shanghai Jiao Tong University
peibaoqi@gmail.com; hyf@iis.u-tokyo.ac.jp

# ABSTRACT

In egocentric video understanding, the motion of hands and objects as well as their interactions play a significant role by nature. However, existing egocentric video representation learning methods mainly focus on aligning video representations with high-level narrations, overlooking the intricate dynamics between hands and objects. In this work, we aim to integrate the modeling of fine-grained hand-object dynamics into the video representation learning process. Since no suitable data is available, we introduce HOD, a novel pipeline employing a hand-object detector and a large language model to generate high-quality narrations with detailed descriptions of hand-object dynamics. To learn these fine-grained dynamics, we propose EgoVideo, a model with a new lightweight motion adapter that captures fine-grained hand-object motion information. Through our co-training strategy, EgoVideo effectively and efficiently leverages the fine-grained hand-object dynamics in the HOD data. Extensive experiments demonstrate that our method achieves state-of-the-art performance across multiple egocentric downstream tasks, including improvements of 6.3% in EK-100 multi-instance retrieval, 5.7% in EK-100 classification, and 16.3% in EGTEA classification in zero-shot settings. Furthermore, our model exhibits robust generalization capabilities in hand-object interaction and robot manipulation tasks. Code and data are available at https://github.com/OpenRobotLab/EgoHOD/.
# 1 INTRODUCTION

Egocentric video understanding has recently garnered increasing attention due to its crucial role in areas such as augmented reality (Pan et al., 2023), embodied AI (Srivastava et al., 2022; Huang et al., 2024b), and personalized assistants (Huang et al., 2018). With the collection of large-scale egocentric video datasets (Damen et al., 2020; Grauman et al., 2022), researchers have begun to adopt video-language pretraining (Lin et al., 2022) based on these annotations to learn egocentric video representations. Since the original annotations tend to be highly template-driven and lack diversity, previous works explore using Large Language Models (LLMs) to rephrase the narrations (Zhao et al., 2023) or introducing new video-language pairs from exocentric datasets (Dou et al., 2024). This scheme has shown its success in a wide range of downstream tasks (Plizzari et al., 2024).

However, as can be seen from the example in Figure 1 (right), the original annotations in egocentric video datasets are typically highly condensed, describing only overall actions like "C draws on a book" or "C moves both hands". Since no additional information is provided, previous works like LaViLa (Zhao et al., 2023) can only rephrase at the same level of abstraction as the original annotations, neglecting a crucial aspect of egocentric videos – the fine-grained dynamics of hands and objects. Most egocentric videos contain a large portion of hand-object interactions, which reflect the camera wearer's behavior and intentions. As will be seen, integrating this information in vision-language pretraining significantly enhances egocentric video representation learning, resulting in state-of-the-art performance across various benchmarks.

![](images/0a0968069f4a9992e01ae09b6f39b0bae68d11ccc33b0340910e45b4b7a95721.jpg)

Figure 1: Left: Our EgoVideo model achieves state-of-the-art performance across multiple video benchmarks by learning fine-grained hand-object dynamics from videos. Right: Annotations from different sources: original Ego4D annotation (Grauman et al., 2022), LaViLa (Zhao et al., 2023), and our HOD. Our HOD annotations provide a detailed description of hand movements and object manipulation, demonstrating a higher level of detail and context.

![](images/2ca9e1e5f0d8db85f5d3a64e04e77d4670a82a03eb13afad607eb89648e9f489.jpg)
Firstly, to incorporate hand-object dynamics into vision-language pretraining, it is essential to construct data that accurately captures the detailed motion of hands and objects in videos. A recent work directly uses the output of off-the-shelf hand-object detectors (Shan et al., 2020) as the ground truth of auxiliary targets in the pretraining (Zhang et al., 2023). However, this approach only models the appearance of hands and objects without considering their dynamics. It also fails to learn the semantic connections between hand-object interactions and the original narration. To address this, we introduce HOD, a novel framework for generating descriptions with fine-grained Hand-Object Dynamics for a given video clip. We begin by using hand-object detectors to obtain bounding boxes of hands and contact objects. Then we design prompts based on these bounding boxes to generate descriptions of the trajectories of the hand and object, as well as their contact states and positions. Finally, using the new prompts and original annotations, we leverage a large language model (LLM) to generate semantically rich captions that encompass the motion states of hands and objects. By utilizing high framerate inputs, we ensure the capture of more detailed motions.

Secondly, to efficiently and effectively exploit the fine-grained spatiotemporal information in HOD, we propose EgoVideo, a novel ViT-based model with a lightweight motion adapter. Cooperating with the HOD data, EgoVideo employs a dual-branch design and co-training strategy. The backbone branch is trained normally to learn fundamental video-language alignment, while the adapter branch is trained with a higher framerate to capture detailed hand-object dynamics. The motion adapter has a separable convolution design, allowing for information aggregation both from adjacent frames temporally and from hands and objects at different locations spatially. This design enables EgoVideo to model detailed hand-object dynamics while maintaining low computational costs. This also allows us to scale the model size to 1B to fully unlock its potential to comprehend egocentric videos.

We extensively evaluate EgoVideo across multiple pretraining data sources and various egocentric downstream tasks. Experimental results show that our model sets a new state-of-the-art on 9 tasks, as partially shown in Figure 1. Notably, our model also achieves the best performance at the same model size in both zero-shot and fine-tuning settings. Further experiments demonstrate that our HOD data is also beneficial for robot manipulation tasks.

Our main contributions are as follows: (1) We develop a HOD data pipeline to generate captions that describe fine-grained hand-object dynamics, which are crucial for egocentric video understanding; (2) We propose EgoVideo, a dual-branch model with a novel lightweight motion adapter and a co-training strategy to leverage the HOD data efficiently and effectively; (3) We demonstrate state-of-the-art performance on 12 downstream tasks, and our approach generalizes well to robot manipulation tasks. All code and data will be made publicly available.
# 2 RELATED WORK

Egocentric Video Understanding is receiving increasing research attention. Previous works focus on diverse tasks such as action recognition (Plizzari et al., 2022; Huang et al., 2020a), action anticipation (Girdhar & Grauman, 2021), and cross-view understanding (Xue et al., 2022; Huang et al., 2024a; Luo et al., 2024). Recent methods have begun to work on egocentric representation learning (Lin et al., 2022; Pei et al., 2024) using the large-scale data from Ego4D (Grauman et al., 2022), or refining the Ego4D narrations by LLM rephrasing (Zhao & Krahenbuhl, 2023). A recent work also searches for additional data from exocentric datasets to improve the pretraining (Dou et al., 2024). However, since the Ego4D narrations are highly abstract, these methods fail to learn one critical aspect of egocentric videos – fine-grained hand-object dynamics. Recently, Helping Hands (Zhang et al., 2023) utilizes hand and object coordinates as auxiliary targets during pretraining. However, it only focuses on the spatial information of hands and objects, neglecting their motion dynamics. Additionally, the provided supervision does not integrate the states of hands and objects with the video descriptions, limiting the model's ability to comprehend fine-grained details.

Unlike previous works, we propose the first method to integrate hand-object dynamics into egocentric representation learning. On the data side, we propose the HOD (Hand-Object Dynamics) pipeline, which generates high-quality video-language pairs. The language in these pairs explicitly represents the complex states and motions of hands and objects in the videos, enabling the model to learn detailed information about these dynamics. On the model side, we introduce EgoVideo, a model equipped with a lightweight motion adapter. This adapter is designed to effectively capture the intricate hand and object dynamics provided by the HOD data, enhancing the model's ability to understand and interpret fine-grained dynamics in egocentric videos.

Video-Language Representation Learning has also attracted researchers after the success of CLIP (Radford et al., 2021), due to the need for generating robust video representations. Several large-scale video-language datasets (Kay et al., 2017; Miech et al., 2019; Caba Heilbron et al., 2015) further fueled the research in this area. However, generating high-quality video-text pairs remains a challenging task, prompting researchers to develop innovative solutions. LaViLa (Zhao et al., 2023) leverages Large Language Models (LLMs) to generate dense narrations for videos. Video ReCap (Islam et al., 2024) utilizes a curriculum learning training scheme to generate summaries for long videos. EMBED (Dou et al., 2024) and EgoInstructor (Xu et al., 2024) use rules or retrieval models to add additional training data. However, previous methods can only pretrain their models at the same abstraction level as the original annotation. In contrast, our approach integrates finer-level details into the representation learning process.

Hand-Object Interaction Understanding has long been a key research topic within the field of egocentric vision. In recent years, several works have made significant strides in estimating 3D hand joints (Brahmbhatt et al., 2020; Cai et al., 2018; Yang & Yao, 2019; Yuan et al., 2018; Ohkawa et al., 2023) and reconstructing hand-object shapes (Cao et al., 2021; Doosti et al., 2020; Hasson et al., 2019; 2020; Liu et al., 2021). EgoHOS (Zhang et al., 2022) provides a labeled dataset with fine-grained per-pixel labels of hands and objects, serving as a reliable foundational tool for 2D hand-object segmentation. 100DOH (Shan et al., 2020) introduces a large-scale video dataset containing hands and hand-object interactions, providing a rich resource for hand-object detector training. In our work, we utilize existing hand and object detectors in our HOD pipeline to convert information related to hand/object motion and contact details into natural language descriptions. By integrating these detailed descriptions with our EgoVideo model, we incorporate this finer level of detail into the video representation learning process.
# 3 METHOD

# 3.1 DATA GENERATION PIPELINE: HOD

The fine-grained dynamics of hands and objects play a pivotal role in egocentric video understanding (Fathi et al., 2011a). To effectively integrate this information in the video-language pretraining process, we propose HOD, a novel data generation pipeline to transform hand-object dynamics into natural language. An overview of HOD is illustrated in Figure 2 (top). First, we utilize an off-the-shelf hand-object detector (Shan et al., 2020) to generate bounding boxes for hands and objects in each frame of the video clips. Next, we employ a large language model (AI et al., 2024) to enrich the original video captions. The model is prompted to generate new narrations that integrate the original captions with hand-object dynamics information, enhancing the semantic richness of the annotations. Below, we go into the details of the HOD data generation process.

![](images/c53d4987a106944cd53f797131b2d9b49d24a653c71d5d8d14a62ac7a4e19eeb.jpg)

Figure 2: Illustration of our HOD pipeline and EgoVideo model. In our Hand-Object Dynamics data generation pipeline (top), we first use a hand-object detector to obtain the spatial coordinates of hands and objects in the clip, then we combine the motion information of hands and objects with the original narrations to generate semantically richer narrations. In our EgoVideo model (bottom), the backbone is trained with a lower framerate. We design a lightweight motion adapter to learn fine-grained dynamics efficiently with higher framerate inputs.
# 3.1.1 DATA SELECTION

Before going into the generation process, it is essential to select appropriate source data. The basic data component comes from the 4M subset (Lin et al., 2022) of Ego4D (Grauman et al., 2022), which has been proven useful in egocentric video-language pretraining (Pramanick et al., 2023). Additionally, we curate data from the large-scale HowTo100M dataset (Miech et al., 2019) since it contains rich hand-object interactions. We specifically choose How2-Interlink7M (Wang et al., 2024a), which contains 7M clips with high-quality GPT-4 (OpenAI, 2023) refined captions. Since the videos come from diverse sources and may include content that impedes egocentric representation learning, we employ a filtering technique to retain only clips with an egocentric style. We train a style classifier $\mathcal{P}$ by manually annotating 10,000 clips as "ego-like" or "non-ego-like". With this classifier, we obtain an additional 3.4M egocentric-style clips. More details can be found in Appendix A.
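To make the filtering step concrete, here is a minimal sketch of how such a style classifier could be applied to candidate clips. The classifier interface, the 0.5 decision threshold, and the clip metadata format are illustrative assumptions rather than the released implementation.

```python
# Minimal sketch of egocentric-style filtering (illustrative assumptions: the
# classifier interface, threshold, and clip metadata format).
EGO_THRESHOLD = 0.5  # hypothetical decision threshold for "ego-like"

def filter_ego_clips(clips, style_classifier):
    """Keep only clips the style classifier judges as egocentric-like.

    clips: list of dicts with at least {"clip_id", "frames"}.
    style_classifier: callable mapping frames -> P(ego-like) in [0, 1].
    """
    kept = []
    for clip in clips:
        p_ego = style_classifier(clip["frames"])
        if p_ego >= EGO_THRESHOLD:
            kept.append(clip["clip_id"])
    return kept

if __name__ == "__main__":
    # Toy classifier: pretend clips tagged "ego" score high.
    toy = lambda frames: 0.9 if frames == "ego" else 0.1
    clips = [{"clip_id": "a", "frames": "ego"}, {"clip_id": "b", "frames": "exo"}]
    print(filter_ego_clips(clips, toy))  # -> ["a"]
```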
# 3.1.2 GENERATING CAPTIONS WITH HAND-OBJECT DYNAMICS

In this section, we introduce our HOD framework. Existing methods for refining descriptions in video-language pretraining (Zhao et al., 2023; Dou et al., 2024) focus on high-level abstracts but overlook the fine-grained details of hand-object dynamics. This oversight is detrimental for egocentric representation learning, since hand-object interactions form a considerable proportion of egocentric videos by nature. To address this gap, in our HOD framework, we first detect the positions of hands and objects using a hand-object detector. With this information, we prompt a large language model to augment the original annotation with detailed descriptions of hand and object movements. Through the subsequent video-language pretraining, our EgoVideo model can understand videos at a finer-grained level.

Hand-Object Dynamics Detector. Thanks to the rapid advancement in the field of hand-object interaction (Jiang et al., 2021; Ohkawa et al., 2023), off-the-shelf hand-object detectors can provide robust hand and object positions. In our framework, we employ 100DOH (Shan et al., 2020) as the detector $\Phi_{\mathrm{det}}$ for bounding box extraction.
For a video clip $x = (x_{1}, x_{2}, \ldots, x_{T})$, we uniformly sample $n = 16$ frames within the clip to obtain fine-grained motion information. Then we use $\Phi_{\mathrm{det}}$ to acquire the bounding boxes of hands and objects on these frames, which can be represented as

$$
LH_{i}, RH_{i}, LO_{i}, RO_{i} = \Phi_{\mathrm{det}}(x_{i}), \tag{1}
$$

where $LH_{i}, RH_{i}, LO_{i}, RO_{i}$ denote the bounding boxes of the left hand, the right hand, objects in contact with the left hand, and objects in contact with the right hand in the $i$-th frame. We use linear interpolation to compensate for a missing hand box in frame $x_{t}$ if the corresponding hand boxes are detected in both frames $x_{t-1}$ and $x_{t+1}$.
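As a concrete illustration of this step, the sketch below runs a detector over the sampled frames and fills single-frame gaps by linear interpolation. The detector's output format here is an assumption for illustration, not the exact 100DOH interface.

```python
# Sketch of per-frame hand/object detection with linear interpolation for
# missing boxes (Eq. 1). `detector` stands in for the 100DOH model.
import numpy as np

def detect_with_interpolation(frames, detector):
    """Run the detector on n sampled frames; fill single-frame gaps.

    frames: list of images.
    detector: callable returning a dict with keys "LH", "RH", "LO", "RO",
              each a 4-vector [x1, y1, x2, y2] or None if undetected.
    """
    dets = [detector(f) for f in frames]
    for key in ("LH", "RH", "LO", "RO"):
        for t in range(1, len(dets) - 1):
            prev_box, cur_box, next_box = dets[t - 1][key], dets[t][key], dets[t + 1][key]
            # Interpolate only when both temporal neighbors were detected.
            if cur_box is None and prev_box is not None and next_box is not None:
                dets[t][key] = (np.asarray(prev_box) + np.asarray(next_box)) / 2.0
    return dets
```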
Hand-Object Dynamics Rephraser. Current pretraining methods only use high-level language descriptions (e.g., "C takes the scissors" in Figure 2), which lack important egocentric details like hand and object interaction. In this work, we incorporate these details into the video-language pretraining process. Hand-object dynamics encompass a variety of information, including bounding boxes of hands and objects, hand and object movement directions and trajectories, as well as their contact conditions. To integrate all this information into the video-language pretraining process, we use an LLM as a rephraser to express these dynamics in natural language.

Specifically, we employ Yi-34B (AI et al., 2024) as our LLM. To capture the nuances of hand and object movements, we extract the central points of bounding boxes to derive trajectories for hands and objects. This process yields six essential categories of information: spatial-temporal data for 1) the left hand, 2) the right hand, 3) objects contacted by the left hand, 4) objects contacted by the right hand, 5) objects contacted by both hands, and 6) the original narration. We then prompt the LLM to amalgamate this detailed information, enabling the generation of rich narratives that intricately describe hand-object dynamics. Further details on prompting can be found in Appendix A.
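As a sketch of how box trajectories can be summarized before prompting, the snippet below converts box centers into a coarse motion phrase. The direction vocabulary and displacement threshold are illustrative assumptions, not the exact HOD prompt format.

```python
# Sketch of turning box-center trajectories into a coarse motion phrase that
# can be inserted into the LLM prompt (illustrative, not the exact prompt).
import numpy as np

def box_center(box):
    x1, y1, x2, y2 = box
    return np.array([(x1 + x2) / 2.0, (y1 + y2) / 2.0])

def describe_trajectory(boxes, min_disp=0.05):
    """Map a sequence of boxes (normalized coordinates) to a motion phrase."""
    centers = np.stack([box_center(b) for b in boxes])
    dx, dy = centers[-1] - centers[0]
    if np.hypot(dx, dy) < min_disp:
        return "stays roughly still"
    horiz = "right" if dx > 0 else "left"
    vert = "downwards" if dy > 0 else "upwards"  # image y-axis points down
    return f"moves {horiz} and {vert}"

# e.g. a right hand drifting down-right across the clip:
print(describe_trajectory([(0.1, 0.1, 0.2, 0.2), (0.3, 0.4, 0.4, 0.5)]))
```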
Analysis of HOD Data. We conduct additional analyses on our HOD data to evaluate its quality. First, we identify the top 30 most frequent words in HOD captions and the original EgoClip narrations and plot their normalized frequencies in Figure 3. The EgoClip narrations exhibit a more pronounced long-tail distribution, while our HOD captions display a more balanced distribution. Notably, HOD captions include many "dynamic" words, such as "up" and "downwards", which aligns with the rationale behind our data generation process. To further verify the quality of our HOD data, we employ GPT-4o (OpenAI, 2023) for quality assessment. We randomly select 1000 clips and ask GPT to score each clip's caption on a scale from 0 to 10. To ensure GPT does not simply assign high scores based on the length of the captions, we also conduct random gerund replacements on our data for comparison. The results, summarized in Table 1, show that our HOD data has a significantly better GPT-Score. Additional details on the scoring process and evaluations using other metrics are provided in Appendix A.

![](images/9bcc30eb9e8c39464e36513135b00301dec6894ec4a0c6e17a62de82d59124bf.jpg)

Figure 3: Normalized frequency of the top-30 words in EgoClip (green) and our HOD (blue). Our HOD data has a less long-tailed distribution, showing its word diversity.

Table 1: Results of narration quality, where HOD-random represents the narration after replacing keywords. The GPT-Score ranges from 0 to 10, with higher values indicating higher quality of narration.

<table><tr><td>Data</td><td>GPT-Score</td></tr><tr><td>EgoClip</td><td>5.53</td></tr><tr><td>HOD-random</td><td>3.70</td></tr><tr><td>HOD</td><td>7.71</td></tr></table>
# 3.2 EGOCENTRIC REPRESENTATION LEARNING MODEL: EGOVIDEO

The narrations generated by our HOD pipeline are highly detailed. As a result, the previous pretraining scheme struggles to capture the corresponding visual information at this level of detail. In response, we introduce EgoVideo (Figure 2, bottom), a model comprising a backbone and a motion adapter. The motion adapter aids in learning fine-grained hand-object dynamics from densely sampled video frames. Cooperating with a co-training strategy, our EgoVideo model can obtain richer video representations while maintaining computational efficiency.

Visual and Text Encoder. Following the standard video-language pretraining setting (Lin et al., 2022), our model includes a visual encoder $\mathcal{F}_v$ (including our motion adapter) and a text encoder $\mathcal{F}_t$. In the visual encoder, for a clip $x \in \mathbb{R}^{T \times H \times W \times 3}$, we concatenate image tokens in $T$ frames with a learnable class token. The output of our visual encoder is $\mathbf{E}_{\mathbf{v}} \in \mathbb{R}^D$. For the text encoder, we employ a 12-layer GPT-like Transformer (Radford et al., 2019) that takes tokens produced by BPE tokenization (Sennrich, 2015) as input. The output of our text encoder is $\mathbf{E}_{\mathbf{t}} \in \mathbb{R}^D$.
Motion Adapter. Intuitively, to encode visual representations at the same level of detail as the language, it is essential to utilize a greater number of frames as input. Since increasing the number of frames in training would result in unacceptable computational overhead, inspired by PEFT techniques in LLMs (Ding et al., 2023), we propose a lightweight motion adapter. The motion adapter is injected between the layers of the visual backbone and is tailored to learn finer-grained details with a high framerate. Since hand and object motion forms a spatiotemporal pattern, unlike previous methods (Pan et al., 2022; Xing et al., 2024) that only focus on learning temporal information, our module is designed to learn both spatial and temporal information.

Our motion adapter is attached to the top of each of the $N$ transformer layers. Without loss of generality, we describe the motion adapter for one transformer layer and illustrate it in Figure 4. Denote $\mathbf{Y} \in \mathbb{R}^{L \times D}$ as the output of a transformer layer in $\mathcal{F}_v$, where $L$ is the number of tokens. We first forward $\mathbf{Y}$ to a down-projection layer $\mathrm{W}_{\mathrm{down}}$ with ratio $\gamma$, followed by a GELU activation function $\sigma$. Then, we use a 2D convolution layer Conv2D with kernel size $(k,k)$ to aggregate spatial information from each frame, followed by a 1D temporal convolution layer TConv1D and a linear layer $\mathrm{W}_{\mathrm{m}}$ to model the dynamics between adjacent frames. Finally, an up-projection layer $\mathrm{W}_{\mathrm{up}}$ is used to restore the dimension. Formally, the structure can be described as:

$$
\mathbf{Y}^{\prime} = \sigma(\mathbf{Y}\mathrm{W}_{\mathrm{down}}), \quad \mathbf{Y}_{\mathbf{s}} = \mathrm{ReLU}(\mathrm{BN}(\mathrm{Conv2D}(\mathbf{Y}^{\prime}))),
$$

$$
\mathbf{Y}_{\mathbf{st}} = \left(\mathrm{TConv1D}(\mathbf{Y}_{\mathbf{s}})\right)\mathrm{W}_{\mathrm{m}}, \quad \mathrm{MotionAdapter}(\mathbf{Y}) = \mathbf{Y} + \mathbf{Y}_{\mathbf{st}}\mathrm{W}_{\mathrm{up}}, \tag{2}
$$

where $\mathrm{W}_{\mathrm{down}}\in \mathbb{R}^{D\times \gamma D}$, $\mathrm{W}_{\mathrm{m}}\in \mathbb{R}^{\gamma D\times \gamma D}$, and $\mathrm{W}_{\mathrm{up}}\in \mathbb{R}^{\gamma D\times D}$. BN denotes BatchNorm2D.
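A minimal PyTorch sketch of Eq. 2 is given below, assuming square per-frame token grids and omitting the class token for simplicity; the module names and reshaping logic are illustrative, not the released code.

```python
# Minimal sketch of the motion adapter (Eq. 2): down-projection, per-frame 2D
# spatial convolution, 1D temporal convolution, up-projection, with a residual
# connection. Assumes square token grids and no class token.
import torch
import torch.nn as nn

class MotionAdapter(nn.Module):
    def __init__(self, dim, num_frames, gamma=0.5, k=3):
        super().__init__()
        hidden = int(dim * gamma)
        self.t = num_frames
        self.down = nn.Linear(dim, hidden)      # W_down
        self.act = nn.GELU()                    # sigma
        self.conv2d = nn.Conv2d(hidden, hidden, k, padding=k // 2)
        self.bn = nn.BatchNorm2d(hidden)
        self.tconv1d = nn.Conv1d(hidden, hidden, 3, padding=1)
        self.wm = nn.Linear(hidden, hidden)     # W_m
        self.up = nn.Linear(hidden, dim)        # W_up

    def forward(self, y):
        # y: (B, T*H*W, D) patch tokens from one transformer layer
        b, l, d = y.shape
        hw = l // self.t
        h = w = int(hw ** 0.5)
        z = self.act(self.down(y))                               # (B, L, hD)
        z = z.view(b * self.t, h, w, -1).permute(0, 3, 1, 2)      # (B*T, hD, H, W)
        z = torch.relu(self.bn(self.conv2d(z)))                   # spatial mixing
        z = z.permute(0, 2, 3, 1).reshape(b, self.t, hw, -1)      # (B, T, HW, hD)
        z = z.permute(0, 2, 3, 1).reshape(b * hw, -1, self.t)     # (B*HW, hD, T)
        z = self.tconv1d(z)                                       # temporal mixing
        z = z.reshape(b, hw, -1, self.t).permute(0, 3, 1, 2).reshape(b, l, -1)
        z = self.wm(z)
        return y + self.up(z)                                     # residual
```

The residual form lets the adapter start close to an identity mapping, so injecting it into a pretrained backbone does not disturb the original features early in training.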
Co-training Strategy. In EgoVideo, the motion adapter receives input at a higher framerate to capture fine-grained information. Additionally, the backbone must be trained to fully adapt to the egocentric domain. Thus, different from previous PEFT methods that freeze the backbone and only train the adapter part, we need to train both the backbone and adapter parameters. Motivated by the architecture of (Feichtenhofer et al., 2019), we employ a co-training strategy to train the backbone and the motion adapter jointly.

![](images/15b1056e21b3fa2eb08632cf86bbdff00a9335e4d0b73b2f2923c8e685846d40.jpg)

Figure 4: Architecture of our motion adapter. We use a 2D convolution layer and a 1D temporal convolution layer to capture the spatial and temporal dynamics efficiently.

Specifically, we use an upsampling parameter $\lambda$ to sample the input at two sampling rates. For the input $x_{l} \in \mathbb{R}^{T \times H \times W \times C}$ with a low sampling rate, we pass it through the backbone with all parameters unfrozen, obtaining the output $\mathbf{E}_{\mathbf{vl}} \in \mathbb{R}^{D}$. For the input with a higher sampling rate $x_{h} \in \mathbb{R}^{\lambda T \times H \times W \times C}$, we pass it through both the backbone and adapter parameters to get the output $\mathbf{E}_{\mathbf{vh}} \in \mathbb{R}^{D}$, during which we freeze the parameters of the backbone and only train the adapter. Finally, we concatenate the outputs of the two pathways and pass them through a fully connected layer to obtain the final output $\mathbf{E}_{\mathbf{v}} \in \mathbb{R}^{D}$:

$$
\mathbf{E}_{\mathbf{vl}} = \mathcal{F}_{\text{backbone}}(x_{l}), \quad \mathbf{E}_{\mathbf{vh}} = \mathcal{F}_{v}(x_{h}), \quad \mathbf{E}_{\mathbf{v}} = \left[\mathbf{E}_{\mathbf{vl}}; \mathbf{E}_{\mathbf{vh}}\right]\mathrm{W}_{\mathrm{o}}, \tag{3}
$$

where $[\cdot;\cdot]$ denotes the concatenation operation, $\mathcal{F}_{\text{backbone}}$ denotes the visual backbone, $\mathcal{F}_v$ denotes $\mathcal{F}_{\text{backbone}}$ with the motion adapter, and $\mathrm{W}_{\mathrm{o}} \in \mathbb{R}^{2D \times D}$. With this strategy, we integrate the training of the backbone and adapter into a single stage, reducing the cost of data and computation.
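The sketch below illustrates one way this dual-pathway forward pass could be organized; the module names, the adapter-name convention used for freezing, and the fusion layer are assumptions for illustration.

```python
# Sketch of the dual-pathway co-training forward pass (Eq. 3): the low-rate
# clip updates the backbone; the high-rate clip flows through the backbone
# (frozen) plus the trainable adapters. Names are illustrative assumptions.
import torch

def co_train_forward(backbone, backbone_with_adapter, fuse, x_low, x_high):
    """x_low: (B, T, ...) clip; x_high: (B, lambda*T, ...) clip."""
    # Low-rate pathway: all backbone parameters receive gradients.
    for p in backbone.parameters():
        p.requires_grad = True
    e_vl = backbone(x_low)                          # (B, D)

    # High-rate pathway: backbone frozen, only adapter parameters trainable.
    # Flags set before the forward determine which parameters get gradients.
    for name, p in backbone_with_adapter.named_parameters():
        p.requires_grad = "adapter" in name
    e_vh = backbone_with_adapter(x_high)            # (B, D)

    # fuse would be e.g. nn.Linear(2 * D, D), playing the role of W_o.
    return fuse(torch.cat([e_vl, e_vh], dim=-1))    # (B, D) fused embedding
```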
Vision-Text Alignment. We follow the standard InfoNCE (Oord et al., 2018) loss as the objective for alignment between the visual embedding $\mathbf{E}_{\mathbf{v}}$ and text embedding $\mathbf{E}_{\mathbf{t}}$. For a sampled batch $\mathcal{B}$, we have:

$$
\mathcal{L} = -\frac{1}{|\mathcal{B}|} \sum_{\left(\mathbf{E}_{\mathbf{v}}^{i}, \mathbf{E}_{\mathbf{t}}^{i}\right) \in \mathcal{B}} \left( \log \frac{e^{\mathrm{s}\left(\mathbf{E}_{\mathbf{v}}^{i}, \mathbf{E}_{\mathbf{t}}^{i}\right)/\tau}}{\sum_{\mathbf{E}_{\mathbf{t}}^{j} \in \mathcal{B}} e^{\mathrm{s}\left(\mathbf{E}_{\mathbf{v}}^{i}, \mathbf{E}_{\mathbf{t}}^{j}\right)/\tau}} + \log \frac{e^{\mathrm{s}\left(\mathbf{E}_{\mathbf{v}}^{i}, \mathbf{E}_{\mathbf{t}}^{i}\right)/\tau}}{\sum_{\mathbf{E}_{\mathbf{v}}^{k} \in \mathcal{B}} e^{\mathrm{s}\left(\mathbf{E}_{\mathbf{v}}^{k}, \mathbf{E}_{\mathbf{t}}^{i}\right)/\tau}} \right), \tag{4}
$$

where $\mathrm{s}(\mathbf{E}_{\mathbf{v}}^{i}, \mathbf{E}_{\mathbf{t}}^{i})$ denotes the dot product between the $i$-th visual and text embeddings in the batch, and $\tau$ is a temperature parameter that scales the similarity scores.
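A compact PyTorch sketch of this symmetric objective, assuming L2-normalized embeddings so that the similarity $\mathrm{s}(\cdot,\cdot)$ reduces to a scaled dot product:

```python
# Sketch of the symmetric InfoNCE objective in Eq. 4, assuming L2-normalized
# embeddings. Each cross-entropy term matches one log-ratio direction.
import torch
import torch.nn.functional as F

def info_nce(e_v, e_t, tau=0.07):
    """e_v, e_t: (B, D) normalized visual/text embeddings; returns scalar loss."""
    logits = e_v @ e_t.t() / tau                     # (B, B) similarity matrix
    targets = torch.arange(e_v.size(0), device=e_v.device)
    loss_v2t = F.cross_entropy(logits, targets)      # video -> text direction
    loss_t2v = F.cross_entropy(logits.t(), targets)  # text -> video direction
    return loss_v2t + loss_t2v
```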
Table 2: Zero-shot performance comparison on 4 tasks between methods with different model sizes ('B' for base, 'L' for large, and 'G' for our 1B parameter backbone). Our EgoVideo outperforms previous methods with less but higher-quality pretraining data. Specifically, our EgoVideo-G achieves significant performance improvements across all datasets.

<table><tr><td rowspan="2">Method (ZS)</td><td rowspan="2">Data Size</td><td colspan="2">EK-100 MIR</td><td colspan="2">EK-100 CLS</td><td colspan="2">EGTEA</td><td colspan="2">EgoMCQ</td></tr><tr><td>mAP</td><td>nDCG</td><td>Top1-acc.</td><td>Top5-acc.</td><td>Mean-acc.</td><td>Top1-acc.</td><td>Intra</td><td>Inter</td></tr><tr><td>EgoVLPv2</td><td>4M</td><td>26.7</td><td>29.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>60.9</td><td>91.0</td></tr><tr><td>LaViLa-B</td><td>35M</td><td>30.9</td><td>32.0</td><td>16.4</td><td>34.4</td><td>28.9</td><td>35.4</td><td>59.9</td><td>93.8</td></tr><tr><td>AVION-B</td><td>35M</td><td>32.9</td><td>32.7</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>EMBED-B</td><td>38.3M</td><td>36.0</td><td>34.9</td><td>19.0</td><td>39.0</td><td>37.0</td><td>42.7</td><td>61.3</td><td>94.5</td></tr><tr><td>EgoVideo-B</td><td>7.4M</td><td>36.5</td><td>34.5</td><td>22.4</td><td>43.3</td><td>43.6</td><td>51.0</td><td>64.6</td><td>95.0</td></tr><tr><td>LaViLa-L</td><td>35M</td><td>36.1</td><td>34.6</td><td>20.8</td><td>41.4</td><td>34.1</td><td>40.1</td><td>63.1</td><td>94.5</td></tr><tr><td>AVION-L</td><td>35M</td><td>37.6</td><td>35.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Helping Hands</td><td>4M</td><td>37.5</td><td>37.8</td><td>-</td><td>-</td><td>39.1</td><td>46.6</td><td>63.0</td><td>94.5</td></tr><tr><td>EMBED-L</td><td>38.3M</td><td>40.8</td><td>37.5</td><td>22.8</td><td>45.0</td><td>40.3</td><td>46.7</td><td>64.7</td><td>95.6</td></tr><tr><td>EgoVideo-L</td><td>7.4M</td><td>41.8</td><td>37.0</td><td>24.0</td><td>46.8</td><td>47.1</td><td>51.7</td><td>65.5</td><td>95.9</td></tr><tr><td>EgoVideo-G</td><td>7.4M</td><td>47.1</td><td>39.0</td><td>28.5</td><td>54.3</td><td>58.0</td><td>63.0</td><td>69.1</td><td>96.6</td></tr></table>
# 4 EXPERIMENTS

# 4.1 DATASETS AND EVALUATION PROTOCOLS

Pretraining Dataset. As stated in the previous section, the source of our pretraining data comes from Ego4D (Grauman et al., 2022) and How2-Interlink7M (Wang et al., 2024a). After processing by our HOD pipeline, the total amount of data is 7.4M clips.

Evaluation Protocols. We follow previous works (Zhao et al., 2023; Pramanick et al., 2023) and use the following evaluation protocols. (1) Zero-shot (ZS): the pretrained video-text encoders are directly applied to the downstream datasets to perform video-text retrieval tasks without any additional tuning. For classification, we compute the similarity score between the video clip and the textual descriptions of all possible classes. (2) Finetuned (FT): this approach involves taking the pretrained video-text model and performing end-to-end finetuning on the training split of the target downstream dataset. (3) Feature-based: we extract video features using a frozen encoder and only train a task-specific head on the downstream dataset.
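For the zero-shot classification protocol, the following sketch shows the similarity-based prediction; the encoder interfaces and the prompt template are illustrative assumptions.

```python
# Sketch of zero-shot classification: embed the clip and every class name's
# textual description, then pick the most similar class.
import torch
import torch.nn.functional as F

@torch.no_grad()
def zero_shot_classify(video_encoder, text_encoder, clip, class_names):
    e_v = F.normalize(video_encoder(clip), dim=-1)         # (1, D)
    prompts = [f"a video of {c}" for c in class_names]      # simple template
    e_t = F.normalize(text_encoder(prompts), dim=-1)        # (C, D)
    sims = (e_v @ e_t.t()).squeeze(0)                       # (C,) similarities
    return class_names[int(sims.argmax())]
```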
Model Architecture and Hyperparameters. Our vision-language model follows the initialization of CLIP (Radford et al., 2021) and is composed of a vision encoder and a text encoder. For the base and large models, a ViT is used as our vision encoder, with a randomly initialized temporal position embedding to learn temporal information. For our giant-size model, we use InternVideo2 (Wang et al., 2024b). For hyperparameters, we use $T = 4$ and $\lambda = 4$ for frame inputs, and a downsample ratio $\gamma = 0.5$ for the motion adapter. During pretraining, we freeze the temperature parameter at $\tau = 0.07$. More details are provided in Appendix C.
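These hyperparameters can be summarized in a small config object; the field names below are illustrative, and only the values come from the text.

```python
# Hypothetical config capturing the stated pretraining hyperparameters.
from dataclasses import dataclass

@dataclass
class EgoVideoConfig:
    num_frames_low: int = 4        # T: frames for the backbone pathway
    frame_upsample: int = 4        # lambda: high-rate pathway uses lambda*T frames
    adapter_ratio: float = 0.5     # gamma: adapter down-projection ratio
    temperature: float = 0.07      # tau: frozen InfoNCE temperature

cfg = EgoVideoConfig()
print(cfg.frame_upsample * cfg.num_frames_low)  # high-rate pathway: 16 frames
```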
Downstream Tasks. We evaluate models on several egocentric downstream tasks: (1) EpicKitchens-100 (Damen et al., 2020) (EK-100) tasks, where we evaluate our method on multi-instance retrieval (EK-100 MIR) and action recognition (EK-100 CLS); (2) Ego4D (Grauman et al., 2022) tasks, where we evaluate our model on multiple-choice questions (EgoMCQ) (Li et al., 2021), natural language queries (EgoNLQ), and moment queries (EgoMQ); (3) EGTEA (Li et al., 2018), where we evaluate our model on an action recognition task focused on fine-grained cooking activities and hand-object interaction; (4) other tasks, where we evaluate our model on the GTEA (Fathi et al., 2011b) and HOI4D (Liu et al., 2022) datasets for action segmentation. Meanwhile, to show the generalization ability of our learned video representation, we evaluate the task success rate on the Franka Kitchen dataset (Gupta et al., 2019), a simulation environment for embodied AI.
Table 3: Fine-tuning performance of models of different sizes on 5 tasks. Compared to the previous SOTA EMBED-L, our EgoVideo achieves new state-of-the-art performances on all datasets.

<table><tr><td rowspan="2">Method (FT)</td><td colspan="2">EK-100 MIR</td><td rowspan="2">EK-100 CLS Top1-acc.</td><td rowspan="2">EGTEA Top1-acc.</td><td rowspan="2">EgoNLQ R1@0.5</td><td colspan="2">EgoMQ</td></tr><tr><td>mAP</td><td>nDCG</td><td>R1@0.5</td><td>mAP</td></tr><tr><td>EgoVLPv2-B</td><td>47.3</td><td>61.9</td><td>-</td><td>-</td><td>7.9</td><td>31.1</td><td>12.2</td></tr><tr><td>EgoVideo-B</td><td>52.7</td><td>65.3</td><td>49.8</td><td>74.6</td><td>8.1</td><td>34.7</td><td>14.7</td></tr><tr><td>Helping Hands-L</td><td>-</td><td>-</td><td>-</td><td>-</td><td>7.9</td><td>33.4</td><td>16.0</td></tr><tr><td>LaViLa-L</td><td>50.9</td><td>66.5</td><td>51.0</td><td>76.0</td><td>7.3</td><td>32.5</td><td>13.4</td></tr><tr><td>EMBED-L</td><td>56.0</td><td>67.9</td><td>51.9</td><td>76.1</td><td>8.5</td><td>33.9</td><td>15.1</td></tr><tr><td>EgoVideo-L</td><td>56.6</td><td>69.0</td><td>53.7</td><td>79.2</td><td>8.9</td><td>36.6</td><td>17.1</td></tr><tr><td>EgoVideo-G</td><td>60.3</td><td>70.0</td><td>56.0</td><td>80.0</td><td>10.0</td><td>38.7</td><td>19.6</td></tr></table>
# 4.2 COMPARISON TO STATE-OF-THE-ART

Zero-shot Evaluation. Table 2 shows the results on 4 tasks in the zero-shot setting. We compare our method against previous egocentric representation learning methods including EgoVLPv2 (Pramanick et al., 2023), LaViLa (Zhao et al., 2023), AVION (Zhao & Krahenbuhl, 2023), Helping Hands (Zhang et al., 2023), and EMBED (Dou et al., 2024). Notably, despite their use of refined captions and significantly larger training datasets, LaViLa, AVION, and EMBED fail to match our EgoVideo. In the following experiments, we demonstrate that both our high-quality HOD data and the design of the EgoVideo model play important roles in achieving this performance. Helping Hands uses a stronger TimeSformer backbone (Bertasius et al., 2021) and adds additional decoders for auxiliary object-oriented tasks. However, our method still outperforms Helping Hands, demonstrating the superiority of our representation learning scheme.

Specifically, in the EK-100 MIR task, our EgoVideo outperforms EMBED by $0.5\%$, $1.0\%$, and $6.3\%$ in mAP (at the base, large, and giant scales, respectively) and significantly outperforms LaViLa at the same model size. In the EK-100 CLS task, our EgoVideo-B model demonstrates superior performance with a top-1 accuracy of $22.4\%$ and a top-5 accuracy of $43.3\%$, significantly outperforming LaViLa-B and EMBED-B.

On the EGTEA dataset, known for its focus on hand-object interactions, our EgoVideo-B achieves a mean accuracy of $43.6\%$ and a top-1 accuracy of $51.0\%$, surpassing EMBED-B and even EMBED-L. This underscores the importance of learning hand-object dynamics and shows the strong generalization capability of our model. The EgoMCQ task further highlights the efficacy of our method, with EgoVideo-B outperforming LaViLa-B by $4.7\%$ and $1.2\%$, and EMBED-B by $3.3\%$ and $0.5\%$, in intra-class and inter-class accuracy, respectively. Our EgoVideo-L model also shows significant improvements, with an intra-class accuracy of $65.5\%$ and an inter-class accuracy of $95.9\%$. These results demonstrate the superior performance and generalization capability of our method without any additional supervision. We take a step further to explore scaling in egocentric representation learning and find that EgoVideo-G elevates performance to the next level.

Fine-tuning Evaluation. Table 3 shows the results of the fine-tuning evaluation. Our EgoVideo method outperforms previous approaches across all tasks and datasets. Our EgoVideo-B demonstrates significant performance enhancements compared to EgoVLPv2-B, with improvements of $5.4\%$ and $2.5\%$ in mAP on the EK-100 MIR and EgoMQ tasks, respectively. This performance is even comparable to the larger LaViLa-L. For our EgoVideo-L, we observe consistent improvements across all tasks, including substantial gains of $1.8\%$ and $3.1\%$ on the EK-100 CLS and EGTEA action recognition tasks, highlighting the superior performance of our model in fine-grained action understanding. Moreover, we achieve improvements of $0.4\%$ in R1@0.5 on the EgoNLQ task, and of $2.7\%$ and $2.0\%$ in R1@0.5 and mAP on the EgoMQ task, confirming the richness of the representations learned by our model and its capacity to capture intricate hand-object interaction information.
# 4.3 ABLATION STUDIES

Pretraining Data. We first conduct experiments by fixing the models and varying the pretraining data. Here we choose AVION for a fair comparison, since both AVION and EgoVideo use ViT as the backbone. As shown in Table 4, both our EgoVideo and AVION achieve their best performance when the combination of Ego4D-HOD data and How2-HOD data is used, and EgoVideo consistently outperforms AVION when trained on the same data, emphasizing the effectiveness of the model design. Comparing models trained with EgoClip and Ego4D-HOD (rows 1, 2 and 5, 6), it is clear that significant improvements can be observed on the EK-100 MIR and EGTEA tasks. Adding additional data from How2-HOD improves both models substantially (rows 1, 3 and 5, 7). Furthermore, when using only Ego4D-HOD, the performance on EGTEA surpasses that of EgoClip and How2-HOD combined, indicating the beneficial impact of our data on fine-grained dynamics understanding.

Table 4: Ablations on different pretraining datasets, including the original EgoClip (Zhao & Krahenbuhl, 2023), Ego4D-HOD, and How2-HOD selected by our classifier from How2-Interlink7M.

<table><tr><td rowspan="2">ID</td><td rowspan="2">Model</td><td rowspan="2">Ego4D-EgoClip</td><td rowspan="2">Ego4D-HOD</td><td rowspan="2">How2-HOD</td><td colspan="2">EK-100 MIR</td><td colspan="2">EGTEA</td></tr><tr><td>mAP</td><td>nDCG</td><td>Mean-acc.</td><td>Top1-acc.</td></tr><tr><td>1</td><td rowspan="4">AVION-B</td><td>✓</td><td></td><td></td><td>27.3</td><td>29.3</td><td>26.2</td><td>30.5</td></tr><tr><td>2</td><td></td><td>✓</td><td></td><td>31.0(+3.7)</td><td>31.3(+2.0)</td><td>32.3(+6.1)</td><td>37.0(+6.5)</td></tr><tr><td>3</td><td>✓</td><td></td><td>✓</td><td>33.2(+5.9)</td><td>32.5(+3.2)</td><td>31.6(+5.4)</td><td>35.6(+5.1)</td></tr><tr><td>4</td><td></td><td>✓</td><td>✓</td><td>34.4(+7.1)</td><td>33.7(+4.4)</td><td>39.4(+13.2)</td><td>46.4(+15.9)</td></tr><tr><td>5</td><td rowspan="4">EgoVideo-B</td><td>✓</td><td></td><td></td><td>31.1</td><td>32.0</td><td>30.8</td><td>36.0</td></tr><tr><td>6</td><td></td><td>✓</td><td></td><td>34.4(+3.3)</td><td>33.9(+1.9)</td><td>41.1(+10.3)</td><td>47.9(+11.9)</td></tr><tr><td>7</td><td>✓</td><td></td><td>✓</td><td>35.5(+4.4)</td><td>34.1(+2.1)</td><td>40.8(+10.0)</td><td>47.1(+11.1)</td></tr><tr><td>8</td><td></td><td>✓</td><td>✓</td><td>36.5(+5.4)</td><td>34.5(+2.5)</td><td>43.6(+12.8)</td><td>51.0(+15.0)</td></tr></table>
Table 5: Comparison of the number of parameters.

<table><tr><td>Method</td><td>Backbone</td><td>Params</td><td>EK-100 mAP</td></tr><tr><td>LaViLa-B</td><td>TSF-B</td><td>121M</td><td>30.9</td></tr><tr><td>AVION-B</td><td>ViT-B</td><td>86M</td><td>32.9</td></tr><tr><td>EMBED-B</td><td>TSF-B</td><td>121M</td><td>36.0</td></tr><tr><td>EgoVideo-B</td><td>ViT-B</td><td>112M</td><td>36.5</td></tr><tr><td>LaViLa-L</td><td>TSF-L</td><td>438M</td><td>36.1</td></tr><tr><td>AVION-L</td><td>ViT-L</td><td>307M</td><td>37.6</td></tr><tr><td>EMBED-L</td><td>TSF-L</td><td>438M</td><td>40.8</td></tr><tr><td>EgoVideo-L</td><td>ViT-L</td><td>375M</td><td>41.8</td></tr><tr><td>EgoVideo-G</td><td>ViT-G</td><td>1050M</td><td>47.1</td></tr></table>

Table 6: The computational cost during inference. Views = #frames × #spatial crops × #temporal clips. "Extra GFLOPs" means extra computation compared to ViT with the same views.

<table><tr><td>Method</td><td>Views</td><td>GFLOPs</td><td>Extra GFLOPs</td></tr><tr><td>ViT-B</td><td>4 × 1 × 3</td><td>201</td><td>-</td></tr><tr><td>ViT-B</td><td>16 × 1 × 3</td><td>804</td><td>-</td></tr><tr><td>LaViLa-B</td><td>16 × 1 × 3</td><td>1432</td><td>628</td></tr><tr><td>EgoVideo-B</td><td>16 × 1 × 3</td><td>1092</td><td>288</td></tr><tr><td>ViT-L</td><td>4 × 1 × 3</td><td>1047</td><td>-</td></tr><tr><td>ViT-L</td><td>16 × 1 × 3</td><td>4188</td><td>-</td></tr><tr><td>LaViLa-L</td><td>16 × 1 × 3</td><td>4956</td><td>768</td></tr><tr><td>EgoVideo-L</td><td>16 × 1 × 3</td><td>5350</td><td>1162</td></tr></table>

Model Size and Inference Computational Cost. Table 5 compares the number of parameters. Our EgoVideo model maintains a relatively small parameter count; even with the addition of the motion adapter, the total remains lower than that of LaViLa and EMBED, highlighting the efficiency of our approach. Meanwhile, in Table 6 we compare the inference computational cost of our EgoVideo with ViT and LaViLa. Thanks to our motion adapter, the extra inference cost of our model at 16 frames compared to the plain ViT is only about the cost of running ViT at 4 frames.
Training Efficiency. In Table 7, we compare the performance and computational cost of our EgoVideo-B with AVION-B, where AVION-B is trained under two different settings: pretraining with 16 frames and pretraining with 4 frames. Our EgoVideo is trained in a mixed 16- and 4-frame fashion, and is thus faster than training the whole backbone directly on all 16 frames. Meanwhile, EgoVideo achieves the best performance on the EK-100 MIR and EGTEA datasets. These results strongly demonstrate the effectiveness of our training strategy and motion adapter design in the EgoVideo model.

Motion Adapter vs. Other Adapters. We compare our motion adapter with the standard adapter (Houlsby et al., 2019) and the ST-adapter (Pan et al., 2022). The standard adapter uses only a downsample MLP and an upsample MLP, while the ST-adapter performs convolution operations solely along the temporal dimension. As shown in Table 8, the results on the EK-100 MIR task demonstrate that both the ST-adapter and our motion adapter outperform the standard adapter. This improvement can be attributed to the limited parameters of the standard adapter, which restrict its ability to capture complex, fine-grained information. Compared to the ST-adapter, our motion adapter achieves the best performance by adding a spatial convolution operation, suggesting that both spatial and temporal information are crucial for egocentric video representation learning.

Table 7: Ablations on the training strategy. Models are all trained on our Ego4D-HOD dataset for 10 epochs.

<table><tr><td rowspan="2">Method</td><td rowspan="2">GPU Hours</td><td colspan="2">EK-100 MIR</td><td rowspan="2">EGTEA Top1-acc.</td></tr><tr><td>mAP</td><td>nDCG</td></tr><tr><td>AVION-4f</td><td>95.5</td><td>34.4</td><td>33.7</td><td>46.4</td></tr><tr><td>AVION-16f</td><td>395.5</td><td>36.2</td><td>34.3</td><td>47.4</td></tr><tr><td>EgoVideo</td><td>180.6</td><td>36.5</td><td>34.5</td><td>51.0</td></tr></table>

Table 8: Experiments with different adapters. Our motion adapter achieves the best performance with a small increase in parameters.

<table><tr><td rowspan="2">Design</td><td rowspan="2">Param Size</td><td colspan="2">EK-100 MIR</td></tr><tr><td>mAP</td><td>nDCG</td></tr><tr><td>Adapter</td><td>8.28M</td><td>34.7</td><td>33.0</td></tr><tr><td>ST-adapter</td><td>10.08M</td><td>35.9</td><td>34.1</td></tr><tr><td>Motion Adapter</td><td>26.01M</td><td>36.5</td><td>34.5</td></tr></table>
# 4.4 FEATURE-BASED EVALUATION ON OTHER TASKS

With the knowledge of hand-object dynamics, EgoVideo features generalize well to other human behavior understanding tasks and robot manipulation tasks. Table 9 shows the action segmentation results on the HOI4D (Liu et al., 2022) and GTEA (Fathi et al., 2011b) datasets, using features extracted from I3D (Carreira & Zisserman, 2017), AVION, and our EgoVideo. The results demonstrate that our EgoVideo is also effective in the action segmentation task, especially on HOI4D, which requires differentiating fine-grained hand-object interactions.

Also, we test the generalization capability of EgoVideo on the robot manipulation task in the Franka Kitchen environment (Gupta et al., 2019). We follow the same setting as and compare with previous robotic representation learning works MVP (Radosavovic et al., 2023), Voltron (Karamcheti et al., 2023), and MPI (Zeng et al., 2024). For MPI, we compare variants both with and without additional detection supervision. From Table 10, our EgoVideo consistently surpasses MVP and Voltron on the "Turn knob (TK)", "Open Microwave (OM)", and "Open door (OD)" tasks. While MPI uses additional detection and prediction transformers and performs better than EgoVideo on two tasks, EgoVideo still performs comparably in overall success rate. Complete results with more details and analyses can be found in Appendix E. These results strongly demonstrate the fine-grained quality and generalization ability of the representations learned by EgoVideo.

Table 9: Experiments on the action segmentation task. We report results of ASFormer (Yi et al., 2021) with different input features.

<table><tr><td rowspan="2">Feature</td><td colspan="2">HOI4D</td><td colspan="2">GTEA</td></tr><tr><td>F1@50</td><td>Edit</td><td>F1@50</td><td>Edit</td></tr><tr><td>I3D</td><td>35.0</td><td>80.3</td><td>79.2</td><td>84.6</td></tr><tr><td>AVION</td><td>70.2</td><td>89.1</td><td>84.5</td><td>89.4</td></tr><tr><td>EgoVideo</td><td>74.8</td><td>90.1</td><td>87.1</td><td>90.1</td></tr></table>

Table 10: Results on Franka Kitchen. We report the success rate $(\%)$ on 50 sampled trajectories.

<table><tr><td>Method</td><td>TK</td><td>OM</td><td>OD</td><td>Avg.</td></tr><tr><td>MVP</td><td>79.0</td><td>41.0</td><td>48.0</td><td>56.0</td></tr><tr><td>Voltron</td><td>76.0</td><td>41.0</td><td>45.3</td><td>54.1</td></tr><tr><td>MPI</td><td>85.5</td><td>49.0</td><td>52.5</td><td>62.3</td></tr><tr><td>EgoVideo</td><td>80.1</td><td>65.0</td><td>52.7</td><td>66.0</td></tr><tr><td>MPI+Det</td><td>89.0</td><td>54.0</td><td>57.7</td><td>66.9</td></tr></table>
# 5 CONCLUSION

In this work, we inject fine-grained hand-object dynamics into egocentric video representation learning. Our method addresses the drawbacks of existing methods from two perspectives. On the data side, we propose HOD, a novel framework to generate new paired video-language data, where the language contains intricately depicted hand-object dynamics. On the model side, we propose EgoVideo, a model with a motion adapter combined with a co-training technique, to fully exploit the fine-grained dynamics provided by the HOD data in the representation learning process. Experimental results demonstrate that our method achieves state-of-the-art performance across multiple downstream tasks and generalizes to the embodied manipulation environment.

Acknowledgement. This work is funded in part by the National Key R&D Program of China (2022ZD0160201), Shanghai Artificial Intelligence Laboratory, and JSPS KAKENHI Grant Number JP22KF0119.

# REFERENCES
01.AI: Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, Kaidong Yu, Peng Liu, Qiang Liu, Shawn Yue, Senbin Yang, Shiming Yang, Tao Yu, Wen Xie, Wenhao Huang, Xiaohui Hu, Xiaoyi Ren, Xinyao Niu, Pengcheng Nie, Yuchi Xu, Yudong Liu, Yue Wang, Yuxuan Cai, Zhenyu Gu, Zhiyuan Liu, and Zonghong Dai. Yi: Open foundation models by 01.AI, 2024.

Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, volume 2, pp. 4, 2021.

Samarth Brahmbhatt, Chengcheng Tang, Christopher D Twigg, Charles C Kemp, and James Hays. Contactpose: A dataset of grasps with object contact and hand pose. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XIII 16, pp. 361-378. Springer, 2020.

Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 961-970, 2015.

Yujun Cai, Liuhao Ge, Jianfei Cai, and Junsong Yuan. Weakly-supervised 3d hand pose estimation from monocular rgb images. In Proceedings of the European conference on computer vision (ECCV), pp. 666-682, 2018.

Zhe Cao, Ilija Radosavovic, Angjoo Kanazawa, and Jitendra Malik. Reconstructing hand-object interactions in the wild. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 12417-12426, 2021.

Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299-6308, 2017.

Guo Chen, Yifei Huang, Jilan Xu, Baoqi Pei, Zhe Chen, Zhiqi Li, Jiahao Wang, Kunchang Li, Tong Lu, and Limin Wang. Video mamba suite: State space model as a versatile alternative for video understanding. arXiv preprint arXiv:2403.09626, 2024.

Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonio Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. Scaling egocentric vision: The epic-kitchens dataset. In Proceedings of the European conference on computer vision (ECCV), pp. 720-736, 2018.

Dima Damen, Hazel Doughty, Giovanni Maria Farinella, Sanja Fidler, Antonio Furnari, Evangelos Kazakos, Davide Moltisanti, Jonathan Munro, Toby Perrett, Will Price, et al. The epic-kitchens dataset: Collection, challenges and baselines. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(11):4125-4141, 2020.

Ning Ding, Yujia Qin, Guang Yang, Fuchao Wei, Zonghan Yang, Yusheng Su, Shengding Hu, Yulin Chen, Chi-Min Chan, Weize Chen, et al. Parameter-efficient fine-tuning of large-scale pre-trained language models. Nature Machine Intelligence, 5(3):220-235, 2023.

Bardia Doosti, Shujon Naha, Majid Mirbagheri, and David J Crandall. Hope-net: A graph-based model for hand-object pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 6608-6617, 2020.

Zi-Yi Dou, Xitong Yang, Tushar Nagarajan, Huiyu Wang, Jing Huang, Nanyun Peng, Kris Kitani, and Fu-Jen Chu. Unlocking exocentric video-language data for egocentric video representation learning. arXiv preprint arXiv:2408.03567, 2024.
Yazan Abu Farha and Jurgen Gall. Ms-tcn: Multi-stage temporal convolutional network for action segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 3575-3584, 2019.

Alireza Fathi, Ali Farhadi, and James M Rehg. Understanding egocentric activities. In 2011 International conference on computer vision, pp. 407-414. IEEE, 2011a.

Alireza Fathi, Xiaofeng Ren, and James M Rehg. Learning to recognize objects in egocentric activities. In CVPR 2011, pp. 3281-3288. IEEE, 2011b.

Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 6202-6211, 2019.

Rohit Girdhar and Kristen Grauman. Anticipative video transformer. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 13505-13515, 2021.

Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18995-19012, 2022.

Abhishek Gupta, Vikash Kumar, Corey Lynch, Sergey Levine, and Karol Hausman. Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning. arXiv preprint arXiv:1910.11956, 2019.

Yana Hasson, Gul Varol, Dimitrios Tzionas, Igor Kalevatykh, Michael J Black, Ivan Laptev, and Cordelia Schmid. Learning joint reconstruction of hands and manipulated objects. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 11807-11816, 2019.

Yana Hasson, Bugra Tekin, Federica Bogo, Ivan Laptev, Marc Pollefeys, and Cordelia Schmid. Leveraging photometric consistency over time for sparsely supervised hand-object reconstruction. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 571-580, 2020.

Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International conference on machine learning, pp. 2790-2799. PMLR, 2019.

Yifei Huang, Minjie Cai, Zhenqiang Li, and Yoichi Sato. Predicting gaze in egocentric video by learning task-dependent attention transition. In European Conference on Computer Vision, 2018.

Yifei Huang, Minjie Cai, Zhenqiang Li, Feng Lu, and Yoichi Sato. Mutual context network for jointly estimating egocentric gaze and action. IEEE Transactions on Image Processing, 29:7795-7806, 2020a.

Yifei Huang, Yusuke Sugano, and Yoichi Sato. Improving action segmentation via graph-based temporal reasoning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 14024-14034, 2020b.

Yifei Huang, Guo Chen, Jilan Xu, Mingfang Zhang, Lijin Yang, Baoqi Pei, Hongjie Zhang, Lu Dong, Yali Wang, Limin Wang, et al. Egoexolearn: A dataset for bridging asynchronous ego- and exo-centric view of procedural activities in real world. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 22072-22086, 2024a.

Yifei Huang, Jilan Xu, Baoqi Pei, Yuping He, Guo Chen, Lijin Yang, Xinyuan Chen, Yaohui Wang, Zheng Nie, Jinyao Liu, et al. Vinci: A real-time embodied smart assistant based on egocentric vision-language model. arXiv preprint arXiv:2412.21080, 2024b.

Md Mohaiminul Islam, Ngan Ho, Xitong Yang, Tushar Nagarajan, Lorenzo Torresani, and Gedas Bertasius. Video recap: Recursive captioning of hour-long videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 18198-18208, 2024.
Hanwen Jiang, Shaowei Liu, Jiashun Wang, and Xiaolong Wang. Hand-object contact consistency reasoning for human grasps generation. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 11107-11116, 2021.

Siddharth Karamcheti, Suraj Nair, Annie S Chen, Thomas Kollar, Chelsea Finn, Dorsa Sadigh, and Percy Liang. Language-driven representation learning for robotics. arXiv preprint arXiv:2302.12766, 2023.

Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017.

Shijie Li, Yazan Abu Farha, Yun Liu, Ming-Ming Cheng, and Juergen Gall. Ms-tcn++: Multi-stage temporal convolutional network for action segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(6):6647-6658, 2020.

Yanghao Li, Tushar Nagarajan, Bo Xiong, and Kristen Grauman. Ego-exo: Transferring visual representations from third-person to first-person videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6943-6953, 2021.

Yin Li, Miao Liu, and James M Rehg. In the eye of beholder: Joint learning of gaze and actions in first person video. In Proceedings of the European conference on computer vision (ECCV), pp. 619-635, 2018.

Kevin Qinghong Lin, Jinpeng Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z Xu, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. Advances in Neural Information Processing Systems, 35:7575-7586, 2022.

Shaowei Liu, Hanwen Jiang, Jiarui Xu, Sifei Liu, and Xiaolong Wang. Semi-supervised 3d hand-object poses estimation with interactions in time. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14687-14697, 2021.

Yunze Liu, Yun Liu, Che Jiang, Kangbo Lyu, Weikang Wan, Hao Shen, Boqiang Liang, Zhoujie Fu, He Wang, and Li Yi. Hoi4d: A 4d egocentric dataset for category-level human-object interaction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 21013-21022, 2022.

Mi Luo, Zihui Xue, Alex Dimakis, and Kristen Grauman. Put myself in your shoes: Lifting the egocentric perspective from exocentric videos. arXiv preprint arXiv:2403.06351, 2024.

Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF international conference on computer vision, pp. 2630-2640, 2019.

Suraj Nair, Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, and Abhinav Gupta. R3m: A universal visual representation for robot manipulation. arXiv preprint arXiv:2203.12601, 2022.

Takehiko Ohkawa, Kun He, Fadime Sener, Tomas Hodan, Luan Tran, and Cem Keskin. Assemblyhands: Towards egocentric activity understanding via 3d hand pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 12999-13008, 2023.

Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.

OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.

Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. Advances in Neural Information Processing Systems, 35:26462-26477, 2022.

Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Yuheng Carl Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 20133-20143, 2023.
Baoqi Pei, Guo Chen, Jilan Xu, Yuping He, Yicheng Liu, Kanghua Pan, Yifei Huang, Yali Wang, Tong Lu, Limin Wang, et al. Egovieo: Exploring egocentric foundation model and downstream adaptation. arXiv preprint arXiv:2406.18070, 2024.
|
| 268 |
+
Chiara Plizzari, Mirco Planamente, Gabriele Goletto, Marco Cannici, Emanuele Gusso, Matteo Matteucci, and Barbara Caputo. E2 (go) motion: Motion augmented event stream for egocentric action recognition. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 19935-19947, 2022.
|
| 269 |
+
Chiara Plizzari, Gabriele Goletto, Antonino Furnari, Siddhant Bansal, Francesco Ragusa, Giovanni Maria Farinella, Dima Damen, and Tatiana Tommasi. An outlook into the future of egocentric vision. International Journal of Computer Vision, pp. 1-57, 2024.
|
| 270 |
+
Shraman Pramanick, Yale Song, Sayan Nag, Kevin Qinghong Lin, Hardik Shah, Mike Zheng Shou, Rama Chellappa, and Pengchuan Zhang. Egovlpv2: Egocentric video-language pre-training with fusion in the backbone. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5285-5297, 2023.
|
| 271 |
+
Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.
|
| 272 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021.
|
| 273 |
+
Ilija Radosavovic, Tete Xiao, Stephen James, Pieter Abbeel, Jitendra Malik, and Trevor Darrell. Real-world robot learning with masked visual pre-training. In Conference on Robot Learning, pp. 416-426. PMLR, 2023.
|
| 274 |
+
Snehanshu Saha, Archana Mathur, Aditya Pandey, and Harshith Arun Kumar. Diffact: A unifying framework for activation functions. In 2021 International Joint Conference on Neural Networks (IJCNN), pp. 1-8. IEEE, 2021.
|
| 275 |
+
Rico Sennrich. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015.
|
| 276 |
+
Dandan Shan, Jiaqi Geng, Michelle Shu, and David F Fouhey. Understanding human hands in contact at internet scale. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9869-9878, 2020.
|
| 277 |
+
Sanjana Srivastava, Chengshu Li, Michael Lingelbach, Roberto Martín-Martín, Fei Xia, Kent Elliott Vainio, Zheng Lian, Cem Gokmen, Shyamal Buch, Karen Liu, et al. Behavior: Benchmark for everyday household activities in virtual, interactive, and ecological environments. In Conference on robot learning, pp. 477-490. PMLR, 2022.
|
| 278 |
+
Alex Jinpeng Wang, Linjie Li, Kevin Qinghong Lin, Jianfeng Wang, Kevin Lin, Zhengyuan Yang, Lijuan Wang, and Mike Zheng Shou. Cosmo: Contrastive streamlined multimodal model with interleaved pre-training. arXiv preprint arXiv:2401.00849, 2024a.
|
| 279 |
+
Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024b.
|
| 280 |
+
Zhen Xing, Qi Dai, Zejia Weng, Zuxuan Wu, and Yu-Gang Jiang. Aid: Adapting image2video diffusion models for instruction-guided video prediction. arXiv preprint arXiv:2406.06465, 2024.
|
| 281 |
+
Jilan Xu, Yifei Huang, Junlin Hou, Guo Chen, Yuejie Zhang, Rui Feng, and Weidi Xie. Retrievalaugmented egocentric video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13525-13536, 2024.
|
| 282 |
+
|
| 283 |
+
Hongwei Xue, Tiankai Hang, Yanhong Zeng, Yuchong Sun, Bei Liu, Huan Yang, Jianlong Fu, and Baining Guo. Advancing high-resolution video-language representation with large-scale video transcriptions. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5036-5045, 2022.
|
| 284 |
+
Linlin Yang and Angela Yao. Disentangling latent hands for image synthesis and pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 9877-9886, 2019.
|
| 285 |
+
Fangqiu Yi, Hongyu Wen, and Tingting Jiang. Asformer: Transformer for action segmentation. arXiv preprint arXiv:2110.08568, 2021.
|
| 286 |
+
Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, et al. Depth-based 3d hand pose estimation: From current achievements to future goals. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2636-2645, 2018.
|
| 287 |
+
Jia Zeng, Qingwen Bu, Bangjun Wang, Wenke Xia, Li Chen, Hao Dong, Haoming Song, Dong Wang, Di Hu, Ping Luo, et al. Learning manipulation by predicting interaction. arXiv preprint arXiv:2406.00439, 2024.
|
| 288 |
+
Chuhan Zhang, Ankush Gupta, and Andrew Zisserman. Helping hands: An object-aware egocentric video recognition model. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13901-13912, 2023.
|
| 289 |
+
Hao Zhang, Aixin Sun, Wei Jing, and Joey Tianyi Zhou. Span-based localizing network for natural language video localization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 6543-6554, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.585.
|
| 290 |
+
Lingzhi Zhang, Shenghao Zhou, Simon Stent, and Jianbo Shi. Fine-grained egocentric hand-object segmentation: Dataset, model, and applications. In European Conference on Computer Vision, pp. 127-145. Springer, 2022.
|
| 291 |
+
Chen Zhao, Ali K Thabet, and Bernard Ghanem. Video self-stitching graph network for temporal action localization. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 13658-13667, 2021.
|
| 292 |
+
Yue Zhao and Philipp Krahenbuhl. Training a large video model on a single machine in a day. arXiv preprint arXiv:2309.16669, 2023.
|
| 293 |
+
Yue Zhao, Ishan Misra, Philipp Kähenbuhl, and Rohit Girdhar. Learning video representations from large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 6586-6597, 2023.
|
| 294 |
+
|
| 295 |
+

# A DETAILS ABOUT HOD

Figure 5: Examples of egocentric video/ego-like video and non-ego video.
Data Selection Since our HOD involves data not only from Ego4D but also from Howto-Interlink7M, we use a style classifier $\mathcal{P}$ to filter egocentric-style videos from the Howto-Interlink7M dataset. Specifically, our style classifier employs a simple two-layer MLP architecture. We utilize InternVideo2 (Wang et al., 2024b) to extract video features from all videos of the Howto-Interlink7M dataset. After that, we manually annotate 10,000 clips with positive and negative labels, where a positive label indicates that the video is an egocentric (or ego-like) video. Examples of positively and negatively labeled videos can be found in Figure 5. We randomly select $10\%$ of these clips to form the validation set. After training the classifier on the training set, we achieve $89\%$ accuracy on the validation set.
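To make this filtering step concrete, below is a minimal sketch of such a two-layer MLP style classifier operating on precomputed InternVideo2 clip features. The 768-dimensional input, the hidden width, and the argmax decision rule are illustrative assumptions; the paper specifies only the two-layer MLP architecture and the 10,000 labeled training clips.

```python
import torch
import torch.nn as nn

class StyleClassifier(nn.Module):
    """Two-layer MLP scoring whether a clip looks egocentric (sketch)."""

    def __init__(self, feat_dim=768, hidden_dim=256):  # sizes are assumptions
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(feat_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 2),  # logits: [non-ego, ego/ego-like]
        )

    def forward(self, video_feat):
        return self.net(video_feat)

# video_feat stands in for an InternVideo2 clip embedding; training would use
# the 10,000 manually labeled clips with a standard cross-entropy loss.
clf = StyleClassifier()
logits = clf(torch.randn(4, 768))    # a batch of 4 clip features
keep = logits.argmax(dim=-1) == 1    # retain clips predicted egocentric
```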
HOD Rephraser We use the Yi-34B model to generate hand-object dynamics narrations. The Yi-34B model is trained on a corpus of over 150,000 high-quality texts, its model weights are open-source, and it ranks highly among existing open-source Large Language Models. We use the model directly without finetuning.
To generate reliable narrations, we need to convert the obtained hand-object information into appropriate text. For the movement trajectories of hands and objects, we directly calculate the center points of the bounding boxes and normalize them to obtain a sequence $L = ((w_0, h_0), (w_1, h_1), \ldots, (w_{15}, h_{15}))$. To determine whether an object is contacted by the left hand or right hand separately, or by both hands simultaneously, we apply a generalized IoU function to the left-contact object and the right-contact object. If the IoU value is greater than 0.9, we classify the object as being contacted by both hands.
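As a concrete illustration of this conversion, the sketch below computes the normalized center-point trajectory and the both-hands test using torchvision's generalized IoU. The (x1, y1, x2, y2) box format and the helper names are assumptions; the 0.9 threshold comes from the text.

```python
import torch
from torchvision.ops import generalized_box_iou

def box_center_trajectory(boxes_xyxy, img_w, img_h):
    """Normalized center points (w_t, h_t) for the 16 per-frame boxes."""
    cx = (boxes_xyxy[:, 0] + boxes_xyxy[:, 2]) / 2 / img_w
    cy = (boxes_xyxy[:, 1] + boxes_xyxy[:, 3]) / 2 / img_h
    return torch.stack([cx, cy], dim=1)  # shape (16, 2)

def contacted_by_both_hands(left_obj_box, right_obj_box, thresh=0.9):
    """Rule from the text: generalized IoU > 0.9 => one object held by both hands."""
    giou = generalized_box_iou(left_obj_box[None], right_obj_box[None])[0, 0]
    return giou > thresh
```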
Subsequently, we prompt the LLM with a system prompt of:
System Prompt
Now you are a captioning assistant, you need to generate hand object interaction caption and combine them with the origin narration. Given the origin narration of the video clip and spatial localization ([x, y]) of hands and objects in the clip, please help me describe the direction of motion of the left and right hands, their relative relationship to objects and whether they are touching or not. Do not mention the pixel info. Two_hand_object means objects with two hands in contact, left_hand_object means objects with left hand in contact, right_hand_object means objects with right hand in contact.
Hand Object Dynamics
left_hand: ((w_0, h_0), (w_1, h_1), ..., (w_15, h_15))

right_hand: ((w_0, h_0), (w_1, h_1), ..., (w_15, h_15))

left_hand_object: ((w_0, h_0), (w_1, h_1), ..., (w_15, h_15))

right_hand_object: ((w_0, h_0), (w_1, h_1), ..., (w_15, h_15))

two_hand_object: ((w_0, h_0), (w_1, h_1), ..., (w_15, h_15))

origin narration: C takes a scissors.
System Prompt
Please help me summarize the direction of movement of the left hand, right hand, and objects, and generate a new caption based on the original caption. It is strictly forbidden to mention the frame number and spatial position coordinates in the description.
For the computational cost, it takes around 2 days to extract bounding boxes from all vision-language clips and 3 days to generate narrations with the LLM using 32 A100 GPUs, resulting in a total of around 4,000 GPU-hours.
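As a rough sanity check of that total, assuming all 32 GPUs stay busy for the full 5 days:

$$32 \ \text{GPUs} \times (2 + 3) \ \text{days} \times 24 \ \text{h/day} = 3840 \approx 4000 \ \text{GPU-hours}.$$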
Data Evaluation Here, we provide a detailed explanation of our evaluation process. First, we prompt the LLM to generate new narrations with different verbs/nouns using the prompt:
Please help me modify the key verbs and nouns in this sentence to slightly alter its meaning while keeping the sentence structure largely unchanged. Just return the modified sentence to me. Ensure the semantic shift is minimal, such as changing one or two verbs and nouns.
Then we use GPT-4o as a judge to rate the quality of the narrations with the prompt:
You are a judge. There are 16 frames in the video, I have three captions and need your help to score the three captions based on three criteria: relevance, accuracy, and level of detail. The score ranges from 0 to 10, with a higher score indicating better quality of the caption. You can just answer me in the following format: First: score1, Second: score2, Third: score3. First caption: text1 Second caption: text2 Third caption: text3
As mentioned in the main manuscript, to further validate the quality of our HOD dataset, we utilize two standard unsupervised automatic metrics to evaluate the quality of narrations. We use the human narration as the ground truth and compare our HOD data with LaViLa-Narrator on METEOR and CIDEr scores. The results in Table 11 reveal that while our HOD data achieves a slightly lower METEOR score, it outperforms LaViLa-Narrator in CIDEr. This discrepancy arises because many LaViLa narrations closely mirror the original text, whereas our narrations incorporate additional dynamic information. Although our performance does not drastically exceed that of LaViLa, the results demonstrate that our narrations successfully retain the original semantic content.
Table 11: Comparison with LaViLa-Narrator on the narration quality.
<table><tr><td>Text</td><td>METEOR</td><td>CIDEr</td></tr><tr><td>LaViLa-Narrator</td><td>0.45</td><td>0.34</td></tr><tr><td>HOD</td><td>0.39</td><td>0.40</td></tr></table>
Limitations and future work Our model relies on the quality of hand-object detection and on the LLM's rephrasing, both of which may introduce accumulated errors. Beyond reducing errors in the data construction, exploring how hand-object dynamics can be better incorporated into language or other formats is a promising direction for future work.
# B DATASET DETAILS
Ego4D Ego4D (Grauman et al., 2022) contains 3,670 hours of egocentric videos with temporally dense narrations. Each narration has a timestamp and an associated free-form sentence. We follow previous works (Zhao et al., 2023; Lin et al., 2022) to prepare the Ego4D dataset for vision-language pretraining. Specifically, we drop the narrations that either contain "#unsure"/"#Unsure" tags or are shorter than 4 words. This results in 4M video-text clip pairs.
Howto-Interlink7M Howto-Interlink7M (Wang et al., 2024a) contains 1M videos and 7M clips and is part of the broader Howto100M dataset. Unlike the original dataset, clips in Howto-Interlink7M come with concise descriptions and dense region captions, and leverage GPT-4 to generate comprehensive summaries from the detailed annotations. We use a classifier to select 3.3M vision-text pairs from the dataset.
EpicKitchens-100 The EpicKitchens-100 (EK-100) dataset (Damen et al., 2020; 2018) contains 100 hours of egocentric cooking videos. Each clip is annotated with start and end timestamps, a short textual narration, and the verb and noun classes that the narration belongs to. The action class can be uniquely determined by combining the verb and the noun. In EpicKitchens-MIR, we use mean Average Precision (mAP) and normalized Discounted Cumulative Gain (nDCG) as evaluation metrics. In EpicKitchens-CLS, we use top-1 and top-5 action accuracy as evaluation metrics.
EGTEA EGTEA (Li et al., 2018) contains 28 hours of cooking activities from 86 unique sessions of 32 subjects. In zero-shot evaluation, we compute the similarity score between every video embedding and the 106 text embeddings, and take the class of the text embedding with the highest similarity score as the prediction. In fine-tuning evaluation, we finetune the video encoder for action classification using the linear probing protocol.
GTEA The Georgia Tech Egocentric Activities (GTEA) dataset (Fathi et al., 2011b) consists of seven distinct types of everyday activities, including making a sandwich, preparing tea, and brewing coffee. Each activity is demonstrated by four different individuals, resulting in a total of 28 unique video recordings. Each video captures around 20 fine-grained action instances, such as "take bread" or "pour ketchup," all occurring within approximately one minute. This dataset provides a comprehensive look at egocentric perspectives, making it an invaluable resource for research in activity recognition and human-computer interaction.
HOI4D The HOI4D dataset (Liu et al., 2022) represents a significant advancement in the study of category-level human-object interaction, offering a large-scale 4D egocentric resource enriched with detailed annotations. Comprising 2.4 million RGB-D egocentric video frames across more than 4,000 sequences, the dataset captures interactions performed by nine participants with 800 unique object instances spanning 16 categories within 610 diverse indoor environments. To foster advancements in category-level human-object interaction, HOI4D introduces three benchmarking tasks: semantic segmentation of 4D dynamic point cloud sequences, category-level object pose tracking, and egocentric action segmentation involving a variety of interaction targets.
Franka Kitchen The Franka Kitchen dataset (Gupta et al., 2019) is a comprehensive resource designed to facilitate research in robotic manipulation and human-robot interaction within a kitchen environment. The dataset comprises a diverse collection of videos showcasing a Franka Emika Panda robot arm performing various cooking tasks. The setup features a Franka robot with 9 degrees of freedom positioned within a kitchen environment equipped with common household items, including a microwave, kettle, overhead light, cabinets, and an oven. The environment is designed for multitask objectives, requiring the robot to interact with these items to achieve specific goal configurations.
# C IMPLEMENTATION DETAILS
Pretraining Details We pre-train on the video-narration pairs generated by our HOD from Ego4D and Howto-Interlink7M. We use the AdamW optimizer with $(\beta_1, \beta_2) = (0.9, 0.999)$ for 15 epochs, with different settings for different model sizes. For EgoVideo-B, we adopt a batch size of 128 over 16 GPUs with a fixed learning rate of 5e-5. For EgoVideo-L, we use a batch size of 32 over 16 GPUs with a fixed learning rate of 3e-5. For EgoVideo-G, we use a batch size of 16 over 16 GPUs with a fixed learning rate of 1e-5. For input frames, we preprocess the frames by resizing the shorter side to 320 pixels, which accelerates data loading. Subsequently, we apply a standard RandomResizedCrop (Zhao & Krähenbühl, 2023) with a scale parameter of (0.5, 1.0) to obtain the corresponding input frames.
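A minimal sketch of this frame pipeline using torchvision; the final 224-pixel crop size is an assumption (the paper states only the 320-pixel shorter-side resize and the (0.5, 1.0) crop scale).

```python
from torchvision import transforms

pretrain_transform = transforms.Compose([
    transforms.Resize(320),                               # shorter side -> 320 px
    transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),  # random scaled crop
    transforms.ToTensor(),
])
```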
Finetuning Details We finetune on the downstream tasks using AdamW with $(\beta_1, \beta_2) = (0.9, 0.999)$ and a weight decay of 0.05 with cosine annealing. Table 12 lists the hyperparameter details; in all tasks we use 8 GPUs for finetuning. During training, we resize the shorter side of the video to 256 pixels and extract a $224 \times 224$ crop. During testing, we scale the shorter side to 224 pixels and take a central $224 \times 224$ crop.
Table 12: Hyperparameters for Different Downstream Tasks
<table><tr><td>Task</td><td>Model Size</td><td>Epochs</td><td>LR_start</td><td>LR_end</td><td>Batch Size</td></tr><tr><td rowspan="3">EK100-MIR</td><td>EgoVideo-B</td><td>100</td><td>1e-6</td><td>1e-5</td><td>256</td></tr><tr><td>EgoVideo-L</td><td>70</td><td>5e-7</td><td>5e-6</td><td>64</td></tr><tr><td>EgoVideo-G</td><td>50</td><td>4e-7</td><td>4e-6</td><td>32</td></tr><tr><td rowspan="3">EK100-CLS</td><td>EgoVideo-B</td><td>100</td><td>1e-6</td><td>1e-5</td><td>256</td></tr><tr><td>EgoVideo-L</td><td>70</td><td>5e-7</td><td>5e-6</td><td>64</td></tr><tr><td>EgoVideo-G</td><td>60</td><td>4e-7</td><td>4e-6</td><td>32</td></tr><tr><td rowspan="3">EGTEA</td><td>EgoVideo-B</td><td>100</td><td>1e-6</td><td>1e-5</td><td>256</td></tr><tr><td>EgoVideo-L</td><td>70</td><td>7e-7</td><td>7e-6</td><td>64</td></tr><tr><td>EgoVideo-G</td><td>50</td><td>4e-7</td><td>4e-6</td><td>32</td></tr></table>
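For concreteness, here is a hedged sketch of the finetuning recipe above; the placeholder module is ours, and reading Table 12's LR_start/LR_end columns as a linear warmup from 1e-6 to a 1e-5 peak (the EgoVideo-B row) is our interpretation, not something the paper states.

```python
import torch
import torch.nn as nn

model = nn.Linear(768, 512)  # placeholder for the module being finetuned

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5,
                              betas=(0.9, 0.999), weight_decay=0.05)
scheduler = torch.optim.lr_scheduler.SequentialLR(
    optimizer,
    schedulers=[
        # assumed warmup: 1e-6 -> 1e-5 over the first 5 epochs
        torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=0.1, total_iters=5),
        torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=95),
    ],
    milestones=[5],
)

for epoch in range(100):
    # ... one training epoch over the task's batches ...
    scheduler.step()
```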
For the EgoNLQ task (Grauman et al., 2022), we build on the methodologies introduced by EgoVLP (Lin et al., 2022) and LaViLa (Zhao et al., 2023) for fairness. We adopt VSLNet (Zhang et al., 2020) as the task head. We train the task head for 50 epochs, using a learning rate of 3e-3, dropout of 0.3, and a batch size of 32 on a single A100 GPU. For the EgoMQ task, we use VSGN (Zhao et al., 2021) as our task head. We set the batch size to 16, the learning rate to 2e-4, and gamma to 0.6, and train the task head on a single A100 GPU.
Table 13: Comparison of performance across different model sizes and vision-language datasets.
<table><tr><td rowspan="2">Model</td><td rowspan="2">Pretrain Data</td><td rowspan="2">Data Size</td><td colspan="2">EK-100 MIR</td></tr><tr><td>mAP</td><td>nDCG</td></tr><tr><td>EgoVideo-B</td><td>EgoClip</td><td>4M</td><td>31.1</td><td>32.0</td></tr><tr><td>EgoVideo-B</td><td>Ego4D-HOD</td><td>4M</td><td>34.4</td><td>33.9</td></tr><tr><td>EgoVideo-L</td><td>EgoClip</td><td>4M</td><td>35.3</td><td>34.6</td></tr><tr><td>EgoVideo-L</td><td>Ego4D-HOD</td><td>4M</td><td>38.3</td><td>35.9</td></tr><tr><td>EgoVideo-G</td><td>EgoClip</td><td>4M</td><td>42.1</td><td>37.5</td></tr><tr><td>EgoVideo-G</td><td>Ego4D-HOD</td><td>4M</td><td>44.8</td><td>38.2</td></tr></table>
# D ADDITIONAL ABLATIONS
Pretraining Data To further demonstrate the effectiveness of our HOD, we fix the amount of data and conduct experiments with different model sizes. As shown in Table 13, with the same model size and the same data size, using our Ego4D-HOD data consistently achieves improvements. Since each sample in EgoClip corresponds strictly to one sample in Ego4D-HOD, this table strongly demonstrates the high quality of our HOD data.
<table><tr><td>Frame count</td><td>λ</td><td>EK MIR mAP</td></tr><tr><td>4</td><td>1</td><td>34.2</td></tr><tr><td>8</td><td>2</td><td>35.3</td></tr><tr><td>12</td><td>3</td><td>35.9</td></tr><tr><td>16</td><td>4</td><td>36.5</td></tr><tr><td>32</td><td>8</td><td>36.5</td></tr></table>
Table 14: Comparison of performance across different frame upsampling rate $\lambda$ .

Figure 6: Ablations on Downsampling Ratio $\gamma$.
Adapter Downsampling Ratio We test the design of our motion adapter by studying the effect of the adapter downsampling ratio $\gamma$, with results shown in Figure 6. We observe that as $\gamma$ increases, the model's performance continues to improve. This indicates that our generated narrations contain rich semantic information and further validates the effectiveness of our motion adapter. To reduce computational overhead, we ultimately set $\gamma = 0.5$.
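Since the adapter architecture itself is not spelled out here, the following is a hedged bottleneck-adapter sketch in which $\gamma$ sets the down-projection width; the residual form and GELU activation are assumptions.

```python
import torch.nn as nn

class MotionAdapter(nn.Module):
    """Bottleneck adapter sketch: gamma controls the hidden width."""

    def __init__(self, dim, gamma=0.5):
        super().__init__()
        hidden = int(dim * gamma)  # gamma = 0.5 halves the width
        self.down = nn.Linear(dim, hidden)
        self.act = nn.GELU()
        self.up = nn.Linear(hidden, dim)

    def forward(self, x):
        return x + self.up(self.act(self.down(x)))  # residual adapter
```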
Frame number We further study the effect of the number of sampled frames used as input. We consistently use 4 frames for the backbone part, so the upsampling rate $\lambda$ equals the frame count divided by 4. The results in Table 14 indicate that as the number of frames increases from 4 to 16, the model's performance improves continuously from $34.2\%$ to $36.5\%$. However, when the frame count reaches 32, the performance plateaus, showing no significant improvement with further increases. Moreover, increasing the number of frames beyond this point incurs substantial computational cost. As a result, we use $\lambda = 4$ (i.e., 16 frames) as the default value in our EgoVideo, balancing speed and accuracy.
# E ADDITIONAL RESULTS
Table 15: Comparison with the state-of-the-art methods on the GTEA and HOI4D datasets.
<table><tr><td rowspan="2">Feature</td><td rowspan="2">Method</td><td colspan="3">GTEA</td><td colspan="3">HOI4D</td></tr><tr><td>F1@10, 25, 50</td><td>Edit</td><td>Acc</td><td>F1@10, 25, 50</td><td>Edit</td><td>Acc</td></tr><tr><td>I3D</td><td>MS-TCN</td><td>85.8 / 83.4 / 69.8</td><td>79.0</td><td>76.3</td><td>55.6 / 47.8 / 31.8</td><td>74.7</td><td>44.2</td></tr><tr><td>I3D</td><td>MS-TCN++</td><td>88.8 / 85.7 / 76.0</td><td>83.5</td><td>80.1</td><td>54.7 / 46.5 / 30.3</td><td>75.2</td><td>42.2</td></tr><tr><td>I3D</td><td>ASFormer</td><td>90.1 / 88.8 / 79.2</td><td>84.6</td><td>79.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>I3D</td><td>DiffAct</td><td>92.5 / 91.5 / 84.7</td><td>89.6</td><td>82.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>AVION</td><td>ASFormer</td><td>92.5 / 91.0 / 84.5</td><td>89.4</td><td>81.4</td><td>84.4 / 81.1 / 70.2</td><td>89.2</td><td>74.2</td></tr><tr><td>EgoVideo</td><td>ASFormer</td><td>92.7 / 92.2 / 87.1</td><td>90.1</td><td>82.7</td><td>88.9 / 85.3 / 74.8</td><td>90.1</td><td>76.2</td></tr></table>
# E.1 DETAILS ON ACTION SEGMENTATION TASKS.
Action segmentation tests the representation on its understanding of the temporal dependencies of the video (Huang et al., 2020b; Yi et al., 2021). We evaluate our model on two benchmark datasets: GTEA (Fathi et al., 2011b) and HOI4D (Liu et al., 2022). We follow previous work and use four-fold cross-validation on both datasets. We use accuracy (Acc), the edit distance (Edit), and the F1 scores at overlap thresholds of $10\%$, $25\%$, and $50\%$ (F1@10, 25, 50) as evaluation metrics.
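For reference, a sketch of the standard segmental F1@k metric used above (this is the common definition, not the authors' exact evaluation code):

```python
def to_segments(labels):
    """Collapse frame-wise labels into (class, start, end) segments."""
    segs, start = [], 0
    for t in range(1, len(labels) + 1):
        if t == len(labels) or labels[t] != labels[start]:
            segs.append((labels[start], start, t))  # end index is exclusive
            start = t
    return segs

def f1_at_k(pred, gt, k=0.5):
    """Segmental F1@k: match predicted segments to unmatched same-class
    ground-truth segments whose temporal IoU is at least k."""
    p_segs, g_segs = to_segments(pred), to_segments(gt)
    matched = [False] * len(g_segs)
    tp = 0
    for c, s, e in p_segs:
        best, best_j = 0.0, -1
        for j, (gc, gs, ge) in enumerate(g_segs):
            if gc != c or matched[j]:
                continue
            inter = max(0, min(e, ge) - max(s, gs))
            union = max(e, ge) - min(s, gs)
            if inter / union > best:
                best, best_j = inter / union, j
        if best >= k:
            tp += 1
            matched[best_j] = True
    prec = tp / max(len(p_segs), 1)
    rec = tp / max(len(g_segs), 1)
    return 0.0 if tp == 0 else 2 * prec * rec / (prec + rec)

# F1@10, 25, 50 correspond to k = 0.10, 0.25, 0.50.
```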
We use ASFormer (Yi et al., 2021) as the task head, with input features extracted by our EgoVideo, I3D (Carreira & Zisserman, 2017), and AVION (Zhao & Krähenbühl, 2023). Following (Chen et al., 2024), we train with a learning rate of 5e-4, a drop rate of 0.3, and 100 epochs. Table 15 presents the experimental results of our method and other recent approaches, including MS-TCN (Farha & Gall, 2019), MS-TCN++ (Li et al., 2020), ASFormer (Yi et al., 2021), and DiffAct (Saha et al., 2021). The results clearly show the high quality of our EgoVideo features: with the same ASFormer task head, EgoVideo features consistently outperform AVION features, and EgoVideo even helps ASFormer beat the stronger DiffAct task head.
# E.2 DETAILS ON FRANKA KITCHEN DATASET.
Here we introduce the details of the experiments on the Franka Kitchen dataset (Gupta et al., 2019). In this dataset, we adopt 3 tasks: "Turn the stove top knob (TK)", "Open the microwave (OM)", and "Open the left door (OD)". The goal is to predict 9-DoF joint velocities (7 joints and 2 grippers) based on the visual representations and proprioceptive states (i.e., joint velocities). We follow the MPI protocol (Zeng et al., 2024), which trains a shallow MLP policy network. For evaluation, we follow R3M (Nair et al., 2022) and Voltron (Karamcheti et al., 2023) and calculate the average success rate for each setting across the 3 tasks.
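A minimal sketch of such a shallow MLP policy; the visual feature size, hidden width, and the concatenation of visual and proprioceptive inputs are assumptions beyond what the text states.

```python
import torch
import torch.nn as nn

class PolicyMLP(nn.Module):
    """Shallow policy sketch for Franka Kitchen: frozen visual embedding plus
    the 9-D proprioceptive state in, 9-DoF joint/gripper velocities out."""

    def __init__(self, visual_dim=768, proprio_dim=9, hidden=256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(visual_dim + proprio_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 9),  # 7 joint + 2 gripper velocities
        )

    def forward(self, visual_feat, proprio):
        return self.net(torch.cat([visual_feat, proprio], dim=-1))
```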
We compare our EgoVideo with MVP (Radosavovic et al., 2023), Voltron (Karamcheti et al., 2023), and MPI (Zeng et al., 2024). MVP learns representations for robot manipulation via masked image modeling. Voltron goes a step further by combining masked image modeling with vision-text alignment. MPI designs detection and prediction transformers that use object detection signals as additional guidance. Notably, these works also use Ego4D as training data.
The experimental results in Table 10 indicate that our model significantly outperforms both the MVP and Voltron models by more than $10\%$ in average success rate, and performs comparably to the more advanced MPI model, which integrates multiple pre-training tasks related to robot learning. When the MPI model is trained solely with contrastive learning and masked signal modeling as supervision, we achieve a $3.7\%$ higher average success rate than the MPI model. When MPI incorporates the video prediction task, which has been proven crucial for robot learning, our average success rate is only $0.9\%$ lower. This demonstrates the robust generalization capability of our model and highlights the contribution of our hand-object dynamics learning scheme to fine-grained hand operations. Figure 7 shows qualitative results on the knob-turning, microwave-opening, and door-opening tasks.

Turn on the knob

Open microwave

Open door
Figure 7: Qualitative results on the Franka Kitchen dataset. We show the tasks of turning the stovetop knob, opening the microwave and opening the door. All tasks are trained with 25 demonstrations.
# F QUALITATIVE RESULTS
In Figure 8, we show more examples comparing narrations generated by our HOD with the LaViLa Rephraser and the original narrations. We observe that the narrations generated by our HOD model describe hand-object dynamics well (e.g., 'The left hand moves downwards to touch the bicycle tire'). Moreover, compared to the LaViLa rephraser, which often merely changes word order or swaps nouns/verbs, our model combines the original actions to generate semantically richer descriptions of actions and scenes, resulting in significantly higher-quality narrations. (See the first example: our HOD generates 'Person C picks a card with their right hand, which is then handed to their left hand.', while LaViLa yields '#C C chooses a card/#C C selects a card/#C C picks a card'.)

Figure 8: Comparison between: 1) the original Ego4D narrations; 2) LaViLa narrations; and 3) Narrations generated by our HOD. Our narration can describe the dynamic motion information of hands and objects, enhancing the semantic richness of the original narration.
data/2025/2503_00xxx/2503.00986/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02fbec2858d93ddfe689f71eeb51747645e6d4d3b4f0220d99c0dba725beadb7
size 1115491

data/2025/2503_00xxx/2503.00986/layout.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_model.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01006/4628333c-96b8-46e4-8a1e-35d1d5352495_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1707fd0d3312127e9372f3c24536bfeb30931df1969166a403868c178085dc5
size 2454850

data/2025/2503_01xxx/2503.01006/full.md
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01006/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:801514720fc3e53b70aa8aa9eecaaa2aa9fbfb07e61b6b7368ef7387acde096c
size 2012023

data/2025/2503_01xxx/2503.01006/layout.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_model.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01013/203f0b74-b02f-43c0-886b-6f8f030e9940_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abf844aba1a506bfe5bff237e5740982ce8d96159e2a93dd3e1de0b136b1144c
size 14908385

data/2025/2503_01xxx/2503.01013/full.md
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01013/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6fc96c5e3a0d2cdfb20cf96a885bac112ac7307c1e0e3a3d03c10fd8f4054a78
size 2270234

data/2025/2503_01xxx/2503.01013/layout.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_model.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01067/691037fc-a94b-4747-9631-284615a2f840_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c67fa3bf0391b35860db7d0d349270b1f0d30a8e4c5a7c693811f86a45acf1cd
size 779995

data/2025/2503_01xxx/2503.01067/full.md
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01067/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb31edf8b0f348b3eba4a5eeb211402e2c202ecf834f0b4caa466511eea54ac3
size 577275

data/2025/2503_01xxx/2503.01067/layout.json
ADDED
The diff for this file is too large to render.
See raw diff

data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_content_list.json
ADDED
@@ -0,0 +1,2147 @@
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Efficient or Powerful? Trade-offs Between Machine Learning and Deep Learning for Mental Illness Detection on Social Media",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
146,
|
| 8 |
+
225,
|
| 9 |
+
850,
|
| 10 |
+
272
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Zhanyi Ding $^{2}$ , Zhongyan Wang $^{2}$ , Yeyubei Zhang $^{1}$ , Yuchen Cao $^{5}$ , Yunchong Liu $^{1}$ , Xiaorui Shen $^{5}$ , Yexin Tian $^{3}$ , and Jianglai Dai $^{4}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
149,
|
| 19 |
+
311,
|
| 20 |
+
845,
|
| 21 |
+
359
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup>University of Pennsylvania, School of Engineering and Applied Science \n<sup>2</sup>New York University, Center for Data Science",
|
| 28 |
+
"bbox": [
|
| 29 |
+
186,
|
| 30 |
+
368,
|
| 31 |
+
810,
|
| 32 |
+
414
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "<sup>3</sup>Georgia Institute of Technology, College of Computing",
|
| 39 |
+
"bbox": [
|
| 40 |
+
253,
|
| 41 |
+
425,
|
| 42 |
+
739,
|
| 43 |
+
444
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "<sup>4</sup>University of California, Berkeley, Department of EECS",
|
| 50 |
+
"bbox": [
|
| 51 |
+
250,
|
| 52 |
+
454,
|
| 53 |
+
744,
|
| 54 |
+
472
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "$^{5}$ Northeastern University, Khoury College of Computer Science",
|
| 61 |
+
"bbox": [
|
| 62 |
+
223,
|
| 63 |
+
482,
|
| 64 |
+
771,
|
| 65 |
+
501
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Author Note",
|
| 72 |
+
"text_level": 1,
|
| 73 |
+
"bbox": [
|
| 74 |
+
431,
|
| 75 |
+
840,
|
| 76 |
+
564,
|
| 77 |
+
856
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Correspondence concerning this article should be addressed to Yuchen Cao, Northeastern University, E-mail: cao.yuch@northeastern.edu",
|
| 84 |
+
"bbox": [
|
| 85 |
+
115,
|
| 86 |
+
872,
|
| 87 |
+
878,
|
| 88 |
+
917
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "header",
|
| 94 |
+
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 1",
|
| 95 |
+
"bbox": [
|
| 96 |
+
115,
|
| 97 |
+
36,
|
| 98 |
+
878,
|
| 99 |
+
52
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "aside_text",
|
| 105 |
+
"text": "arXiv:2503.01082v1 [cs.CL] 3 Mar 2025",
|
| 106 |
+
"bbox": [
|
| 107 |
+
21,
|
| 108 |
+
310,
|
| 109 |
+
60,
|
| 110 |
+
717
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "Abstract",
|
| 117 |
+
"text_level": 1,
|
| 118 |
+
"bbox": [
|
| 119 |
+
452,
|
| 120 |
+
86,
|
| 121 |
+
544,
|
| 122 |
+
101
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 1
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Social media platforms provide valuable insights into mental health trends by capturing user-generated discussions on conditions such as depression, anxiety, and suicidal ideation. Machine learning (ML) and deep learning (DL) models have been increasingly applied to classify mental health conditions from textual data, but selecting the most effective model involves trade-offs in accuracy, interpretability, and computational efficiency. This study evaluates multiple ML models, including logistic regression, random forest, and LightGBM, alongside deep learning architectures such as ALBERT and Gated Recurrent Units (GRUs), for both binary and multi-class classification of mental health conditions. Our findings indicate that ML and DL models achieve comparable classification performance on medium-sized datasets, with ML models offering greater interpretability through variable importance scores, while DL models are more robust to complex linguistic patterns. Additionally, ML models require explicit feature engineering, whereas DL models learn hierarchical representations directly from text. Logistic regression provides the advantage of capturing both positive and negative associations between features and mental health conditions, whereas tree-based models prioritize decision-making power through split-based feature selection. This study offers empirical insights into the advantages and limitations of different modeling approaches and provides recommendations for selecting appropriate methods based on dataset size, interpretability needs, and computational constraints.",
|
| 129 |
+
"bbox": [
|
| 130 |
+
114,
|
| 131 |
+
117,
|
| 132 |
+
882,
|
| 133 |
+
646
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 1
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "Keywords: Machine Learning, Deep Learning, Mental Health Detection, Social Media, Natural Language Processing, Model Interpretability, Feature Importance",
|
| 140 |
+
"bbox": [
|
| 141 |
+
115,
|
| 142 |
+
657,
|
| 143 |
+
880,
|
| 144 |
+
705
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 1
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "header",
|
| 150 |
+
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 2",
|
| 151 |
+
"bbox": [
|
| 152 |
+
115,
|
| 153 |
+
36,
|
| 154 |
+
882,
|
| 155 |
+
52
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 1
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "Efficient or Powerful? Trade-offs Between Machine Learning and Deep Learning for Mental Illness Detection on Social Media",
|
| 162 |
+
"text_level": 1,
|
| 163 |
+
"bbox": [
|
| 164 |
+
146,
|
| 165 |
+
85,
|
| 166 |
+
850,
|
| 167 |
+
133
|
| 168 |
+
],
|
| 169 |
+
"page_idx": 2
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"type": "text",
|
| 173 |
+
"text": "Introduction",
|
| 174 |
+
"text_level": 1,
|
| 175 |
+
"bbox": [
|
| 176 |
+
433,
|
| 177 |
+
149,
|
| 178 |
+
563,
|
| 179 |
+
166
|
| 180 |
+
],
|
| 181 |
+
"page_idx": 2
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"type": "text",
|
| 185 |
+
"text": "Social media has emerged as a vital platform for understanding mental health trends, offering researchers access to large-scale, real-time textual data reflecting personal experiences, emotional states, and psychological distress. Given the vast volume of user-generated content, researchers have increasingly turned to machine learning (ML) and deep learning (DL) approaches to automate mental health detection, leveraging natural language processing (NLP) techniques for feature extraction and classification. Platforms such as Twitter, Reddit, and Facebook have become key sources for analyzing mental health discussions, motivating the development of ML and DL models for early identification of psychological conditions.",
|
| 186 |
+
"bbox": [
|
| 187 |
+
114,
|
| 188 |
+
186,
|
| 189 |
+
882,
|
| 190 |
+
432
|
| 191 |
+
],
|
| 192 |
+
"page_idx": 2
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"type": "text",
|
| 196 |
+
"text": "Mental illnesses affect approximately one in eight individuals globally, with depression alone impacting over 280 million people (WHO, 2023). Early detection of these conditions is crucial for timely intervention, yet traditional diagnostic methods—such as clinical assessments and self-reported surveys—are resource-intensive and lack real-time insights (Kessler et al., 2017). Analyzing social media data presents an alternative, data-driven approach for mental health monitoring, enabling scalable detection of distress signals and behavioral patterns (De Choudhury et al., 2013; Guntuku et al., 2017). Advances in artificial intelligence (AI) and NLP have facilitated the application of ML and DL techniques for mental health classification, demonstrating promising results in various studies (Shatte et al., 2019).",
|
| 197 |
+
"bbox": [
|
| 198 |
+
114,
|
| 199 |
+
444,
|
| 200 |
+
884,
|
| 201 |
+
720
|
| 202 |
+
],
|
| 203 |
+
"page_idx": 2
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"type": "text",
|
| 207 |
+
"text": "Despite these advancements, several challenges remain. The effectiveness of ML and DL models is often hindered by dataset biases, inconsistencies in preprocessing techniques, and the reliance on imbalanced training data, all of which affect model generalizability (Cao et al., 2024; Hargittai, 2015; Helmy et al., 2024). Linguistic complexities—such as informal language, sarcasm, and context-dependent meanings—further complicate the accurate detection of mental health conditions in social media text (Calvo et al., 2017). Another critical issue is the trade-off between model performance and interpretability.",
|
| 208 |
+
"bbox": [
|
| 209 |
+
114,
|
| 210 |
+
730,
|
| 211 |
+
882,
|
| 212 |
+
920
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 2
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "header",
|
| 218 |
+
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION",
|
| 219 |
+
"bbox": [
|
| 220 |
+
115,
|
| 221 |
+
36,
|
| 222 |
+
857,
|
| 223 |
+
52
|
| 224 |
+
],
|
| 225 |
+
"page_idx": 2
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"type": "page_number",
|
| 229 |
+
"text": "3",
|
| 230 |
+
"bbox": [
|
| 231 |
+
865,
|
| 232 |
+
38,
|
| 233 |
+
878,
|
| 234 |
+
51
|
| 235 |
+
],
|
| 236 |
+
"page_idx": 2
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"type": "text",
|
| 240 |
+
"text": "Traditional ML models, such as logistic regression and random forests, provide interpretability through feature importance scores but may struggle with nuanced language understanding. In contrast, DL models, including transformer-based architectures (e.g., Bidirectional Encoder Representations from Transformers, BERT) and recurrent neural networks (e.g., Gated Recurrent Units, GRUs), excel at capturing linguistic patterns but function as black-box models, limiting transparency in decision-making.",
"bbox": [114, 86, 884, 247],
"page_idx": 3
},
{
"type": "text",
"text": "While prior systematic reviews have explored ML and DL applications in mental health detection (Cao et al., 2024; Chen et al., 2025; Liu et al., 2024), there remains a need for an empirical evaluation that systematically compares model performance and interpretability across different classification tasks. This study addresses this gap by assessing ML and DL models in both binary and multiclass mental health classification settings using a publicly available dataset from Kaggle. The dataset includes various mental health conditions, such as depression, anxiety, stress, suicidal ideation, bipolar disorder, and personality disorders. Model performance is evaluated using weighted F1 score and area under the receiver operating characteristic curve (AUROC) to account for class imbalance. Additionally, we assess model interpretability through feature importance measures, including logistic regression coefficients, random forest Gini impurity reduction, and LightGBM gain-based ranking.",
"bbox": [114, 258, 884, 590],
"page_idx": 3
},
{
"type": "text",
"text": "By examining the trade-offs between model accuracy, interpretability, and computational efficiency, this study provides empirical insights into selecting appropriate models for mental health classification on social media. The remainder of this paper is organized as follows. Section Method describes the methodological framework, including data preparation, model development, and evaluation metrics. Section Results presents findings on dataset characteristics, model performance evaluation, and interpretability assessments. Finally, the Discussion and Conclusion sections summarize key insights, implications for mental health research, and directions for future work.",
"bbox": [114, 601, 882, 820],
"page_idx": 3
},
{
"type": "text",
"text": "Method",
"text_level": 1,
"bbox": [455, 835, 542, 853],
"page_idx": 3
},
{
"type": "text",
"text": "This section outlines the methodological framework of our study, covering data collection, preprocessing, model construction, and evaluation metrics. All experiments",
"bbox": [114, 872, 882, 920],
"page_idx": 3
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION",
"bbox": [115, 36, 857, 52],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [865, 39, 880, 51],
"page_idx": 3
},
{
"type": "text",
"text": "were conducted using Python 3, leveraging key libraries such as pandas for data processing, scikit-learn and lightgbm for traditional machine learning, PyTorch for deep learning, and Transformers for utilizing pre-trained language models. These tools facilitated efficient data handling, systematic hyperparameter tuning, and rigorous performance evaluation. All models were trained on Google Colab, utilizing a high-RAM configuration powered by an NVIDIA T4 GPU, which provided the computational efficiency required for computational tasks, especially DL models. The following sections detail each stage of our approach. Complete code for data preparation, model development, and evaluation is available on GitHub (the link will be provided upon acceptance).",
"bbox": [114, 86, 884, 332],
"page_idx": 4
},
{
"type": "text",
"text": "Data Preparation",
"text_level": 1,
"bbox": [115, 350, 294, 367],
"page_idx": 4
},
{
"type": "text",
"text": "An extensive and varied dataset is fundamental for effective mental health detection via machine learning. We employed the 'Sentiment Analysis for Mental Health' dataset available on Kaggle. This dataset amalgamates textual data from multiple sources that cover topics such as depression, anxiety, stress, bipolar disorder, personality disorders, and suicidal ideation. Data were primarily obtained from social media platforms like Reddit, Twitter, and Facebook, where individuals discuss personal experiences and mental health challenges. The data acquisition process involved using platform-specific APIs and web scraping, followed by removing duplicates, filtering out spam or irrelevant content, and standardizing mental health labels. Personal identifiers were also removed to adhere to ethical standards, resulting in a well-structured CSV file with unique identifiers for each entry. Despite its diversity, the dataset's varying demographics and language styles (e.g., slang and colloquialisms) present challenges for natural language processing. Our preprocessing pipeline was specifically designed to address these variations and balance class distributions as needed.",
"bbox": [114, 387, 882, 774],
"page_idx": 4
},
{
"type": "text",
"text": "We applied a consistent preprocessing pipeline to ready the dataset for both traditional and deep learning models. Initially, we cleaned the text by removing extraneous elements such as URLs, HTML tags, mentions, hashtags, special characters, and extra whitespace. The text was then converted to lowercase to maintain consistency. Next, we removed common stopwords using the NLTK stopwords list (Bird et al., 2009) to elim-",
"bbox": [114, 787, 880, 920],
"page_idx": 4
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 5",
"bbox": [115, 36, 882, 52],
"page_idx": 4
},
{
"type": "text",
"text": "inate non-informative words. Finally, lemmatization was used to reduce words to their base forms, ensuring that different forms of a word are treated uniformly. The processed dataset was randomly split into training, validation, and test sets, with $20\\%$ allocated for testing. The remaining data was further divided into training $(75\\%)$ and validation $(25\\%)$ sets to ensure reproducibility and optimize model tuning.",
"bbox": [114, 86, 884, 219],
"page_idx": 5
},
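The cleaning, stopword removal, lemmatization, and 20% test / 75-25 train-validation split described in the two text blocks above could be wired up roughly as follows. This is a minimal sketch, not the authors' released code; the CSV file name and the `statement`/`status` column names are assumptions, as are the regexes, the random seed, and the use of stratification.

```python
import re
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split

nltk.download("stopwords")
nltk.download("wordnet")
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()

def clean_text(text: str) -> str:
    # Remove URLs, HTML tags, mentions, and hashtags.
    text = re.sub(r"http\S+|<[^>]+>|[@#]\w+", " ", text)
    # Drop special characters and lowercase for consistency.
    text = re.sub(r"[^a-zA-Z\s]", " ", text).lower()
    # Remove stopwords and lemmatize to base forms.
    tokens = [lemmatizer.lemmatize(t) for t in text.split() if t not in stop_words]
    return " ".join(tokens)

df = pd.read_csv("sentiment_analysis_for_mental_health.csv")  # assumed file name
df["clean"] = df["statement"].astype(str).map(clean_text)

# 20% held out for testing; the rest split 75/25 into train and validation.
train_val, test = train_test_split(df, test_size=0.20, random_state=42,
                                   stratify=df["status"])
train, val = train_test_split(train_val, test_size=0.25, random_state=42,
                              stratify=train_val["status"])
```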
{
"type": "text",
"text": "For classification, the dataset labels were structured in two distinct ways. In the multi-class scenario, the original labels in the Kaggle dataset were directly used, consisting of six categories: Normal, Depression, Suicidal, Anxiety, Stress, and Personality Disorder. For binary classification, all non-Normal categories were grouped under a single 'Abnormal' label.",
"bbox": [114, 237, 884, 369],
"page_idx": 5
},
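The binary relabeling described above amounts to one mapping step; a minimal sketch, assuming the pandas DataFrame and `status` column from the earlier preprocessing sketch:

```python
# Collapse every non-Normal label into 'Abnormal' for the binary task;
# the multi-class task keeps the original seven labels unchanged.
df["binary_status"] = df["status"].where(df["status"] == "Normal", other="Abnormal")
```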
{
"type": "text",
"text": "In natural language processing, feature extraction depends on the model type. Traditional ML models require structured numerical representations, while DL models can process raw text sequences or dense vector embeddings.",
"bbox": [114, 390, 882, 464],
"page_idx": 5
},
{
"type": "text",
"text": "For ML models, text is commonly converted into numerical features using techniques such as the bag-of-words (BoW) model (Harris, 1954), which represents documents as token count vectors but treats all words equally. To address this limitation, Term Frequency-Inverse Document Frequency (TF-IDF) (Jones, 1972) enhances BoW by weighting words based on their importance—emphasizing informative terms while downplaying common ones. In this study, we employed TF-IDF vectorization to extract numerical features, incorporating unigrams and bigrams and limiting the feature space to 1,000 features to optimize computational efficiency and mitigate overfitting.",
"bbox": [114, 485, 882, 702],
"page_idx": 5
},
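The TF-IDF setup above (unigrams plus bigrams, capped at 1,000 features) maps directly onto scikit-learn's TfidfVectorizer; a minimal sketch, with the `train`/`val`/`test` frames assumed from the earlier split:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# Unigrams and bigrams, limited to 1,000 features as described in the text.
vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=1000)
X_train = vectorizer.fit_transform(train["clean"])  # fit on training data only
X_val = vectorizer.transform(val["clean"])
X_test = vectorizer.transform(test["clean"])
```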
{
"type": "text",
"text": "Model Development",
"text_level": 1,
"bbox": [115, 734, 319, 753],
"page_idx": 5
},
{
"type": "text",
"text": "A variety of machine learning and deep learning models were developed to analyze and classify mental health statuses based on textual input. Each model was selected to capture different aspects of the data, ranging from simple linear classifiers to complex non-linear relationships. The following subsections outline the methodology of each model and its performance in both binary and multi-class classification.",
"bbox": [114, 787, 882, 919],
"page_idx": 5
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 6",
"bbox": [115, 36, 882, 52],
"page_idx": 5
},
{
"type": "text",
"text": "Logistic Regression",
"text_level": 1,
"bbox": [117, 86, 315, 104],
"page_idx": 6
},
{
"type": "text",
"text": "Logistic regression is a fundamental classification technique widely used in social science and biomedical research (Hosmer & Lemeshow, 2000). It models the probability of a categorical outcome based on a weighted linear combination of input features. Despite its simplicity, logistic regression is still effective when applied to high-dimensional data, such as term frequency-based representations in natural language processing.",
"bbox": [114, 120, 882, 253],
"page_idx": 6
},
{
"type": "text",
"text": "In this study, logistic regression served as an interpretable model that integrated various predictors (e.g., term frequencies) to estimate the probability of different mental health outcomes. The binary model predicts the likelihood of a positive case, while the multi-class extension accommodates multiple categories.",
"bbox": [114, 263, 884, 367],
"page_idx": 6
},
{
"type": "text",
"text": "To prevent overfitting, model parameters were optimized using cross-entropy loss with regularization. A grid search was employed to fine-tune hyperparameters, including regularization strength, solver selection, and class weights, with the weighted F1 score guiding the selection process. The logistic regression models were implemented using the LogisticRegression class from scikit-learn.",
"bbox": [114, 379, 882, 510],
"page_idx": 6
},
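A plausible version of the grid search described above: the tuned dimensions (regularization strength, solver, class weights) and the weighted-F1 criterion follow the text, while the specific grid values are assumptions.

```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

param_grid = {
    "C": [0.01, 0.1, 1, 10],            # inverse regularization strength (values assumed)
    "solver": ["liblinear", "lbfgs"],
    "class_weight": ["balanced", None],
}
logit_search = GridSearchCV(LogisticRegression(max_iter=1000),
                            param_grid, scoring="f1_weighted", cv=5)
logit_search.fit(X_train, train["binary_status"])
logit_model = logit_search.best_estimator_
```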
{
"type": "text",
"text": "Support Vector Machine (SVM)",
"text_level": 1,
"bbox": [115, 526, 448, 545],
"page_idx": 6
},
{
"type": "text",
"text": "Support Vector Machines (SVMs) are effective classifiers that identify an optimal decision boundary (hyperplane) to maximize the margin between classes (Cortes & Vapnik, 1995). Unlike probabilistic models such as logistic regression, SVMs utilize kernel functions to map input data into higher-dimensional spaces, allowing them to model both linear and non-linear relationships. Given the high-dimensional and sparse nature of text-based feature representations, both linear SVMs and non-linear SVMs with a radial basis function (RBF) kernel were evaluated, with model selection based on the weighted F1 score. Hyperparameter optimization was conducted via grid search, including regularization strength, class weighting, and $\\gamma$ for RBF kernels<sup>1</sup>.",
"bbox": [114, 561, 882, 806],
"page_idx": 6
},
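The linear-versus-RBF comparison above could be expressed as a single grid search with two parameter subgrids; grid values are illustrative assumptions. SVC's default multi-class behavior is the One-vs-One scheme the following text describes.

```python
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV

param_grid = [
    {"kernel": ["linear"], "C": [0.1, 1, 10], "class_weight": ["balanced", None]},
    {"kernel": ["rbf"], "C": [0.1, 1, 10], "gamma": ["scale", 0.01, 0.1],
     "class_weight": ["balanced", None]},
]
svm_search = GridSearchCV(SVC(), param_grid, scoring="f1_weighted", cv=3)
svm_search.fit(X_train, train["binary_status"])
```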
{
"type": "text",
"text": "The final models were implemented using the SVC class from scikit-learn. For multi-class classification, the One-vs-One (OvO) strategy was employed, the default ap",
"bbox": [114, 816, 880, 865],
"page_idx": 6
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION",
"bbox": [115, 36, 857, 54],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [865, 38, 880, 52],
"page_idx": 6
},
{
"type": "page_footnote",
"text": "<sup>1</sup> The gamma parameter determines the influence of individual training samples, where higher values result in more localized decision boundaries, while lower values promote broader generalization.",
"bbox": [114, 887, 882, 917],
"page_idx": 6
},
{
"type": "text",
"text": "proach in SVC, which constructs pairwise binary classifiers for each class combination, with the final label determined through majority voting.",
"bbox": [114, 86, 880, 134],
"page_idx": 7
},
{
"type": "text",
"text": "Tree-Based Models",
"text_level": 1,
"bbox": [117, 149, 315, 166],
"page_idx": 7
},
{
"type": "text",
"text": "Classification and Regression Trees (CART) are widely used for categorical outcome prediction in classification tasks. The algorithm constructs a binary decision tree by recursively partitioning the dataset based on predictor variables, selecting splits that optimize a predefined criterion. Common impurity measures, such as Gini impurity and entropy, assess split quality, with lower values indicating greater homogeneity within a node (Bishop, 2006). The tree expands iteratively until stopping conditions, such as a minimum node size, maximum depth, or impurity reduction threshold, are met.",
"bbox": [114, 184, 884, 373],
"page_idx": 7
},
{
"type": "text",
"text": "To prevent overfitting, pruning techniques (Breiman et al., 1984) reduce tree complexity by removing splits with minimal predictive value, enhancing generalizability. However, standalone CART models often overfit, making them less suitable for complex classification tasks. Instead, this study employed ensemble methods, such as Random Forests and Gradient Boosted Trees, to improve robustness and predictive performance.",
"bbox": [114, 384, 882, 517],
"page_idx": 7
},
{
"type": "text",
"text": "Random Forests. Random Forests aggregate multiple decision trees to enhance classification performance. Each tree is trained on a bootstrap sample, ensuring diversity, while a random subset of features is considered at each split to reduce correlation and improve generalization (Breiman, 2001; Zhang et al., 2025). Unlike individual trees, Random Forests do not require pruning, with complexity managed through hyperparameters such as the number of trees, tree depth, and minimum sample requirements.",
"bbox": [114, 529, 882, 690],
"page_idx": 7
},
{
"type": "text",
"text": "Hyperparameter tuning via grid search optimized the number of estimators, tree depth, and minimum split criteria, using the weighted F1 score as the primary evaluation metric to address class imbalance. The best-performing binary classification model effectively distinguished between Normal and Abnormal mental health statuses. For multi-class classification, the same hyperparameter grid was used with a refined search scope for efficiency, ensuring balanced classification performance across mental health categories.",
"bbox": [114, 701, 880, 863],
"page_idx": 7
},
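An illustrative grid for the Random Forest tuning named above (number of estimators, tree depth, minimum split size), scored by weighted F1; the grid values and seed are assumptions.

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

rf_grid = {
    "n_estimators": [200, 500],          # number of trees (values assumed)
    "max_depth": [None, 20, 50],
    "min_samples_split": [2, 5, 10],
}
rf_search = GridSearchCV(RandomForestClassifier(random_state=42),
                         rf_grid, scoring="f1_weighted", cv=3)
rf_search.fit(X_train, train["status"])
rf_model = rf_search.best_estimator_
```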
{
"type": "text",
"text": "Beyond predictive accuracy, feature importance analysis provided insights into key variables influencing classification decisions, enhancing model interpretability. Random",
"bbox": [114, 872, 882, 920],
"page_idx": 7
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 8",
"bbox": [115, 36, 882, 54],
"page_idx": 7
},
{
"type": "text",
"text": "Forest models were implemented using RandomForestClassifier from scikit-learn, with hyperparameter tuning via grid search on the validation set.",
"bbox": [114, 86, 878, 131],
"page_idx": 8
},
{
"type": "text",
"text": "Light Gradient Boosting Machine (LightGBM). LightGBM is an optimized gradient-boosting framework designed for efficiency and scalability, particularly in high-dimensional datasets. Unlike traditional Gradient Boosting Machines (GBMs), which sequentially refine predictions by correcting errors from prior models, LightGBM employs a leaf-wise tree growth strategy, enabling deeper splits in dense regions for improved performance (Ke et al., 2017). Additionally, histogram-based feature binning reduces memory usage and accelerates training, making LightGBM faster and more resource-efficient than standard GBMs (Friedman, 2001).",
"bbox": [114, 146, 884, 362],
"page_idx": 8
},
{
"type": "text",
"text": "Grid search was used to optimize hyperparameters, including the number of boosting iterations, learning rate, tree depth, number of leaves, and minimum child samples. To address class imbalance, the class weighting parameter was tested with both 'balanced' and 'None' options. Model selection was guided by the weighted F1 score, ensuring balanced classification performance.",
"bbox": [114, 375, 882, 505],
"page_idx": 8
},
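The LightGBM search above touches boosting iterations, learning rate, tree depth, number of leaves, minimum child samples, and the 'balanced'/'None' class weighting; a hedged sketch with assumed grid values follows.

```python
from lightgbm import LGBMClassifier
from sklearn.model_selection import GridSearchCV

lgbm_grid = {
    "n_estimators": [200, 500],          # boosting iterations (values assumed)
    "learning_rate": [0.05, 0.1],
    "max_depth": [-1, 10],
    "num_leaves": [31, 63],
    "min_child_samples": [20, 50],
    "class_weight": ["balanced", None],  # both options tested, per the text
}
lgbm_search = GridSearchCV(LGBMClassifier(random_state=42),
                           lgbm_grid, scoring="f1_weighted", cv=3)
lgbm_search.fit(X_train, train["status"])
lgbm_model = lgbm_search.best_estimator_
```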
{
"type": "text",
"text": "For binary classification, LightGBM effectively distinguished between Normal and Abnormal statuses. For multi-class classification, it predicted categories including Normal, Depression, Anxiety, and Personality Disorder. Evaluation metrics included precision, recall, F1 scores, confusion matrices, and one-vs-rest ROC curves. LightGBM's built-in feature importance analysis further enhanced interpretability by identifying key predictors. The models were implemented using LGBMClassifier from the lightgbm library, with hyperparameter tuning via grid search on the validation set.",
"bbox": [114, 518, 882, 707],
"page_idx": 8
},
{
"type": "text",
"text": "A Lite Version of Bidirectional Encoder Representations from Transformers (ALBERT)",
"text_level": 1,
"bbox": [115, 722, 882, 770],
"page_idx": 8
},
{
"type": "text",
"text": "ALBERT (Lan et al., 2020) is an optimized variant of BERT (Devlin et al., 2019) designed to enhance computational efficiency while preserving strong NLP performance. It achieves this by employing parameter sharing across layers and factorized embedding parameterization, significantly reducing the total number of model parameters. Additionally, ALBERT introduces Sentence Order Prediction (SOP) as an auxiliary pretraining",
"bbox": [114, 787, 880, 920],
"page_idx": 8
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION 9",
"bbox": [115, 36, 882, 52],
"page_idx": 8
},
{
"type": "text",
"text": "task to improve sentence-level coherence. These architectural refinements make ALBERT a computationally efficient alternative to BERT, particularly well-suited for large-scale text classification applications such as mental health assessment.",
"bbox": [114, 86, 880, 160],
"page_idx": 9
},
{
"type": "text",
"text": "In this study, ALBERT was fine-tuned for both binary and multi-class classification. The binary model was trained to differentiate between Normal and Abnormal mental health statuses, while the multi-class model classified inputs into categories such as Normal, Depression, Anxiety, and Personality Disorder. The pretrained Albert-base-v2 model was utilized, and hyperparameter optimization was conducted using random search over 10 iterations, tuning learning rates, dropout rates, and training epochs. Model performance was evaluated using the weighted F1 score as the primary metric. For the multi-class task, the classification objective was adjusted to predict seven categories, with weighted cross-entropy loss applied to address class imbalances.",
"bbox": [114, 173, 884, 419],
"page_idx": 9
},
{
"type": "text",
"text": "ALBERT's architecture effectively captures long-range dependencies in text while offering substantial computational advantages. Performance optimization was conducted using random hyperparameter tuning within the Hugging Face Transformers framework, leveraging AlbertTokenizer and AlbertForSequenceClassification for implementation.",
"bbox": [114, 430, 882, 558],
"page_idx": 9
},
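A minimal fine-tuning sketch with the Hugging Face classes named above (AlbertTokenizer and AlbertForSequenceClassification on Albert-base-v2). The single training step, batch size, learning rate, and inverse-frequency class weights shown here are assumptions about how such a setup is typically wired, not the authors' code; a real run would loop over DataLoader batches for the tuned number of epochs.

```python
import torch
from transformers import AlbertTokenizer, AlbertForSequenceClassification

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertForSequenceClassification.from_pretrained("albert-base-v2",
                                                        num_labels=7)

# Integer-encode the seven status labels (encoding scheme assumed).
codes = torch.tensor(train["status"].astype("category").cat.codes.values,
                     dtype=torch.long)

# Weighted cross-entropy against class imbalance (inverse-frequency weights assumed).
counts = torch.bincount(codes, minlength=7).float()
loss_fn = torch.nn.CrossEntropyLoss(weight=counts.sum() / (7 * counts))

# One illustrative step on a small batch.
enc = tokenizer(list(train["clean"][:16]), truncation=True, padding=True,
                max_length=128, return_tensors="pt")
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
logits = model(input_ids=enc["input_ids"],
               attention_mask=enc["attention_mask"]).logits
loss = loss_fn(logits, codes[:16])
loss.backward()
optimizer.step()
```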
{
"type": "text",
"text": "Gated Recurrent Units (GRUs)",
"text_level": 1,
"bbox": [117, 579, 440, 596],
"page_idx": 9
},
{
"type": "text",
"text": "Gated Recurrent Units (GRUs) are a variant of recurrent neural networks (RNNs) designed to model sequential dependencies, making them well-suited for natural language processing tasks such as text classification (Cho et al., 2014). Compared to Long Short-Term Memory networks (LSTMs), GRUs provide greater computational efficiency by simplifying the gating mechanism. Specifically, they merge the forget and input gates into a single update gate, reducing the number of parameters while effectively capturing long-range dependencies.",
"bbox": [114, 615, 882, 803],
"page_idx": 9
},
{
"type": "text",
"text": "In this study, GRUs were employed for both binary and multi-class mental health classification. The binary model differentiated between Normal and Abnormal mental health statuses, while the multi-class model predicted categories such as Normal, Depression, Anxiety, and Personality Disorder.",
"bbox": [114, 816, 880, 919],
"page_idx": 9
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION10",
"bbox": [115, 36, 878, 54],
"page_idx": 9
},
{
"type": "text",
"text": "The GRU architecture consisted of three primary components:",
"bbox": [174, 86, 721, 104],
"page_idx": 10
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- **Embedding Layer:** Maps token indices to dense vector representations of a fixed size.",
|
| 823 |
+
"- GRU Layer: Processes sequential inputs, preserving contextual dependencies, with the final hidden state serving as the input to the classifier.",
|
| 824 |
+
"- Fully Connected Layer: Transforms the hidden state into output logits corresponding to the classification categories."
|
| 825 |
+
],
|
| 826 |
+
"bbox": [
|
| 827 |
+
142,
|
| 828 |
+
124,
|
| 829 |
+
880,
|
| 830 |
+
306
|
| 831 |
+
],
|
| 832 |
+
"page_idx": 10
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "text",
|
| 836 |
+
"text": "To mitigate overfitting, dropout regularization was applied, and weighted cross-entropy loss was used to address class imbalance.",
"bbox": [114, 326, 880, 370],
"page_idx": 10
},
{
"type": "text",
"text": "Hyperparameter tuning was conducted via random search, optimizing key parameters such as embedding dimensions, hidden dimensions, learning rates, and training epochs. The weighted F1 score was used for model selection, ensuring robust performance on both validation and test data.",
"bbox": [114, 382, 884, 486],
"page_idx": 10
},
{
"type": "text",
"text": "Overall, GRUs effectively captured sequential patterns in text, enabling the extraction of linguistic features relevant to mental health classification. While less interpretable than tree-based models, their efficiency and ability to model long-range dependencies make them well-suited for text classification. The models were implemented using PyTorch's torch(nn module, incorporating nn.Embedding, nn.GRU, and nn.Linear layers. Optimization was performed using torch.optim.Adam, with class imbalances handled through nn.CrossEntropyLoss.",
"bbox": [114, 497, 882, 687],
"page_idx": 10
},
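The three-component architecture listed earlier (embedding, GRU, fully connected layer) together with the dropout and weighted-loss details above could look like the following sketch; the dimensions, vocabulary size, and placeholder class weights are assumptions.

```python
import torch
import torch.nn as nn

class GRUClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim=128, hidden_dim=256,
                 num_classes=7, dropout=0.3):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)  # token ids -> dense vectors
        self.gru = nn.GRU(embed_dim, hidden_dim, batch_first=True)
        self.dropout = nn.Dropout(dropout)                    # regularization against overfitting
        self.fc = nn.Linear(hidden_dim, num_classes)          # hidden state -> output logits

    def forward(self, token_ids):
        embedded = self.embedding(token_ids)
        _, hidden = self.gru(embedded)        # final hidden state feeds the classifier
        return self.fc(self.dropout(hidden[-1]))

model = GRUClassifier(vocab_size=20000)       # vocabulary size assumed
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
class_weights = torch.ones(7)                 # placeholder; real weights from class counts
criterion = nn.CrossEntropyLoss(weight=class_weights)
```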
{
"type": "text",
"text": "Evaluation Metrics",
"text_level": 1,
"bbox": [115, 697, 310, 715],
"page_idx": 10
},
{
"type": "text",
"text": "Classifying mental health conditions, such as depression or suicidal ideation, often involves imbalanced class distributions, where the 'positive' class (e.g., individuals experiencing a mental health condition) is significantly underrepresented compared to the 'negative' class (e.g., no reported issues). In such cases, traditional metrics like accuracy can be misleading, as a model predicting only the majority class may still achieve high accuracy despite failing to detect minority-class cases. To provide a more comprehensive assessment of classification performance, the following evaluation metrics were used:",
"bbox": [114, 730, 882, 920],
"page_idx": 10
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION11",
"bbox": [115, 36, 878, 54],
"page_idx": 10
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Recall (Sensitivity): Captures the proportion of actual positive cases correctly identified. High recall is crucial in mental health detection to minimize false negatives and ensure individuals in need receive appropriate intervention (Bradford et al., 2024). However, excessive focus on recall may increase false positives, leading to potential misclassifications.",
"- Precision: Measures the proportion of predicted positive cases that are actually positive. High precision is critical in mental health classification, as false positives can lead to unnecessary concern, stigma, and unwarranted interventions (Bradford et al., 2024; Wei et al., 2023). However, optimizing for precision alone may cause the model to miss true positive cases, limiting its usefulness.",
"- F1 Score: Represents the harmonic mean of precision and recall, offering a balanced performance measure (Powers, 2011). This metric is particularly useful for imbalanced datasets, ensuring that neither precision nor recall is disproportionately optimized at the expense of the other.",
"- Area Under the Receiver Operating Characteristic Curve (AUROC): Assesses the model's ability to distinguish between positive and negative cases across various classification thresholds. Although AUROC provides an overall measure of discrimination performance, it may be less informative in severely imbalanced datasets, where the majority class dominates (Davis & Goadrich, 2006; Tao et al., 2024)."
],
"bbox": [142, 85, 885, 677],
"page_idx": 11
},
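The metrics in the list above map onto scikit-learn as sketched below. Here `y_true`, `y_pred`, and `y_score` (per-class probabilities) are assumed to come from a fitted model, and `classes` is the assumed label set; micro-average AUROC is computed via one-vs-rest binarization, matching how the Results report it.

```python
from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
from sklearn.preprocessing import label_binarize

recall = recall_score(y_true, y_pred, average="weighted")
precision = precision_score(y_true, y_pred, average="weighted")
f1 = f1_score(y_true, y_pred, average="weighted")  # primary model-selection metric

# Micro-average AUROC over one-vs-rest binarized labels.
y_true_bin = label_binarize(y_true, classes=classes)
auroc_micro = roc_auc_score(y_true_bin, y_score, average="micro")
```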
{
"type": "text",
"text": "Results",
"text_level": 1,
"bbox": [458, 697, 537, 714],
"page_idx": 11
},
{
"type": "text",
"text": "This section presents the findings from the analysis of the dataset and the evaluation of machine learning and deep learning models for mental health classification. First, we provide an Overview of Mental Health Distribution, highlighting the inherent class imbalances within the dataset and their implications for model development. Next, the Hyperparameter Optimization subsection details the parameter tuning process, which ensures that each model performs at its best configuration for both binary and multiclass classification tasks. Finally, the Model Performance Evaluation subsection compares",
"bbox": [114, 730, 882, 920],
"page_idx": 11
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION12",
"bbox": [115, 36, 878, 52],
"page_idx": 11
},
{
"type": "text",
"text": "the models' performance based on key metrics, including F1 scores and Area Under the Receiver Operating Characteristic Curve (AUROC). Additionally, nuanced observations, such as the challenges associated with underrepresented classes, are discussed to provide deeper insights into the modeling outcomes.",
"bbox": [114, 86, 884, 189],
"page_idx": 12
},
{
"type": "text",
"text": "Distribution of Mental Health Status",
"text_level": 1,
"bbox": [115, 205, 492, 223],
"page_idx": 12
},
{
"type": "text",
"text": "The dataset contains a total of 52,681 unique textual statements, each annotated with a corresponding mental health status label. The labels represent various mental health categories, reflecting the distribution of conditions within the dataset.",
"bbox": [114, 239, 882, 313],
"page_idx": 12
},
{
"type": "text",
"text": "The dataset is heavily imbalanced, with certain categories having significantly higher representation than others. Specifically:",
"bbox": [114, 325, 880, 370],
"page_idx": 12
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Normal: 16,343 statements (31.02%)",
"- Depression: 15,404 statements (29.24%)",
"- Suicidal: 10,652 statements (20.22%)",
"- Anxiety: 3,841 statements (7.29%)",
"- Bipolar: 2,777 statements (5.27%)",
"- Stress: 2,587 statements (4.91%)",
"- Personality Disorder: 1,077 statements (2.04%)"
],
"bbox": [142, 393, 603, 621],
"page_idx": 12
},
{
"type": "text",
"text": "For the binary classification task, all mental health conditions (Depression, Suicidal, Anxiety, Bipolar, Stress, and Personality Disorder) were combined into a single category labeled as Abnormal, while the Normal category remained unchanged. This transformation resulted in:",
"bbox": [114, 644, 882, 746],
"page_idx": 12
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Normal: 16,343 statements (31.02%)",
"- Abnormal: 36,338 statements (68.98%)"
],
"bbox": [142, 769, 519, 821],
"page_idx": 12
},
{
"type": "text",
"text": "Such imbalance feature in both multi-class and binary classification tasks highlights the importance of evaluation metrics that account for disparities, such as the weighted F1 score.",
"bbox": [114, 844, 880, 917],
"page_idx": 12
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION13",
"bbox": [115, 36, 878, 54],
"page_idx": 12
},
{
"type": "text",
"text": "Computational Efficiency",
"text_level": 1,
"bbox": [115, 86, 371, 104],
"page_idx": 13
},
{
"type": "text",
"text": "The computational time for training the models varied significantly based on the algorithm type and classification task. Among ML models, SVM required an exceptionally long training time, far exceeding other ML approaches like Logistic Regression, Random Forest, and Light GBM, for both binary and multi-class tasks. In contrast, DL models such as ALBERT and GRU consistently required more time compared to ML models, reflecting their higher computational complexity.",
"bbox": [114, 120, 884, 282],
"page_idx": 13
},
{
"type": "text",
"text": "For ML models, training times for multi-class classification were longer than for binary classification, likely due to the increased complexity of predicting multiple categories. However, for DL models, there was no notable difference in training times between binary and multi-class tasks, indicating that their computational cost was primarily driven by model architecture rather than the number of classes.",
"bbox": [114, 291, 884, 423],
"page_idx": 13
},
{
"type": "text",
"text": "A detailed information of training times is presented in Table 1.",
"bbox": [176, 435, 734, 451],
"page_idx": 13
},
{
"type": "text",
"text": "Insert Table 1 about here",
"text_level": 1,
"bbox": [386, 508, 611, 525],
"page_idx": 13
},
{
"type": "text",
"text": "Performance Metrics",
"text_level": 1,
"bbox": [115, 581, 329, 596],
"page_idx": 13
},
{
"type": "text",
"text": "Table 2 presents the weighted F1 scores and AUROC values for all models evaluated on binary classification tasks. Across all models, there were minimal numerical differences in performance, with all achieving strong results in both metrics. The F1 scores ranged from 0.9345 (Logistic Regression) to 0.9576 (ALBERT), while AUROC values were consistently high, spanning from 0.92 (Random Forest) to 0.95 (ALBERT). These results indicate that all models effectively distinguished between Normal and Abnormal mental health statuses.",
"bbox": [114, 615, 882, 803],
"page_idx": 13
},
{
"type": "text",
"text": "Despite the close performance across models, a general trend emerged where deep learning (DL) models, such as ALBERT and GRU, outperformed traditional machine learning (ML) models. For instance, ALBERT achieved the highest F1 score (0.9576) and AUROC (0.95), while GRU closely followed with an F1 score of 0.9512 and an",
"bbox": [114, 816, 880, 920],
"page_idx": 13
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION14",
"bbox": [115, 36, 882, 54],
"page_idx": 13
},
{
"type": "text",
"text": "AUROC of 0.94. In contrast, ML models such as Logistic Regression, Random Forest, and LightGBM showed slightly lower, albeit still competitive, performance.",
"bbox": [114, 85, 882, 134],
"page_idx": 14
},
{
"type": "text",
"text": "Table 3 summarizes the weighted F1 scores and micro-average AUROC values for multi-class classification tasks. Similar to binary classification, the differences in performance across models were small, with DL models generally outperforming ML models. ALBERT achieved the highest F1 score (0.7841) and shared the top AUROC value (0.97) with LightGBM and GRU. ML models such as Logistic Regression and Random Forest exhibited slightly lower F1 scores, at 0.7498 and 0.7478, respectively, but still demonstrated strong AUROC values (0.96).",
"bbox": [114, 142, 884, 332],
"page_idx": 14
},
{
"type": "text",
"text": "Insert Table 2 about here",
"text_level": 1,
"bbox": [384, 375, 611, 392],
"page_idx": 14
},
{
"type": "text",
"text": "Notably, a consistent pattern was observed where multi-class classification yielded lower F1 scores compared to binary classification across all models. The lower F1 scores for multi-class classification compared to binary classification reflect the increased complexity of predicting seven distinct mental health categories. Binary classification requires only a single decision boundary between Normal and all other classes (combined into Abnormal), whereas multi-class classification must learn multiple boundaries between overlapping categories like Depression, Anxiety, and Stress. This added complexity introduces more opportunities for misclassification, further lowering F1 scores. On the contrary, the AUROC values remained consistently high for both binary and multi-class tasks, indicating robust discrimination between classes despite the added complexity.",
"bbox": [114, 436, 882, 711],
"page_idx": 14
},
{
"type": "text",
"text": "Insert Table 3 about here",
"text_level": 1,
"bbox": [384, 753, 611, 770],
"page_idx": 14
},
{
"type": "text",
"text": "The discrepancy between the F1 score and AUROC observed in the multi-class classification results can be attributed to the fundamental differences in what these metrics measure. The F1 score, which balances precision and recall, is sensitive to class imbalance and specific misclassifications. In the confusion matrix (Figure 1), generated",
"bbox": [114, 816, 882, 920],
"page_idx": 14
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION15",
"bbox": [115, 36, 882, 54],
"page_idx": 14
},
{
"type": "text",
"text": "for the LightGBM multi-class model and included here for illustration purposes, certain classes such as Suicidal (Class 6) and Depression (Class 2) show notable misclassifications, including frequent overlaps with Stress (Class 5) and Normal (Class 3). This directly impacts the F1 score by lowering the precision and recall for these specific classes.",
"bbox": [114, 86, 880, 187],
"page_idx": 15
},
{
"type": "text",
"text": "In contrast, AUROC measures the model's ability to rank predictions correctly across thresholds, and it remains robust to class imbalances and individual misclassification errors. The ROC curves (Figure 2), also from the LightGBM multi-class model and included for illustrative purposes, demonstrate strong separability for most classes, with areas under the curve (AUC) exceeding 0.90 for all but Class 2 (Depression) and Class 6 (Suicidal). The micro-average AUROC of 0.97 indicates that the model can effectively rank instances across all classes, even when specific misclassifications reduce the F1 score.",
"bbox": [114, 200, 884, 388],
"page_idx": 15
},
{
"type": "text",
"text": "Error Analyses",
"text_level": 1,
"bbox": [115, 403, 272, 420],
"page_idx": 15
},
{
"type": "text",
"text": "The confusion matrix reveals specific patterns of misclassification that contribute to the lower F1 scores for some classes in the multi-class classification task. Key observations include:",
"bbox": [114, 435, 878, 508],
"page_idx": 15
},
{
"type": "text",
"text": "- Overlap Between Emotionally Similar Classes: As indicated in Figure1, Depression (Class 2) and Personality Disorder (Class 6) show significant overlap, with many instances of Depression misclassified as Personality Disorder or vice versa. Similarly, Suicidal (Class 3) was frequently misclassified as Depression, likely due to overlapping linguistic patterns. Another possible explanation lies in the nature of the dataset itself, which was constructed by combining data from multiple sources. While these labels may have been well-defined and effective for their original studies, they may lack consistency when integrated into a unified dataset, leading to ambiguity in class boundaries.",
"bbox": [144, 527, 882, 774],
"page_idx": 15
},
{
"type": "text",
"text": "- Poor Discrimination for Depression: The ROC curve (Figure 2) highlights that Depression (Class 2) has the lowest AUC (0.90) among all classes in the LightGBM model. For other models, the AUC for Class 2 drops even further, indicating consistent difficulty in distinguishing Depression from other classes. This is likely due to semantic overlap with related classes such as Stress (Class 4), Suicidal (Class",
"bbox": [142, 787, 880, 920],
"page_idx": 15
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION16",
"bbox": [115, 36, 878, 52],
"page_idx": 15
},
{
"type": "text",
"text": "3), and Personality Disorder (Class 6). Additionally, inconsistencies in labeling across data sources may further exacerbate the challenge of identifying Depression accurately.",
"bbox": [161, 85, 880, 159],
"page_idx": 16
},
{
"type": "text",
"text": "- Underrepresented Classes and Data Imbalance: Bipolar (Class 5) and Personality Disorder (Class 6) were underrepresented in the dataset, which exacerbated misclassification issues.",
"bbox": [144, 174, 884, 247],
"page_idx": 16
},
{
"type": "text",
"text": "Model Interpretability",
"text_level": 1,
"bbox": [115, 271, 342, 288],
"page_idx": 16
},
{
"type": "text",
"text": "In traditional machine learning (ML) models, variable importance can be quantified to understand how individual features contribute to predictions. This interpretability allows researchers to identify key linguistic and behavioral markers associated with mental health conditions. However, deep learning (DL) models operate differently. Rather than relying on explicit features, DL models extract representations from raw text, making them inherently black-box models. Since these models learn hierarchical patterns across entire sentences and contexts, they do not produce traditional variable importance scores, making direct interpretability more challenging. In this project, we assessed variable importance for three out of four machine learning models: logistic regression, random forest, and LightGBM. Support Vector Machine (SVM) was excluded from this analysis because Radial Basis Function (RBF) kernel was selected during model construction, which is a nonlinear kernel. In such cases, variable importance is not directly interpretable due to the transformation of the input space, making it difficult to quantify individual feature contributions meaningfully (Guyon et al., 2002). Unlike linear models, where coefficients provide a direct measure of feature importance, nonlinear SVMs construct decision boundaries in high-dimensional spaces, where the contribution of each feature depends on complex interactions (Chang & Lin, 2011).",
"bbox": [114, 303, 882, 778],
"page_idx": 16
},
{
"type": "text",
"text": "For logistic regression, variable importance is derived from model coefficients, where positive coefficients indicate a higher likelihood of the outcome (e.g., mental health condition), while negative coefficients suggest a protective effect. To enhance interpretability, we adopted a color scheme in our visualizations: dark gray for positive coefficients and light gray for negative coefficients. For Random Forest, variable importance is com-",
"bbox": [114, 787, 880, 920],
"page_idx": 16
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION17",
"bbox": [115, 36, 878, 52],
"page_idx": 16
},
{
"type": "text",
"text": "puted using the Gini impurity reduction criterion (Breiman, 2001). This metric quantifies how much each feature contributes to reducing class impurity across the decision trees by assessing the decrease in Gini impurity at each node split. Features with higher importance scores have a greater impact on classification performance. For LightGBM, variable importance is measured using information gain, which quantifies the total improvement in the model's objective function when a feature is used for node splitting across all trees in the boosting process. Information gain reflects how much a feature contributes to minimizing the loss function during training and is commonly used in gradient boosting frameworks (Ke et al., 2017). Features with higher gain values contribute more to optimizing the model's predictive accuracy.",
"bbox": [114, 85, 884, 359],
"page_idx": 17
},
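The three importance measures described above (signed logistic regression coefficients, random forest Gini-based importances, and LightGBM gain) can be extracted as sketched below, including the rescaling to a maximum of 100 that the following text mentions. The fitted `logit_model`, `rf_model`, `lgbm_model`, and `vectorizer` are assumed from the earlier sketches.

```python
import numpy as np

feature_names = vectorizer.get_feature_names_out()

logit_importance = logit_model.coef_[0]        # signed coefficients (binary model)
rf_importance = rf_model.feature_importances_  # Gini impurity reduction
lgbm_importance = lgbm_model.booster_.feature_importance(importance_type="gain")

# Normalize the tree-based scores to a maximum of 100 for comparability;
# logistic regression keeps its raw, signed coefficients.
rf_scaled = 100 * rf_importance / rf_importance.max()
lgbm_scaled = 100 * lgbm_importance / lgbm_importance.max()

# Example: top-10 terms by absolute logistic regression coefficient.
top = np.argsort(np.abs(logit_importance))[::-1][:10]
for idx in top:
    print(feature_names[idx], round(float(logit_importance[idx]), 3))
```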
{
"type": "text",
"text": "The variable importance for both binary and multiclass models using logistic regression, random forest, and LightGBM is presented in Figure 3. To ensure comparability across models, we rescaled the variable importance scores for random forest and LightGBM by normalizing them to a maximum value of 100. For logistic regression, variable importance is represented by model coefficients, retaining both their relative scale and sign. Among machine learning models that provide feature importance, logistic regression offers a more interpretable framework since its importance scores are derived directly from model coefficients. Unlike tree-based methods, which rely on splitting criteria (such as Gini impurity for Random Forest or Gain for LightGBM), logistic regression coefficients retain their sign, allowing researchers to distinguish positive and negative associations with the target outcome. This property is particularly valuable in mental health detection, where it is critical to understand whether a term increases or decreases the likelihood of classification (e.g., identifying depressive symptoms).",
"bbox": [114, 370, 884, 731],
"page_idx": 17
},
{
"type": "text",
"text": "Insert Figure 3 about here",
"text_level": 1,
"bbox": [381, 777, 615, 795],
"page_idx": 17
},
{
"type": "text",
"text": "Despite variations in ranking, the top features identified across machine learning models share strong overlap, reinforcing their relevance in mental health classification. However, different importance criteria lead to model-specific variations: logistic regress-",
"bbox": [115, 844, 882, 920],
"page_idx": 17
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION18",
"bbox": [115, 36, 880, 54],
"page_idx": 17
},
{
"type": "text",
"text": "sion ranks features based on coefficient magnitude (allowing both positive and negative values), random forest uses Gini impurity reduction, and LightGBM employs gain-based ranking. While these models prioritize features differently, they consistently highlight depression-related language as the strongest predictor of mental health conditions on social media.",
"bbox": [114, 86, 884, 218],
"page_idx": 18
},
{
"type": "text",
"text": "For binary classification models, 'depression' emerges as the most predictive feature across all methods, reinforcing its centrality in identifying mental health status. Beyond this, words associated with emotional distress—such as 'feel,' 'want,' and 'anxiety'—consistently appear in the top ranks, though their order varies. Logistic regression assigns strong positive coefficients to 'restless' and 'suicidal,' suggesting their direct correlation with depressive states. Meanwhile, tree-based models (random forest and Light-GBM) highlight terms like 'die,' 'kill,' and 'suicide' more prominently, likely due to their effectiveness in decision splits. These differences reflect how each model processes textual features, with logistic regression providing interpretability through sign-based coefficients, while tree-based models prioritize decision-making power through split-based feature selection.",
"bbox": [114, 234, 884, 537],
"page_idx": 18
},
{
"type": "text",
"text": "In the multiclass setting, feature importance rankings shift to reflect the distinctions between different mental health conditions. While 'depression' remains a dominant predictor, terms like 'bipolar' and 'anxiety' gain prominence, particularly in tree-based models (random forest and LightGBM), suggesting their utility in distinguishing among multiple mental health states. Logistic regression, on the other hand, highlights 'restless' and 'nervous' more strongly, aligning with its emphasis on anxiety-related symptoms. The presence of 'kill' and 'suicidal' in tree-based models underscores their role in severe mental health classifications. Despite these ranking differences, the core predictive features remain largely consistent, validating their role in mental health detection on social media.",
"bbox": [114, 552, 882, 827],
"page_idx": 18
},
{
"type": "text",
"text": "Among models capable of generating variable importance, logistic regression stands out for its interpretability. Unlike tree-based methods, which assign importance based on split-based metrics, logistic regression allows for direct interpretation of feature coeffi-",
"bbox": [114, 844, 882, 920],
"page_idx": 18
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION19",
"bbox": [115, 36, 878, 52],
"page_idx": 18
},
{
"type": "text",
"text": "cients, capturing both positive and negative associations. This provides a clearer understanding of which terms contribute most strongly to classification and in what direction. In contrast, while random forest and LightGBM effectively rank important features, their criteria for feature selection make direct interpretability more challenging.",
"bbox": [114, 86, 880, 187],
"page_idx": 19
},
{
"type": "text",
"text": "Discussion",
"text_level": 1,
"bbox": [443, 206, 552, 222],
"page_idx": 19
},
{
"type": "text",
"text": "This study provides an empirical evaluation of machine learning (ML) and deep learning (DL) models for mental health classification on social media, focusing on their predictability, interpretability, and computational efficiency. The findings highlight key trade-offs that researchers should consider when selecting models for mental health detection tasks. While DL models, such as ALBERT and GRU, have gained popularity for their ability to extract hierarchical representations from raw text, their advantages in small-to-medium datasets remain limited. The results indicate that in cases where data-set size is moderate, traditional ML models, such as logistic regression, random forests, and LightGBM, perform comparably to DL models while offering additional benefits in terms of interpretability and computational efficiency.",
"bbox": [114, 243, 884, 517],
"page_idx": 19
},
{
"type": "text",
"text": "The size of the dataset plays a crucial role in determining the most suitable modeling approach. When working with small to medium-sized datasets, traditional ML models remain an effective choice. Their reliance on structured feature engineering, while requiring additional preprocessing efforts, allows for a more controlled and interpretable learning process. In contrast, DL models require large-scale training data to leverage their full potential. Although DL architectures can automatically extract complex linguistic patterns without extensive feature engineering, this advantage is less pronounced in settings with limited training samples. For researchers with small datasets, the use of feature engineering and careful selection of input variables is critical to optimizing model performance. The results suggest that DL models are more suitable for large-scale mental health detection tasks, where the volume of data is sufficient to justify their increased computational demands.",
"bbox": [114, 530, 882, 859],
"page_idx": 19
},
{
"type": "text",
"text": "In addition to dataset size, computational efficiency remains a practical consideration in model selection. The ML models evaluated in this study consistently required",
"bbox": [114, 873, 882, 917],
"page_idx": 19
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION20",
"bbox": [115, 36, 878, 52],
"page_idx": 19
},
{
"type": "text",
"text": "less computational time compared to DL models, making them preferable in scenarios where efficiency is a priority. While DL models demonstrated competitive classification performance, their significantly longer training times present a challenge, particularly for researchers working with constrained computing resources. Given that many mental health detection applications require scalable solutions, this finding suggests that ML models provide a more efficient and accessible alternative for researchers seeking to deploy classification models without extensive computational infrastructure.",
"bbox": [114, 86, 884, 274],
"page_idx": 20
},
{
"type": "text",
"text": "Interpretability is another critical factor in model selection, particularly for applications in mental health research where understanding the relationships between linguistic patterns and psychological states is essential. Among the ML models, logistic regression offers the clearest interpretability, as it provides direct coefficient estimates that indicate the relative influence of each feature. This advantage is particularly important in mental health classification, where identifying protective and risk-related linguistic markers can provide valuable insights into early detection and intervention strategies. Unlike logistic regression, tree-based models such as random forests and LightGBM do not distinguish between positive and negative associations but instead rank features based on their contribution to classification accuracy. This limitation reduces their interpretability but still allows for the identification of key predictive features. In contrast, DL models operate as black-box systems with no explicit feature importance scores, making them less suitable for researchers prioritizing explainability. Given these differences, logistic regression emerges as the preferred choice when interpretability is a key concern, while tree-based models provide flexibility for high-dimensional data without imposing strong linearity assumptions.",
"bbox": [114, 294, 884, 737],
"page_idx": 20
},
{
"type": "text",
"text": "Despite the strengths of ML models in terms of efficiency and interpretability, it is important to acknowledge the assumptions underlying these approaches. Logistic regression, for example, assumes a linear relationship between the features and the log-odds of the target variable, an assumption that was not explicitly tested in this study. Future research should explore whether nonlinear transformations or interaction terms could improve model fit while maintaining interpretability. Similarly, while tree-based models",
"bbox": [114, 759, 885, 920],
"page_idx": 20
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION21",
"bbox": [115, 36, 878, 52],
"page_idx": 20
},
{
"type": "text",
"text": "do not impose strict assumptions about feature relationships, they rely on hierarchical partitioning mechanisms that may introduce biases in highly unbalanced datasets. These limitations highlight the importance of methodological rigor when selecting ML models for mental health research.",
"bbox": [114, 86, 880, 187],
"page_idx": 21
},
{
"type": "text",
"text": "In addition to model selection, dataset composition and label consistency present challenges in mental health classification. The dataset used in this study was compiled from multiple publicly available sources, which, while beneficial for enhancing linguistic diversity, also introduced inconsistencies in class labels. Since each dataset was originally created for different research purposes, class boundaries may not be clearly defined when combined. This issue likely contributed to increased misclassification rates in the multi-class setting, particularly in categories with overlapping linguistic features such as depression, stress, and suicidal ideation. The presence of ambiguous class definitions suggests that future studies should consider collecting data directly from social media platforms using standardized labeling criteria. By ensuring greater consistency in data annotation, researchers can improve model generalizability and reduce classification errors.",
"bbox": [114, 206, 884, 533],
"page_idx": 21
},
{
"type": "text",
"text": "Another limitation relates to annotation quality. Given the subjective nature of mental health expressions, the reliability of pre-existing labels in publicly available data-sets can be uncertain. Manual verification of labels by domain experts could improve classification accuracy, but such an approach is time-consuming and resource-intensive. As an alternative, future work could explore Artificial Intelligence-assisted annotation strategies to enhance labeling consistency. Advances in natural language processing, particularly in large language models, offer opportunities for developing semi-automated annotation systems that incorporate human-in-the-loop validation. By combining automated text classification with expert oversight, researchers could create more comprehensive and reliable datasets for mental health detection.",
"bbox": [114, 554, 882, 827],
"page_idx": 21
},
{
"type": "text",
"text": "The ethical implications of using social media data for mental health research also warrant careful consideration. While these datasets provide valuable insights into psychological well-being, they often include sensitive information that must be handled with",
"bbox": [114, 844, 882, 920],
"page_idx": 21
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION22",
"bbox": [115, 36, 878, 52],
"page_idx": 21
},
{
"type": "text",
"text": "caution. Privacy-preserving techniques, such as anonymization and differential privacy, should be explored to protect user identities while maintaining the linguistic features necessary for classification. Future research should also establish clearer guidelines for ethical data collection, ensuring that social media-derived datasets align with best practices in mental health ethics.",
"bbox": [114, 86, 884, 218],
"page_idx": 22
},
{
"type": "text",
"text": "In summary, this study provides a comparative analysis of ML and DL models for mental health classification on social media, highlighting key considerations in accuracy, interpretability, and computational efficiency. The findings suggest that ML models remain a practical and interpretable choice for small to medium-sized datasets, while DL models may offer advantages when working with larger data volumes. Among ML models, logistic regression is particularly useful for its ability to distinguish between positive and negative feature importance, offering valuable insights into linguistic markers associated with mental health conditions. However, researchers should remain mindful of model assumptions and dataset inconsistencies, which can impact classification performance. Moving forward, efforts to improve data collection, annotation quality, and ethical considerations will be essential for advancing AI-driven mental health detection and ensuring that these models contribute to more effective, transparent, and responsible research practices.",
"bbox": [114, 228, 884, 587],
"page_idx": 22
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION23",
"bbox": [115, 36, 878, 52],
"page_idx": 22
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [442, 85, 552, 102],
"page_idx": 23
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Bird, S., Klein, E., & Loper, E. (2009). Natural language processing with python. O'Reilly Media Inc.",
"Bishop, C. M. (2006). Pattern recognition and machine learning. Springer.",
"Bradford, A., Meyer, A. N. D., Khan, S., Giardina, T. D., & Singh, H. (2024). Diagnostic error in mental health: A review. BMJ Quality & Safety, 33(10), 663-672. https://qualitysafety.bmj.com/content/33/10/663",
"Breiman, L. (2001). *Random forests* (Vol. 45). Springer.",
"Breiman, L., Friedman, J. H., Olshen, R. A., & Stone, C. J. (1984). Classification and regression trees. Wadsworth & Brooks/Cole Advanced Books & Software, Monterey, CA.",
"Calvo, R. A., Milne, D. N., Hussain, M. S., & Christensen, H. (2017). Natural language processing in mental health applications using non-clinical texts. *Natural Language Engineering*, 23(5), 649–685. https://doi.org/10.1017/S1351324916000383",
"Cao, Y., Dai, J., Wang, Z., Zhang, Y., Shen, X., Liu, Y., & Tian, Y. (2024). Systematic review: Text processing algorithms in machine learning and deep learning for mental health detection on social media. https://arxiv.org/abs/2410.16204",
"Chang, C.-C., & Lin, C.-J. (2011). Libsvm: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2(3), 1-27.",
"Chen, Y., Zhao, C., Xu, Y., & Nie, C. (2025). Year-over-year developments in financial fraud detection via deep learning: A systematic literature review. https://arxiv.org/abs/2502.00201",
"Cho, K., van Merrienboer, B., Gulcehre, C., Bahdanau, D., Bougares, F., Schwenk, H., & Bengio, Y. (2014). Learning phrase representations using rnn encoder-decoder for statistical machine translation. Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 1724-1734.",
"Cortes, C., & Vapnik, V. N. (1995). Support-vector networks (Vol. 20). Springer."
],
"bbox": [115, 116, 882, 845],
"page_idx": 23
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION24",
"bbox": [115, 36, 882, 54],
"page_idx": 23
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Davis, J., & Goadrich, M. (2006). The relationship between precision-recall and roc curves. Proceedings of the 23rd International Conference on Machine Learning, 233-240.",
"De Choudhury, M., Counts, S., & Horvitz, E. (2013). Social media as a measurement of depression in populations. Proceedings of the ACM Annual Web Science Conference, 47-56. https://doi.org/10.1145/2464464.2464480",
"Devlin, J., Chang, M.-W., Lee, K., & Toutanova, K. (2019). Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.",
"Friedman, J. H. (2001). Greedy function approximation: A gradient boosting machine. Annals of Statistics, 29(5), 1189-1232.",
"Guntuku, S. C., Yaden, D. B., Kern, M. L., Ungar, L. H., & Eichstaedt, J. C. (2017). Detecting depression and mental illness on social media: An integrative review. Current Opinion in Psychology, 18, 43-49. https://doi.org/10.1016/j.copsyc.2017.07.005",
"Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene selection for cancer classification using support vector machines. Machine Learning, 46, 389-422.",
"Hargittai, E. (2015). Is bigger always better? potential biases of big data derived from social network sites. The Annals of the American Academy of Political and Social Science, 659, 63-76. http://www.jstor.org/stable/24541849",
"Harris, Z. S. (1954). Distributional structure. Word, 10(2-3), 146-162. https://doi.org/10.1080/00437956.1954.11659520",
"Helmy, A., Nassar, R., & Ramadan, N. (2024). Depression detection for twitter users using sentiment analysis in english and arabic tweets. Artificial Intelligence in Medicine, 147, 102716. https://doi.org/10.1016/j.artmed.2023.102716",
"Hosmer, D. W., & Lemeshow, S. (2000). Applied logistic regression (Second Edition). John Wiley & Sons, Inc. https://doi.org/10.1002/0471722146",
"Jones, K. S. (1972). A statistical interpretation of term specificity and its application in retrieval. Journal of Documentation, 28(1), 11-21. https://doi.org/10.1108/eb026526"
],
"bbox": [115, 84, 899, 897],
"page_idx": 24
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION25",
"bbox": [115, 35, 882, 54],
"page_idx": 24
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., Ye, Q., & Liu, T.-Y. (2017). Lightgbm: A highly efficient gradient boosting decision tree. Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), 3149-3157.",
"Kessler, R. C., Aguilar-Gaxiola, S., Alonso, J., Benjet, C., Bromet, E. J., Cardoso, G., Degenhardt, L., de Girolamo, G., Dinolova, R. V., Ferry, F., Florescu, S., Gureje, O., Haro, J. M., Huang, Y., Karam, E. G., Kawakami, N., Lee, S., Lepine, J. P., Levinson, D., ... Koenen, K. C. (2017). Trauma and ptsd in the who world mental health surveys. European Journal of Psychotraumatology, 8(sup5), 1353383. https://doi.org/10.1080/20008198.2017.1353383",
"Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., & Soricut, R. (2020). Albert: A lite bert for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942.",
"Liu, Y., Shen, X., Zhang, Y., Wang, Z., Tian, Y., Dai, J., & Cao, Y. (2024). A systematic review of machine learning approaches for detecting deceptive activities on social media: Methods, challenges, and biases. arXiv, arXiv:2410.20293. https://doi.org/10.48550/arXiv.2410.20293",
"Powers, D. M. (2011). Evaluation: From precision, recall and f-measure to roc, informedness, markedness and correlation. Journal of Machine Learning Technologies, 2(1), 37-63.",
"Shatte, A. B. R., Hutchinson, D. M., & Teague, S. J. (2019). Machine learning in mental health: A scoping review of methods and applications. *Psychological Medicine*, 49(9), 1426-1448. https://doi.org/10.1017/S0033291719000151",
"Tao, Y., Wang, Z., Zhang, H., & Wang, L. (2024). Nevlp: Noise-robust framework for efficient vision-language pre-training. https://arxiv.org/abs/2409.09582",
"Wei, Y., Gao, M., Xiao, J., Liu, C., Tian, Y., & He, Y. (2023). Research and implementation of cancer gene data classification based on deep learning. Journal of Software Engineering and Applications, 16, 155-169. https://doi.org/10.4236/jsea.2023.166009"
],
"bbox": [115, 83, 880, 897],
"page_idx": 25
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION26",
"bbox": [115, 36, 880, 54],
"page_idx": 25
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"WHO. (2023). Mental disorders [Retrieved February 9, 2025]. https://www.who.int/news-room/fact-sheets/detail/mental-disorders",
"Zhang, Y., Wang, Z., Ding, Z., Tian, Y., Dai, J., Shen, X., Liu, Y., & Cao, Y. (2025). Tutorial on using machine learning and deep learning models for mental illness detection. arXiv. https://arxiv.org/abs/2502.04342"
],
"bbox": [115, 83, 882, 219],
"page_idx": 26
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION27",
"bbox": [115, 35, 882, 54],
"page_idx": 26
},
{
"type": "table",
"img_path": "images/e141f9c65f74ad67fa2740a3ef0980b38eeaab541493d1231a7d20658dd014de.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Type</td><td>Model</td><td>Binary (seconds)</td><td>Multiclass (seconds)</td></tr><tr><td rowspan=\"4\">ML</td><td>SVM</td><td>4681.96</td><td>22844.23</td></tr><tr><td>Logistic Regression</td><td>7.33</td><td>181.86</td></tr><tr><td>Random Forest</td><td>263.54</td><td>2895.43</td></tr><tr><td>Light GBM</td><td>336.65</td><td>3968.33</td></tr><tr><td rowspan=\"2\">DL</td><td>Albert</td><td>21244.18</td><td>20860.15</td></tr><tr><td>GRU</td><td>1530.76</td><td>1567.24</td></tr></table>",
"bbox": [164, 82, 831, 206],
"page_idx": 27
},
{
"type": "text",
"text": "Table 1",
"bbox": [115, 205, 193, 219],
"page_idx": 27
},
{
"type": "text",
"text": "Training times (in seconds) for model optimization in binary and multi-class classification tasks",
"bbox": [115, 221, 880, 254],
"page_idx": 27
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION28",
"bbox": [115, 36, 880, 52],
"page_idx": 27
},
{
"type": "table",
"img_path": "images/49f80788ab8952176b28bb789a2db7cb8a1029613085c955c098a3927f9ce0d3.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Model</td><td>F1 Score</td><td>AUROC</td></tr><tr><td>Support Vector Machine (SVM)</td><td>0.9401</td><td>0.93</td></tr><tr><td>Logistic Regression</td><td>0.9345</td><td>0.93</td></tr><tr><td>Random Forest</td><td>0.9359</td><td>0.92</td></tr><tr><td>LightGBM</td><td>0.9358</td><td>0.93</td></tr><tr><td>ALBERT</td><td>0.9576</td><td>0.95</td></tr><tr><td>Gated Recurrent Units (GRU)</td><td>0.9512</td><td>0.94</td></tr></table>",
"bbox": [242, 82, 754, 209],
"page_idx": 28
},
{
"type": "text",
"text": "Table 2",
"bbox": [115, 206, 194, 221],
"page_idx": 28
},
{
"type": "text",
"text": "F1 Scores and AUROC for Binary Classification Tasks.",
"bbox": [115, 223, 600, 240],
"page_idx": 28
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION29",
"bbox": [115, 36, 880, 52],
"page_idx": 28
},
{
"type": "table",
"img_path": "images/2762771481c996a9dec1af4754d52c078e23acf3cd45d57b21891597e7903435.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Model</td><td>F1 Score</td><td>Micro-Average AUROC</td></tr><tr><td>Support Vector Machine (SVM)</td><td>0.7610</td><td>0.95</td></tr><tr><td>Logistic Regression</td><td>0.7498</td><td>0.96</td></tr><tr><td>Random Forest</td><td>0.7478</td><td>0.96</td></tr><tr><td>LightGBM</td><td>0.7747</td><td>0.97</td></tr><tr><td>ALBERT</td><td>0.7841</td><td>0.97</td></tr><tr><td>Gated Recurrent Units (GRU)</td><td>0.7756</td><td>0.97</td></tr></table>",
"bbox": [166, 82, 828, 208],
"page_idx": 29
},
{
"type": "text",
"text": "Table 3 F1 Scores and Micro-Average AUROC for Multi-Class Classification Tasks.",
"bbox": [115, 208, 771, 241],
"page_idx": 29
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION30",
"bbox": [115, 36, 880, 52],
"page_idx": 29
},
{
"type": "image",
"img_path": "images/c36614d0007b32649ec76814e1daebea31792a2e5deb61d8e53973ed7c78ed37.jpg",
"image_caption": [
"Figure 1 LightGBM Multi-Class Model Performance: Confusion Matrix."
],
"image_footnote": [],
"bbox": [200, 84, 796, 441],
"page_idx": 30
},
{
"type": "text",
"text": "The class labels are arranged as follows: Class 0: Anxiety, Class 1: Normal, Class 2: Depression, Class 3: Suicidal, Class 4: Stress, Class 5: Bipolar, and Class 6: Personality Disorder.",
"bbox": [115, 478, 882, 529],
"page_idx": 30
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION31",
"bbox": [115, 36, 878, 52],
"page_idx": 30
},
{
"type": "image",
"img_path": "images/fab43e0ee7d28eabee94376d869425d9df1970745a8795a9145e5ce840821e2e.jpg",
"image_caption": [
"Figure 2 LightGBM Multi-Class Model Performance: Area Under the Receiver Operating Characteristic Curve."
],
"image_footnote": [],
"bbox": [196, 84, 801, 422],
"page_idx": 31
},
{
"type": "text",
"text": "The class labels are arranged as follows: Class 0: Anxiety, Class 1: Normal, Class 2: Depression, Class 3: Suicidal, Class 4: Stress, Class 5: Bipolar, and Class 6: Personality Disorder.",
"bbox": [115, 475, 882, 525],
"page_idx": 31
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION32",
"bbox": [115, 36, 880, 52],
"page_idx": 31
},
{
"type": "image",
"img_path": "images/188947d73c0768b805d1ac26ba34f3efb82faf8538ab15768a98ab874331da56.jpg",
"image_caption": [
"(a) Logistic Regression (Binary)"
],
"image_footnote": [],
"bbox": [119, 84, 484, 256],
"page_idx": 32
},
{
"type": "image",
"img_path": "images/d3f5265870ab68b4f0353c64bb265e65f8f3d58edc9a1210873418cd3fae333e.jpg",
"image_caption": [
"(b) Logistic Regression (Multiclass)"
],
"image_footnote": [],
"bbox": [512, 85, 877, 256],
"page_idx": 32
},
{
"type": "image",
"img_path": "images/cb9515502b4ddf0468a1b756b3cee48c8ff94fd3709b020f55ec5be5ecb53f9e.jpg",
"image_caption": [
"(c) Random Forest (Binary)"
],
"image_footnote": [],
"bbox": [119, 286, 484, 456],
"page_idx": 32
},
{
"type": "image",
"img_path": "images/909bab7163821269ed929d9ce450131c45fb2321c6c4eddde36e703f70eeb6ef.jpg",
"image_caption": [
"(d) Random Forest (Multiclass)"
],
"image_footnote": [],
"bbox": [514, 287, 873, 456],
"page_idx": 32
},
{
"type": "image",
"img_path": "images/716bf99ccfbd441cda175499d61ef54aba7770eb21f86797ce4d7e55e3517466.jpg",
"image_caption": [
"(e) LightGBM (Binary)"
],
"image_footnote": [],
"bbox": [119, 488, 484, 658],
"page_idx": 32
},
{
"type": "image",
"img_path": "images/8db89aded9f61cb6fd3fafaa445e015f4609f64c59a4ec0e3720abce2ea50ba9.jpg",
"image_caption": [
"(f) LightGBM (Multiclass)",
"Figure 3 Comparison of Feature Importance Across Different Models"
],
"image_footnote": [],
"bbox": [514, 488, 873, 658],
"page_idx": 32
},
{
"type": "header",
"text": "ML VS. DL: TRADE-OFFS IN SOCIAL MEDIA MENTAL ILLNESS DETECTION33",
"bbox": [115, 36, 878, 52],
"page_idx": 32
}
]
data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_model.json
ADDED
The diff for this file is too large to render. See raw diff
data/2025/2503_01xxx/2503.01082/86c07b72-b761-4017-9dc2-5a173571c1cd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0055321365ebe84b8ca7a9f715520fd1b53a3624800e374ceab3d498d6221e6
size 444390
data/2025/2503_01xxx/2503.01082/full.md
ADDED
# Efficient or Powerful? Trade-offs Between Machine Learning and Deep Learning for Mental Illness Detection on Social Media

Zhanyi Ding$^{2}$, Zhongyan Wang$^{2}$, Yeyubei Zhang$^{1}$, Yuchen Cao$^{5}$, Yunchong Liu$^{1}$, Xiaorui Shen$^{5}$, Yexin Tian$^{3}$, and Jianglai Dai$^{4}$

$^{1}$ University of Pennsylvania, School of Engineering and Applied Science

$^{2}$ New York University, Center for Data Science

$^{3}$ Georgia Institute of Technology, College of Computing

$^{4}$ University of California, Berkeley, Department of EECS

$^{5}$ Northeastern University, Khoury College of Computer Science

# Author Note
Correspondence concerning this article should be addressed to Yuchen Cao, Northeastern University, E-mail: cao.yuch@northeastern.edu

# Abstract

Social media platforms provide valuable insights into mental health trends by capturing user-generated discussions on conditions such as depression, anxiety, and suicidal ideation. Machine learning (ML) and deep learning (DL) models have been increasingly applied to classify mental health conditions from textual data, but selecting the most effective model involves trade-offs in accuracy, interpretability, and computational efficiency. This study evaluates multiple ML models, including logistic regression, random forest, and LightGBM, alongside deep learning architectures such as ALBERT and Gated Recurrent Units (GRUs), for both binary and multi-class classification of mental health conditions. Our findings indicate that ML and DL models achieve comparable classification performance on medium-sized datasets, with ML models offering greater interpretability through variable importance scores, while DL models are more robust to complex linguistic patterns. Additionally, ML models require explicit feature engineering, whereas DL models learn hierarchical representations directly from text. Logistic regression provides the advantage of capturing both positive and negative associations between features and mental health conditions, whereas tree-based models prioritize decision-making power through split-based feature selection. This study offers empirical insights into the advantages and limitations of different modeling approaches and provides recommendations for selecting appropriate methods based on dataset size, interpretability needs, and computational constraints.
Keywords: Machine Learning, Deep Learning, Mental Health Detection, Social Media, Natural Language Processing, Model Interpretability, Feature Importance

# Efficient or Powerful? Trade-offs Between Machine Learning and Deep Learning for Mental Illness Detection on Social Media

# Introduction

Social media has emerged as a vital platform for understanding mental health trends, offering researchers access to large-scale, real-time textual data reflecting personal experiences, emotional states, and psychological distress. Given the vast volume of user-generated content, researchers have increasingly turned to machine learning (ML) and deep learning (DL) approaches to automate mental health detection, leveraging natural language processing (NLP) techniques for feature extraction and classification. Platforms such as Twitter, Reddit, and Facebook have become key sources for analyzing mental health discussions, motivating the development of ML and DL models for early identification of psychological conditions.
Mental illnesses affect approximately one in eight individuals globally, with depression alone impacting over 280 million people (WHO, 2023). Early detection of these conditions is crucial for timely intervention, yet traditional diagnostic methods—such as clinical assessments and self-reported surveys—are resource-intensive and lack real-time insights (Kessler et al., 2017). Analyzing social media data presents an alternative, data-driven approach for mental health monitoring, enabling scalable detection of distress signals and behavioral patterns (De Choudhury et al., 2013; Guntuku et al., 2017). Advances in artificial intelligence (AI) and NLP have facilitated the application of ML and DL techniques for mental health classification, demonstrating promising results in various studies (Shatte et al., 2019).
Despite these advancements, several challenges remain. The effectiveness of ML and DL models is often hindered by dataset biases, inconsistencies in preprocessing techniques, and the reliance on imbalanced training data, all of which affect model generalizability (Cao et al., 2024; Hargittai, 2015; Helmy et al., 2024). Linguistic complexities—such as informal language, sarcasm, and context-dependent meanings—further complicate the accurate detection of mental health conditions in social media text (Calvo et al., 2017). Another critical issue is the trade-off between model performance and interpretability.
Traditional ML models, such as logistic regression and random forests, provide interpretability through feature importance scores but may struggle with nuanced language understanding. In contrast, DL models, including transformer-based architectures (e.g., Bidirectional Encoder Representations from Transformers, BERT) and recurrent neural networks (e.g., Gated Recurrent Units, GRUs), excel at capturing linguistic patterns but function as black-box models, limiting transparency in decision-making.
While prior systematic reviews have explored ML and DL applications in mental health detection (Cao et al., 2024; Chen et al., 2025; Liu et al., 2024), there remains a need for an empirical evaluation that systematically compares model performance and interpretability across different classification tasks. This study addresses this gap by assessing ML and DL models in both binary and multiclass mental health classification settings using a publicly available dataset from Kaggle. The dataset includes various mental health conditions, such as depression, anxiety, stress, suicidal ideation, bipolar disorder, and personality disorders. Model performance is evaluated using weighted F1 score and area under the receiver operating characteristic curve (AUROC) to account for class imbalance. Additionally, we assess model interpretability through feature importance measures, including logistic regression coefficients, random forest Gini impurity reduction, and LightGBM gain-based ranking.
By examining the trade-offs between model accuracy, interpretability, and computational efficiency, this study provides empirical insights into selecting appropriate models for mental health classification on social media. The remainder of this paper is organized as follows. Section Method describes the methodological framework, including data preparation, model development, and evaluation metrics. Section Results presents findings on dataset characteristics, model performance evaluation, and interpretability assessments. Finally, the Discussion and Conclusion sections summarize key insights, implications for mental health research, and directions for future work.

# Method

This section outlines the methodological framework of our study, covering data collection, preprocessing, model construction, and evaluation metrics. All experiments were conducted using Python 3, leveraging key libraries such as pandas for data processing, scikit-learn and lightgbm for traditional machine learning, PyTorch for deep learning, and Transformers for utilizing pre-trained language models. These tools facilitated efficient data handling, systematic hyperparameter tuning, and rigorous performance evaluation. All models were trained on Google Colab, utilizing a high-RAM configuration powered by an NVIDIA T4 GPU, which provided the computational efficiency required for these tasks, especially the DL models. The following sections detail each stage of our approach. Complete code for data preparation, model development, and evaluation is available on GitHub (the link will be provided upon acceptance).

# Data Preparation

An extensive and varied dataset is fundamental for effective mental health detection via machine learning. We employed the 'Sentiment Analysis for Mental Health' dataset available on Kaggle. This dataset amalgamates textual data from multiple sources that cover topics such as depression, anxiety, stress, bipolar disorder, personality disorders, and suicidal ideation. Data were primarily obtained from social media platforms like Reddit, Twitter, and Facebook, where individuals discuss personal experiences and mental health challenges. The data acquisition process involved using platform-specific APIs and web scraping, followed by removing duplicates, filtering out spam or irrelevant content, and standardizing mental health labels. Personal identifiers were also removed to adhere to ethical standards, resulting in a well-structured CSV file with unique identifiers for each entry. Despite its diversity, the dataset's varying demographics and language styles (e.g., slang and colloquialisms) present challenges for natural language processing. Our preprocessing pipeline was specifically designed to address these variations and balance class distributions as needed.
We applied a consistent preprocessing pipeline to ready the dataset for both traditional and deep learning models. Initially, we cleaned the text by removing extraneous elements such as URLs, HTML tags, mentions, hashtags, special characters, and extra whitespace. The text was then converted to lowercase to maintain consistency. Next, we removed common stopwords using the NLTK stopwords list (Bird et al., 2009) to eliminate non-informative words. Finally, lemmatization was used to reduce words to their base forms, ensuring that different forms of a word are treated uniformly. The processed dataset was randomly split into training, validation, and test sets, with $20\%$ allocated for testing. The remaining data was further divided into training $(75\%)$ and validation $(25\%)$ sets to ensure reproducibility and optimize model tuning.
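
This pipeline is straightforward to express with NLTK and scikit-learn. The sketch below is illustrative rather than the authors' released code: the regular expressions, placeholder rows, and random seed are assumptions, and later snippets in this section continue from the names defined here.

```python
import re

import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split

nltk.download("stopwords")
nltk.download("wordnet")

STOPWORDS = set(stopwords.words("english"))
LEMMATIZER = WordNetLemmatizer()

def clean_text(text: str) -> str:
    """Strip URLs, HTML tags, mentions, hashtags, and special characters,
    then lowercase, remove stopwords, and lemmatize."""
    text = re.sub(r"http\S+|www\.\S+", " ", text)   # URLs
    text = re.sub(r"<[^>]+>", " ", text)            # HTML tags
    text = re.sub(r"[@#]\w+", " ", text)            # mentions and hashtags
    text = re.sub(r"[^A-Za-z\s]", " ", text)        # special characters
    tokens = [LEMMATIZER.lemmatize(tok) for tok in text.lower().split()
              if tok not in STOPWORDS]
    return " ".join(tokens)

# Placeholder rows standing in for the Kaggle CSV.
texts = ["I feel hopeless lately", "Great day at work!",
         "Cannot stop worrying", "Dinner with friends tonight"] * 3
labels = ["Abnormal", "Normal", "Abnormal", "Normal"] * 3
texts = [clean_text(t) for t in texts]

# 20% held out for testing, then a 75/25 train/validation split.
train_val_X, test_X, train_val_y, test_y = train_test_split(
    texts, labels, test_size=0.20, random_state=42)
train_X, val_X, train_y, val_y = train_test_split(
    train_val_X, train_val_y, test_size=0.25, random_state=42)
```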
For classification, the dataset labels were structured in two distinct ways. In the multi-class scenario, the original labels in the Kaggle dataset were directly used, consisting of seven categories: Normal, Depression, Suicidal, Anxiety, Stress, Bipolar, and Personality Disorder. For binary classification, all non-Normal categories were grouped under a single 'Abnormal' label.
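
The binary grouping reduces to a one-line relabeling. The column names below are hypothetical, since the exact schema of the Kaggle CSV is not reproduced here.

```python
import pandas as pd

# Hypothetical column names for the Kaggle CSV.
df = pd.DataFrame({"statement": ["I feel numb", "Went for a run"],
                   "status": ["Depression", "Normal"]})
# Keep 'Normal' as-is; collapse every other category into 'Abnormal'.
df["binary_label"] = df["status"].where(df["status"] == "Normal", "Abnormal")
```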
In natural language processing, feature extraction depends on the model type. Traditional ML models require structured numerical representations, while DL models can process raw text sequences or dense vector embeddings.
For ML models, text is commonly converted into numerical features using techniques such as the bag-of-words (BoW) model (Harris, 1954), which represents documents as token count vectors but treats all words equally. To address this limitation, Term Frequency-Inverse Document Frequency (TF-IDF) (Jones, 1972) enhances BoW by weighting words based on their importance—emphasizing informative terms while downplaying common ones. In this study, we employed TF-IDF vectorization to extract numerical features, incorporating unigrams and bigrams and limiting the feature space to 1,000 features to optimize computational efficiency and mitigate overfitting.
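
The stated configuration maps directly onto scikit-learn's TfidfVectorizer; continuing the earlier sketch:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# Unigrams and bigrams, capped at 1,000 features, as described above.
vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_features=1000)
X_train = vectorizer.fit_transform(train_X)   # fit on training text only
X_val = vectorizer.transform(val_X)
X_test = vectorizer.transform(test_X)
```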

# Model Development

A variety of machine learning and deep learning models were developed to analyze and classify mental health statuses based on textual input. Each model was selected to capture different aspects of the data, ranging from simple linear classifiers to complex non-linear relationships. The following subsections outline the methodology of each model and its performance in both binary and multi-class classification.

# Logistic Regression

Logistic regression is a fundamental classification technique widely used in social science and biomedical research (Hosmer & Lemeshow, 2000). It models the probability of a categorical outcome based on a weighted linear combination of input features. Despite its simplicity, logistic regression is still effective when applied to high-dimensional data, such as term frequency-based representations in natural language processing.
In this study, logistic regression served as an interpretable model that integrated various predictors (e.g., term frequencies) to estimate the probability of different mental health outcomes. The binary model predicts the likelihood of a positive case, while the multi-class extension accommodates multiple categories.
To prevent overfitting, model parameters were optimized using cross-entropy loss with regularization. A grid search was employed to fine-tune hyperparameters, including regularization strength, solver selection, and class weights, with the weighted F1 score guiding the selection process. The logistic regression models were implemented using the LogisticRegression class from scikit-learn.
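
A hedged sketch of this search follows. The grid values are assumptions (the paper does not list its exact search space), and cross-validation stands in here for the paper's validation-set tuning.

```python
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

# Illustrative grid over the hyperparameters named above.
param_grid = {
    "C": [0.01, 0.1, 1, 10],             # inverse regularization strength
    "solver": ["liblinear", "lbfgs"],
    "class_weight": [None, "balanced"],
}
search = GridSearchCV(LogisticRegression(max_iter=1000), param_grid,
                      scoring="f1_weighted", cv=3)
search.fit(X_train, train_y)
coefs = search.best_estimator_.coef_     # signed per-feature coefficients
```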

# Support Vector Machine (SVM)

Support Vector Machines (SVMs) are effective classifiers that identify an optimal decision boundary (hyperplane) to maximize the margin between classes (Cortes & Vapnik, 1995). Unlike probabilistic models such as logistic regression, SVMs utilize kernel functions to map input data into higher-dimensional spaces, allowing them to model both linear and non-linear relationships. Given the high-dimensional and sparse nature of text-based feature representations, both linear SVMs and non-linear SVMs with a radial basis function (RBF) kernel were evaluated, with model selection based on the weighted F1 score. Hyperparameter optimization was conducted via grid search, including regularization strength, class weighting, and $\gamma$ for RBF kernels<sup>1</sup>.
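
The same pattern covers the SVM search; again the grid values are assumptions rather than the paper's exact settings.

```python
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Both kernels enter one grid; gamma matters only for the RBF kernel.
param_grid = {
    "kernel": ["linear", "rbf"],
    "C": [0.1, 1, 10],
    "gamma": ["scale", "auto"],
    "class_weight": [None, "balanced"],
}
svm_search = GridSearchCV(SVC(), param_grid, scoring="f1_weighted", cv=3)
svm_search.fit(X_train, train_y)   # SVC uses One-vs-One for multi-class
```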
The final models were implemented using the SVC class from scikit-learn. For multi-class classification, the One-vs-One (OvO) strategy was employed, the default approach in SVC, which constructs pairwise binary classifiers for each class combination, with the final label determined through majority voting.

# Tree-Based Models

Classification and Regression Trees (CART) are widely used for categorical outcome prediction in classification tasks. The algorithm constructs a binary decision tree by recursively partitioning the dataset based on predictor variables, selecting splits that optimize a predefined criterion. Common impurity measures, such as Gini impurity and entropy, assess split quality, with lower values indicating greater homogeneity within a node (Bishop, 2006). The tree expands iteratively until stopping conditions, such as a minimum node size, maximum depth, or impurity reduction threshold, are met.
To prevent overfitting, pruning techniques (Breiman et al., 1984) reduce tree complexity by removing splits with minimal predictive value, enhancing generalizability. However, standalone CART models often overfit, making them less suitable for complex classification tasks. Instead, this study employed ensemble methods, such as Random Forests and Gradient Boosted Trees, to improve robustness and predictive performance.
Random Forests. Random Forests aggregate multiple decision trees to enhance classification performance. Each tree is trained on a bootstrap sample, ensuring diversity, while a random subset of features is considered at each split to reduce correlation and improve generalization (Breiman, 2001; Zhang et al., 2025). Unlike individual trees, Random Forests do not require pruning, with complexity managed through hyperparameters such as the number of trees, tree depth, and minimum sample requirements.
Hyperparameter tuning via grid search optimized the number of estimators, tree depth, and minimum split criteria, using the weighted F1 score as the primary evaluation metric to address class imbalance. The best-performing binary classification model effectively distinguished between Normal and Abnormal mental health statuses. For multi-class classification, the same hyperparameter grid was used with a refined search scope for efficiency, ensuring balanced classification performance across mental health categories.
Beyond predictive accuracy, feature importance analysis provided insights into key variables influencing classification decisions, enhancing model interpretability. Random Forest models were implemented using RandomForestClassifier from scikit-learn, with hyperparameter tuning via grid search on the validation set.
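
A sketch of fitting the model and extracting Gini-based importances follows; the hyperparameter values are placeholders for the tuned ones, and the rescaling so the top feature scores 100 mirrors the normalization used when comparing feature importance across models.

```python
from sklearn.ensemble import RandomForestClassifier

# Placeholder values for the tuned hyperparameters.
rf = RandomForestClassifier(n_estimators=500, max_depth=None,
                            min_samples_split=2, class_weight="balanced",
                            random_state=42)
rf.fit(X_train, train_y)

# Gini-based importances, rescaled to a maximum of 100.
importances = 100 * rf.feature_importances_ / rf.feature_importances_.max()
top_terms = sorted(zip(vectorizer.get_feature_names_out(), importances),
                   key=lambda pair: -pair[1])[:10]
```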
Light Gradient Boosting Machine (LightGBM). LightGBM is an optimized gradient-boosting framework designed for efficiency and scalability, particularly in high-dimensional datasets. Unlike traditional Gradient Boosting Machines (GBMs), which sequentially refine predictions by correcting errors from prior models, LightGBM employs a leaf-wise tree growth strategy, enabling deeper splits in dense regions for improved performance (Ke et al., 2017). Additionally, histogram-based feature binning reduces memory usage and accelerates training, making LightGBM faster and more resource-efficient than standard GBMs (Friedman, 2001).
Grid search was used to optimize hyperparameters, including the number of boosting iterations, learning rate, tree depth, number of leaves, and minimum child samples. To address class imbalance, the class weighting parameter was tested with both 'balanced' and 'None' options. Model selection was guided by the weighted F1 score, ensuring balanced classification performance.
For binary classification, LightGBM effectively distinguished between Normal and Abnormal statuses. For multi-class classification, it predicted categories including Normal, Depression, Anxiety, and Personality Disorder. Evaluation metrics included precision, recall, F1 scores, confusion matrices, and one-vs-rest ROC curves. LightGBM's built-in feature importance analysis further enhanced interpretability by identifying key predictors. The models were implemented using LGBMClassifier from the lightgbm library, with hyperparameter tuning via grid search on the validation set.
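
LightGBM's scikit-learn interface makes the gain-based ranking equally easy to expose; the values below are placeholders for the tuned hyperparameters named above.

```python
from lightgbm import LGBMClassifier

# importance_type="gain" reports each feature's total loss reduction.
lgbm = LGBMClassifier(n_estimators=500, learning_rate=0.05,
                      num_leaves=63, min_child_samples=20,
                      class_weight="balanced", importance_type="gain")
lgbm.fit(X_train, train_y)
gain_importance = lgbm.feature_importances_
```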

# A Lite Version of Bidirectional Encoder Representations from Transformers (ALBERT)

ALBERT (Lan et al., 2020) is an optimized variant of BERT (Devlin et al., 2019) designed to enhance computational efficiency while preserving strong NLP performance. It achieves this by employing parameter sharing across layers and factorized embedding parameterization, significantly reducing the total number of model parameters. Additionally, ALBERT introduces Sentence Order Prediction (SOP) as an auxiliary pretraining task to improve sentence-level coherence. These architectural refinements make ALBERT a computationally efficient alternative to BERT, particularly well-suited for large-scale text classification applications such as mental health assessment.
In this study, ALBERT was fine-tuned for both binary and multi-class classification. The binary model was trained to differentiate between Normal and Abnormal mental health statuses, while the multi-class model classified inputs into categories such as Normal, Depression, Anxiety, and Personality Disorder. The pretrained albert-base-v2 model was utilized, and hyperparameter optimization was conducted using random search over 10 iterations, tuning learning rates, dropout rates, and training epochs. Model performance was evaluated using the weighted F1 score as the primary metric. For the multi-class task, the classification objective was adjusted to predict seven categories, with weighted cross-entropy loss applied to address class imbalances.
ALBERT's architecture effectively captures long-range dependencies in text while offering substantial computational advantages. Performance optimization was conducted using random hyperparameter tuning within the Hugging Face Transformers framework, leveraging AlbertTokenizer and AlbertForSequenceClassification for implementation.
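
A minimal fine-tuning step under these assumptions is sketched below. The learning rate, sequence length, class weights, and toy inputs are illustrative stand-ins, and the random search over learning rates, dropout, and epochs is omitted for brevity.

```python
import torch
from transformers import AlbertForSequenceClassification, AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertForSequenceClassification.from_pretrained(
    "albert-base-v2", num_labels=7)  # 7 classes for the multi-class task

texts = ["I feel hopeless lately", "Had a great day with friends"]  # toy inputs
labels = torch.tensor([2, 1])  # hypothetical label ids

batch = tokenizer(texts, padding=True, truncation=True, max_length=128,
                  return_tensors="pt")

# Weighted cross-entropy to counter class imbalance (weights are illustrative).
class_weights = torch.ones(7)
loss_fn = torch.nn.CrossEntropyLoss(weight=class_weights)

optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model.train()
logits = model(**batch).logits
loss = loss_fn(logits, labels)
loss.backward()
optimizer.step()
print(float(loss))
```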
# Gated Recurrent Units (GRUs)
Gated Recurrent Units (GRUs) are a variant of recurrent neural networks (RNNs) designed to model sequential dependencies, making them well-suited for natural language processing tasks such as text classification (Cho et al., 2014). Compared to Long Short-Term Memory networks (LSTMs), GRUs provide greater computational efficiency by simplifying the gating mechanism. Specifically, they merge the forget and input gates into a single update gate, reducing the number of parameters while effectively capturing long-range dependencies.
In this study, GRUs were employed for both binary and multi-class mental health classification. The binary model differentiated between Normal and Abnormal mental health statuses, while the multi-class model predicted categories such as Normal, Depression, Anxiety, and Personality Disorder.
The GRU architecture consisted of three primary components:
- **Embedding Layer:** Maps token indices to dense vector representations of a fixed size.
- **GRU Layer:** Processes sequential inputs, preserving contextual dependencies, with the final hidden state serving as the input to the classifier.
- **Fully Connected Layer:** Transforms the hidden state into output logits corresponding to the classification categories.
To mitigate overfitting, dropout regularization was applied, and weighted cross-entropy loss was used to address class imbalance.
Hyperparameter tuning was conducted via random search, optimizing key parameters such as embedding dimensions, hidden dimensions, learning rates, and training epochs. The weighted F1 score was used for model selection, ensuring robust performance on both validation and test data.
Overall, GRUs effectively captured sequential patterns in text, enabling the extraction of linguistic features relevant to mental health classification. While less interpretable than tree-based models, their efficiency and ability to model long-range dependencies make them well-suited for text classification. The models were implemented using PyTorch's torch.nn module, incorporating nn.Embedding, nn.GRU, and nn.Linear layers. Optimization was performed using torch.optim.Adam, with class imbalances handled through nn.CrossEntropyLoss.
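
The sketch below mirrors that three-component architecture. All dimensions, the vocabulary size, and the class weights are illustrative assumptions rather than the tuned values from the random search.

```python
import torch
import torch.nn as nn

class GRUClassifier(nn.Module):
    """Embedding -> GRU -> Linear, mirroring the three components above."""
    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes,
                 dropout=0.3):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.gru = nn.GRU(embed_dim, hidden_dim, batch_first=True)
        self.dropout = nn.Dropout(dropout)  # regularization against overfitting
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, token_ids):
        embedded = self.embedding(token_ids)
        _, hidden = self.gru(embedded)            # final hidden state
        return self.fc(self.dropout(hidden[-1]))  # logits per class

# Toy usage with assumed dimensions (not the tuned values from the study).
model = GRUClassifier(vocab_size=5000, embed_dim=128, hidden_dim=64,
                      num_classes=7)
loss_fn = nn.CrossEntropyLoss(weight=torch.ones(7))  # weighted CE for imbalance
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

tokens = torch.randint(1, 5000, (4, 20))  # batch of 4 sequences, length 20
labels = torch.randint(0, 7, (4,))
loss = loss_fn(model(tokens), labels)
loss.backward()
optimizer.step()
print(float(loss))
```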
# Evaluation Metrics
Classifying mental health conditions, such as depression or suicidal ideation, often involves imbalanced class distributions, where the 'positive' class (e.g., individuals experiencing a mental health condition) is significantly underrepresented compared to the 'negative' class (e.g., no reported issues). In such cases, traditional metrics like accuracy can be misleading, as a model predicting only the majority class may still achieve high accuracy despite failing to detect minority-class cases. To provide a more comprehensive assessment of classification performance, the following evaluation metrics were used (a minimal computation sketch follows the list):
- Recall (Sensitivity): Captures the proportion of actual positive cases correctly identified. High recall is crucial in mental health detection to minimize false negatives and ensure individuals in need receive appropriate intervention (Bradford et al., 2024). However, excessive focus on recall may increase false positives, leading to potential misclassifications.
- Precision: Measures the proportion of predicted positive cases that are actually positive. High precision is critical in mental health classification, as false positives can lead to unnecessary concern, stigma, and unwarranted interventions (Bradford et al., 2024; Wei et al., 2023). However, optimizing for precision alone may cause the model to miss true positive cases, limiting its usefulness.
- F1 Score: Represents the harmonic mean of precision and recall, offering a balanced performance measure (Powers, 2011). This metric is particularly useful for imbalanced datasets, ensuring that neither precision nor recall is disproportionately optimized at the expense of the other.
- Area Under the Receiver Operating Characteristic Curve (AUROC): Assesses the model's ability to distinguish between positive and negative cases across various classification thresholds. Although AUROC provides an overall measure of discrimination performance, it may be less informative in severely imbalanced datasets, where the majority class dominates (Davis & Goadrich, 2006; Tao et al., 2024).
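
The sketch below computes each of these metrics with scikit-learn on toy predictions; the numbers are fabricated solely to show the calls, not results from the study.

```python
from sklearn.metrics import (classification_report, f1_score,
                             precision_score, recall_score, roc_auc_score)

# Toy predictions for an imbalanced binary task (1 = condition present).
y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 1, 0, 1, 1, 0]
y_prob = [0.1, 0.2, 0.1, 0.3, 0.2, 0.6, 0.4, 0.9, 0.8, 0.4]  # P(class 1)

print("recall:     ", recall_score(y_true, y_pred))
print("precision:  ", precision_score(y_true, y_pred))
print("weighted F1:", f1_score(y_true, y_pred, average="weighted"))
print("AUROC:      ", roc_auc_score(y_true, y_prob))  # threshold-free ranking
print(classification_report(y_true, y_pred))
```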
# Results
This section presents the findings from the analysis of the dataset and the evaluation of machine learning and deep learning models for mental health classification. First, we provide an Overview of Mental Health Distribution, highlighting the inherent class imbalances within the dataset and their implications for model development. Next, the Hyperparameter Optimization subsection details the parameter tuning process, which ensures that each model performs at its best configuration for both binary and multiclass classification tasks. Finally, the Model Performance Evaluation subsection compares the models' performance based on key metrics, including F1 scores and Area Under the Receiver Operating Characteristic Curve (AUROC). Additionally, nuanced observations, such as the challenges associated with underrepresented classes, are discussed to provide deeper insights into the modeling outcomes.
# Distribution of Mental Health Status
The dataset contains a total of 52,681 unique textual statements, each annotated with a corresponding mental health status label. The labels represent various mental health categories, reflecting the distribution of conditions within the dataset.
The dataset is heavily imbalanced, with certain categories having significantly higher representation than others. Specifically:
- Normal: 16,343 statements (31.02%)
- Depression: 15,404 statements (29.24%)
- Suicidal: 10,652 statements (20.22%)
- Anxiety: 3,841 statements (7.29%)
- Bipolar: 2,777 statements (5.27%)
- Stress: 2,587 statements (4.91%)
- Personality Disorder: 1,077 statements (2.04%)
For the binary classification task, all mental health conditions (Depression, Suicidal, Anxiety, Bipolar, Stress, and Personality Disorder) were combined into a single category labeled Abnormal, while the Normal category remained unchanged; a minimal relabeling sketch follows the list. This transformation resulted in:
- Normal: 16,343 statements (31.02%)
- Abnormal: 36,338 statements (68.98%)
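
A minimal pandas sketch of this relabeling, assuming a hypothetical `status` column holding the seven original labels (the dataset's actual column name is not specified here):

```python
import pandas as pd

# Hypothetical schema: one text column plus a "status" label column.
df = pd.DataFrame({"status": ["Normal", "Depression", "Suicidal", "Anxiety",
                              "Bipolar", "Stress", "Personality Disorder"]})

# Collapse every condition label into "Abnormal"; keep "Normal" unchanged.
df["binary_status"] = df["status"].where(df["status"] == "Normal", "Abnormal")
print(df["binary_status"].value_counts())
```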
This imbalance, present in both the multi-class and binary classification tasks, highlights the importance of evaluation metrics that account for class disparities, such as the weighted F1 score.
# Computational Efficiency
The computational time for training the models varied significantly based on the algorithm type and classification task. Among ML models, SVM required an exceptionally long training time, far exceeding other ML approaches like Logistic Regression, Random Forest, and LightGBM, for both binary and multi-class tasks. In contrast, DL models such as ALBERT and GRU consistently required more time than ML models, reflecting their higher computational complexity.
For ML models, training times for multi-class classification were longer than for binary classification, likely due to the increased complexity of predicting multiple categories. However, for DL models, there was no notable difference in training times between binary and multi-class tasks, indicating that their computational cost was primarily driven by model architecture rather than the number of classes.
Detailed training times are presented in Table 1.
# Insert Table 1 about here
# Performance Metrics
Table 2 presents the weighted F1 scores and AUROC values for all models evaluated on binary classification tasks. Across all models, there were minimal numerical differences in performance, with all achieving strong results in both metrics. The F1 scores ranged from 0.9345 (Logistic Regression) to 0.9576 (ALBERT), while AUROC values were consistently high, spanning from 0.92 (Random Forest) to 0.95 (ALBERT). These results indicate that all models effectively distinguished between Normal and Abnormal mental health statuses.
Despite the close performance across models, a general trend emerged where deep learning (DL) models, such as ALBERT and GRU, outperformed traditional machine learning (ML) models. For instance, ALBERT achieved the highest F1 score (0.9576) and AUROC (0.95), while GRU closely followed with an F1 score of 0.9512 and an AUROC of 0.94. In contrast, ML models such as Logistic Regression, Random Forest, and LightGBM showed slightly lower, albeit still competitive, performance.
Table 3 summarizes the weighted F1 scores and micro-average AUROC values for multi-class classification tasks. Similar to binary classification, the differences in performance across models were small, with DL models generally outperforming ML models. ALBERT achieved the highest F1 score (0.7841) and shared the top AUROC value (0.97) with LightGBM and GRU. ML models such as Logistic Regression and Random Forest exhibited slightly lower F1 scores, at 0.7498 and 0.7478, respectively, but still demonstrated strong AUROC values (0.96).
# Insert Table 2 about here
Notably, multi-class classification yielded consistently lower F1 scores than binary classification across all models, reflecting the increased complexity of predicting seven distinct mental health categories. Binary classification requires only a single decision boundary between Normal and all other classes (combined into Abnormal), whereas multi-class classification must learn multiple boundaries between overlapping categories like Depression, Anxiety, and Stress. This added complexity introduces more opportunities for misclassification, further lowering F1 scores. In contrast, the AUROC values remained consistently high for both binary and multi-class tasks, indicating robust discrimination between classes despite the added complexity.
# Insert Table 3 about here
The discrepancy between the F1 score and AUROC observed in the multi-class classification results can be attributed to the fundamental differences in what these metrics measure. The F1 score, which balances precision and recall, is sensitive to class imbalance and specific misclassifications. In the confusion matrix (Figure 1), generated for the LightGBM multi-class model and included here for illustration purposes, certain classes such as Suicidal (Class 3) and Depression (Class 2) show notable misclassifications, including frequent overlaps with Stress (Class 4) and Normal (Class 1). This directly impacts the F1 score by lowering the precision and recall for these specific classes.
In contrast, AUROC measures the model's ability to rank predictions correctly across thresholds, and it remains robust to class imbalances and individual misclassification errors. The ROC curves (Figure 2), also from the LightGBM multi-class model and included for illustrative purposes, demonstrate strong separability for most classes, with areas under the curve (AUC) exceeding 0.90 for all but Class 2 (Depression) and Class 3 (Suicidal). The micro-average AUROC of 0.97 indicates that the model can effectively rank instances across all classes, even when specific misclassifications reduce the F1 score.
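
The sketch below shows how per-class one-vs-rest AUCs and the micro-average AUROC can be computed with scikit-learn. The scores are random placeholders standing in for a model's predict_proba output, so the resulting AUCs hover near chance by construction.

```python
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize

# Random placeholder scores for 7 classes (illustrative only).
rng = np.random.default_rng(0)
n_samples, n_classes = 200, 7
y_true = rng.integers(0, n_classes, n_samples)
y_score = rng.dirichlet(np.ones(n_classes), n_samples)  # rows sum to 1

y_bin = label_binarize(y_true, classes=range(n_classes))

# One-vs-rest AUC per class, as plotted in Figure 2.
for c in range(n_classes):
    auc = roc_auc_score(y_bin[:, c], y_score[:, c])
    print(f"class {c}: AUC = {auc:.3f}")

# Micro-averaging pools every (instance, class) decision into a single curve.
micro = roc_auc_score(y_bin, y_score, average="micro")
print(f"micro-average AUROC = {micro:.3f}")
```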
# Error Analyses
The confusion matrix reveals specific patterns of misclassification that contribute to the lower F1 scores for some classes in the multi-class classification task. Key observations include:
- Overlap Between Emotionally Similar Classes: As indicated in Figure 1, Depression (Class 2) and Personality Disorder (Class 6) show significant overlap, with many instances of Depression misclassified as Personality Disorder or vice versa. Similarly, Suicidal (Class 3) was frequently misclassified as Depression, likely due to overlapping linguistic patterns. Another possible explanation lies in the nature of the dataset itself, which was constructed by combining data from multiple sources. While these labels may have been well-defined and effective for their original studies, they may lack consistency when integrated into a unified dataset, leading to ambiguity in class boundaries.
- Poor Discrimination for Depression: The ROC curve (Figure 2) highlights that Depression (Class 2) has the lowest AUC (0.90) among all classes in the LightGBM model. For other models, the AUC for Class 2 drops even further, indicating consistent difficulty in distinguishing Depression from other classes. This is likely due to semantic overlap with related classes such as Stress (Class 4), Suicidal (Class 3), and Personality Disorder (Class 6). Additionally, inconsistencies in labeling across data sources may further exacerbate the challenge of identifying Depression accurately.
- Underrepresented Classes and Data Imbalance: Bipolar (Class 5) and Personality Disorder (Class 6) were underrepresented in the dataset, which exacerbated misclassification issues.
# Model Interpretability
In traditional machine learning (ML) models, variable importance can be quantified to understand how individual features contribute to predictions. This interpretability allows researchers to identify key linguistic and behavioral markers associated with mental health conditions. However, deep learning (DL) models operate differently. Rather than relying on explicit features, DL models extract representations from raw text, making them inherently black-box models. Since these models learn hierarchical patterns across entire sentences and contexts, they do not produce traditional variable importance scores, making direct interpretability more challenging. In this project, we assessed variable importance for three out of four machine learning models: logistic regression, random forest, and LightGBM. Support Vector Machine (SVM) was excluded from this analysis because the nonlinear Radial Basis Function (RBF) kernel was selected during model construction. In such cases, variable importance is not directly interpretable due to the transformation of the input space, making it difficult to quantify individual feature contributions meaningfully (Guyon et al., 2002). Unlike linear models, where coefficients provide a direct measure of feature importance, nonlinear SVMs construct decision boundaries in high-dimensional spaces, where the contribution of each feature depends on complex interactions (Chang & Lin, 2011).
For logistic regression, variable importance is derived from model coefficients, where positive coefficients indicate a higher likelihood of the outcome (e.g., mental health condition), while negative coefficients suggest a protective effect. To enhance interpretability, we adopted a color scheme in our visualizations: dark gray for positive coefficients and light gray for negative coefficients. For Random Forest, variable importance is computed using the Gini impurity reduction criterion (Breiman, 2001). This metric quantifies how much each feature contributes to reducing class impurity across the decision trees by assessing the decrease in Gini impurity at each node split. Features with higher importance scores have a greater impact on classification performance. For LightGBM, variable importance is measured using information gain, which quantifies the total improvement in the model's objective function when a feature is used for node splitting across all trees in the boosting process. Information gain reflects how much a feature contributes to minimizing the loss function during training and is commonly used in gradient boosting frameworks (Ke et al., 2017). Features with higher gain values contribute more to optimizing the model's predictive accuracy.
The variable importance for both binary and multiclass models using logistic regression, random forest, and LightGBM is presented in Figure 3. To ensure comparability across models, we rescaled the variable importance scores for random forest and LightGBM by normalizing them to a maximum value of 100. For logistic regression, variable importance is represented by model coefficients, retaining both their relative scale and sign. Among machine learning models that provide feature importance, logistic regression offers a more interpretable framework since its importance scores are derived directly from model coefficients. Unlike tree-based methods, which rely on splitting criteria (such as Gini impurity for Random Forest or Gain for LightGBM), logistic regression coefficients retain their sign, allowing researchers to distinguish positive and negative associations with the target outcome. This property is particularly valuable in mental health detection, where it is critical to understand whether a term increases or decreases the likelihood of classification (e.g., identifying depressive symptoms).
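
The sketch below shows one way to extract these three kinds of importance scores and apply the max-100 rescaling described above; the synthetic data and feature count are illustrative assumptions.

```python
import numpy as np
from lightgbm import LGBMClassifier
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=400, n_features=20, random_state=1)

lr = LogisticRegression(max_iter=1000).fit(X, y)
rf = RandomForestClassifier(random_state=1).fit(X, y)
gbm = LGBMClassifier(importance_type="gain", random_state=1).fit(X, y)

# Logistic regression: signed coefficients (direction is preserved).
lr_importance = lr.coef_[0]

# Tree models: unsigned scores, rescaled so the top feature is 100,
# mirroring the normalization described above.
def rescale(scores):
    return 100 * scores / scores.max()

rf_importance = rescale(rf.feature_importances_)    # Gini impurity reduction
gbm_importance = rescale(gbm.feature_importances_)  # total information gain

print(np.argsort(-np.abs(lr_importance))[:5])  # top-5 feature indices per model
print(np.argsort(-rf_importance)[:5])
print(np.argsort(-gbm_importance)[:5])
```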
# Insert Figure 3 about here
Despite variations in ranking, the top features identified across machine learning models share strong overlap, reinforcing their relevance in mental health classification. However, different importance criteria lead to model-specific variations: logistic regression ranks features based on coefficient magnitude (allowing both positive and negative values), random forest uses Gini impurity reduction, and LightGBM employs gain-based ranking. While these models prioritize features differently, they consistently highlight depression-related language as the strongest predictor of mental health conditions on social media.
For binary classification models, 'depression' emerges as the most predictive feature across all methods, reinforcing its centrality in identifying mental health status. Beyond this, words associated with emotional distress, such as 'feel,' 'want,' and 'anxiety,' consistently appear in the top ranks, though their order varies. Logistic regression assigns strong positive coefficients to 'restless' and 'suicidal,' suggesting their direct correlation with depressive states. Meanwhile, tree-based models (random forest and LightGBM) highlight terms like 'die,' 'kill,' and 'suicide' more prominently, likely due to their effectiveness in decision splits. These differences reflect how each model processes textual features, with logistic regression providing interpretability through sign-based coefficients, while tree-based models prioritize decision-making power through split-based feature selection.
In the multiclass setting, feature importance rankings shift to reflect the distinctions between different mental health conditions. While 'depression' remains a dominant predictor, terms like 'bipolar' and 'anxiety' gain prominence, particularly in tree-based models (random forest and LightGBM), suggesting their utility in distinguishing among multiple mental health states. Logistic regression, on the other hand, highlights 'restless' and 'nervous' more strongly, aligning with its emphasis on anxiety-related symptoms. The presence of 'kill' and 'suicidal' in tree-based models underscores their role in severe mental health classifications. Despite these ranking differences, the core predictive features remain largely consistent, validating their role in mental health detection on social media.
Among models capable of generating variable importance, logistic regression stands out for its interpretability. Unlike tree-based methods, which assign importance based on split-based metrics, logistic regression allows for direct interpretation of feature coefficients, capturing both positive and negative associations. This provides a clearer understanding of which terms contribute most strongly to classification and in what direction. In contrast, while random forest and LightGBM effectively rank important features, their criteria for feature selection make direct interpretability more challenging.
# Discussion
This study provides an empirical evaluation of machine learning (ML) and deep learning (DL) models for mental health classification on social media, focusing on their predictability, interpretability, and computational efficiency. The findings highlight key trade-offs that researchers should consider when selecting models for mental health detection tasks. While DL models, such as ALBERT and GRU, have gained popularity for their ability to extract hierarchical representations from raw text, their advantages in small-to-medium datasets remain limited. The results indicate that in cases where dataset size is moderate, traditional ML models, such as logistic regression, random forests, and LightGBM, perform comparably to DL models while offering additional benefits in terms of interpretability and computational efficiency.
The size of the dataset plays a crucial role in determining the most suitable modeling approach. When working with small to medium-sized datasets, traditional ML models remain an effective choice. Their reliance on structured feature engineering, while requiring additional preprocessing efforts, allows for a more controlled and interpretable learning process. In contrast, DL models require large-scale training data to leverage their full potential. Although DL architectures can automatically extract complex linguistic patterns without extensive feature engineering, this advantage is less pronounced in settings with limited training samples. For researchers with small datasets, the use of feature engineering and careful selection of input variables is critical to optimizing model performance. The results suggest that DL models are more suitable for large-scale mental health detection tasks, where the volume of data is sufficient to justify their increased computational demands.
In addition to dataset size, computational efficiency remains a practical consideration in model selection. The ML models evaluated in this study consistently required less computational time compared to DL models, making them preferable in scenarios where efficiency is a priority. While DL models demonstrated competitive classification performance, their significantly longer training times present a challenge, particularly for researchers working with constrained computing resources. Given that many mental health detection applications require scalable solutions, this finding suggests that ML models provide a more efficient and accessible alternative for researchers seeking to deploy classification models without extensive computational infrastructure.
Interpretability is another critical factor in model selection, particularly for applications in mental health research where understanding the relationships between linguistic patterns and psychological states is essential. Among the ML models, logistic regression offers the clearest interpretability, as it provides direct coefficient estimates that indicate the relative influence of each feature. This advantage is particularly important in mental health classification, where identifying protective and risk-related linguistic markers can provide valuable insights into early detection and intervention strategies. Unlike logistic regression, tree-based models such as random forests and LightGBM do not distinguish between positive and negative associations but instead rank features based on their contribution to classification accuracy. This limitation reduces their interpretability but still allows for the identification of key predictive features. In contrast, DL models operate as black-box systems with no explicit feature importance scores, making them less suitable for researchers prioritizing explainability. Given these differences, logistic regression emerges as the preferred choice when interpretability is a key concern, while tree-based models provide flexibility for high-dimensional data without imposing strong linearity assumptions.
Despite the strengths of ML models in terms of efficiency and interpretability, it is important to acknowledge the assumptions underlying these approaches. Logistic regression, for example, assumes a linear relationship between the features and the log-odds of the target variable, an assumption that was not explicitly tested in this study. Future research should explore whether nonlinear transformations or interaction terms could improve model fit while maintaining interpretability. Similarly, while tree-based models do not impose strict assumptions about feature relationships, they rely on hierarchical partitioning mechanisms that may introduce biases in highly unbalanced datasets. These limitations highlight the importance of methodological rigor when selecting ML models for mental health research.
In addition to model selection, dataset composition and label consistency present challenges in mental health classification. The dataset used in this study was compiled from multiple publicly available sources, which, while beneficial for enhancing linguistic diversity, also introduced inconsistencies in class labels. Since each dataset was originally created for different research purposes, class boundaries may not be clearly defined when combined. This issue likely contributed to increased misclassification rates in the multi-class setting, particularly in categories with overlapping linguistic features such as depression, stress, and suicidal ideation. The presence of ambiguous class definitions suggests that future studies should consider collecting data directly from social media platforms using standardized labeling criteria. By ensuring greater consistency in data annotation, researchers can improve model generalizability and reduce classification errors.
Another limitation relates to annotation quality. Given the subjective nature of mental health expressions, the reliability of pre-existing labels in publicly available datasets can be uncertain. Manual verification of labels by domain experts could improve classification accuracy, but such an approach is time-consuming and resource-intensive. As an alternative, future work could explore Artificial Intelligence-assisted annotation strategies to enhance labeling consistency. Advances in natural language processing, particularly in large language models, offer opportunities for developing semi-automated annotation systems that incorporate human-in-the-loop validation. By combining automated text classification with expert oversight, researchers could create more comprehensive and reliable datasets for mental health detection.
The ethical implications of using social media data for mental health research also warrant careful consideration. While these datasets provide valuable insights into psychological well-being, they often include sensitive information that must be handled with caution. Privacy-preserving techniques, such as anonymization and differential privacy, should be explored to protect user identities while maintaining the linguistic features necessary for classification. Future research should also establish clearer guidelines for ethical data collection, ensuring that social media-derived datasets align with best practices in mental health ethics.
In summary, this study provides a comparative analysis of ML and DL models for mental health classification on social media, highlighting key considerations in accuracy, interpretability, and computational efficiency. The findings suggest that ML models remain a practical and interpretable choice for small to medium-sized datasets, while DL models may offer advantages when working with larger data volumes. Among ML models, logistic regression is particularly useful for its ability to distinguish between positive and negative feature importance, offering valuable insights into linguistic markers associated with mental health conditions. However, researchers should remain mindful of model assumptions and dataset inconsistencies, which can impact classification performance. Moving forward, efforts to improve data collection, annotation quality, and ethical considerations will be essential for advancing AI-driven mental health detection and ensuring that these models contribute to more effective, transparent, and responsible research practices.
# References
Bird, S., Klein, E., & Loper, E. (2009). Natural language processing with Python. O'Reilly Media Inc.

Bishop, C. M. (2006). Pattern recognition and machine learning. Springer.

Bradford, A., Meyer, A. N. D., Khan, S., Giardina, T. D., & Singh, H. (2024). Diagnostic error in mental health: A review. BMJ Quality & Safety, 33(10), 663-672. https://qualitysafety.bmj.com/content/33/10/663

Breiman, L. (2001). Random forests. Machine Learning, 45(1), 5-32.

Breiman, L., Friedman, J. H., Olshen, R. A., & Stone, C. J. (1984). Classification and regression trees. Wadsworth & Brooks/Cole Advanced Books & Software, Monterey, CA.

Calvo, R. A., Milne, D. N., Hussain, M. S., & Christensen, H. (2017). Natural language processing in mental health applications using non-clinical texts. Natural Language Engineering, 23(5), 649-685. https://doi.org/10.1017/S1351324916000383

Cao, Y., Dai, J., Wang, Z., Zhang, Y., Shen, X., Liu, Y., & Tian, Y. (2024). Systematic review: Text processing algorithms in machine learning and deep learning for mental health detection on social media. https://arxiv.org/abs/2410.16204

Chang, C.-C., & Lin, C.-J. (2011). LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology, 2(3), 1-27.

Chen, Y., Zhao, C., Xu, Y., & Nie, C. (2025). Year-over-year developments in financial fraud detection via deep learning: A systematic literature review. https://arxiv.org/abs/2502.00201

Cho, K., van Merrienboer, B., Gulcehre, C., Bahdanau, D., Bougares, F., Schwenk, H., & Bengio, Y. (2014). Learning phrase representations using RNN encoder-decoder for statistical machine translation. Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 1724-1734.

Cortes, C., & Vapnik, V. N. (1995). Support-vector networks. Machine Learning, 20(3), 273-297.

Davis, J., & Goadrich, M. (2006). The relationship between precision-recall and ROC curves. Proceedings of the 23rd International Conference on Machine Learning, 233-240.

De Choudhury, M., Counts, S., & Horvitz, E. (2013). Social media as a measurement of depression in populations. Proceedings of the ACM Annual Web Science Conference, 47-56. https://doi.org/10.1145/2464464.2464480

Devlin, J., Chang, M.-W., Lee, K., & Toutanova, K. (2019). BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.

Friedman, J. H. (2001). Greedy function approximation: A gradient boosting machine. Annals of Statistics, 29(5), 1189-1232.

Guntuku, S. C., Yaden, D. B., Kern, M. L., Ungar, L. H., & Eichstaedt, J. C. (2017). Detecting depression and mental illness on social media: An integrative review. Current Opinion in Psychology, 18, 43-49. https://doi.org/10.1016/j.copsyc.2017.07.005

Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene selection for cancer classification using support vector machines. Machine Learning, 46, 389-422.

Hargittai, E. (2015). Is bigger always better? Potential biases of big data derived from social network sites. The Annals of the American Academy of Political and Social Science, 659, 63-76. http://www.jstor.org/stable/24541849

Harris, Z. S. (1954). Distributional structure. Word, 10(2-3), 146-162. https://doi.org/10.1080/00437956.1954.11659520

Helmy, A., Nassar, R., & Ramadan, N. (2024). Depression detection for Twitter users using sentiment analysis in English and Arabic tweets. Artificial Intelligence in Medicine, 147, 102716. https://doi.org/10.1016/j.artmed.2023.102716

Hosmer, D. W., & Lemeshow, S. (2000). Applied logistic regression (Second Edition). John Wiley & Sons, Inc. https://doi.org/10.1002/0471722146

Jones, K. S. (1972). A statistical interpretation of term specificity and its application in retrieval. Journal of Documentation, 28(1), 11-21. https://doi.org/10.1108/eb026526

Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., Ye, Q., & Liu, T.-Y. (2017). LightGBM: A highly efficient gradient boosting decision tree. Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), 3149-3157.

Kessler, R. C., Aguilar-Gaxiola, S., Alonso, J., Benjet, C., Bromet, E. J., Cardoso, G., Degenhardt, L., de Girolamo, G., Dinolova, R. V., Ferry, F., Florescu, S., Gureje, O., Haro, J. M., Huang, Y., Karam, E. G., Kawakami, N., Lee, S., Lepine, J. P., Levinson, D., ... Koenen, K. C. (2017). Trauma and PTSD in the WHO World Mental Health Surveys. European Journal of Psychotraumatology, 8(sup5), 1353383. https://doi.org/10.1080/20008198.2017.1353383

Lan, Z., Chen, M., Goodman, S., Gimpel, K., Sharma, P., & Soricut, R. (2020). ALBERT: A lite BERT for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942.

Liu, Y., Shen, X., Zhang, Y., Wang, Z., Tian, Y., Dai, J., & Cao, Y. (2024). A systematic review of machine learning approaches for detecting deceptive activities on social media: Methods, challenges, and biases. arXiv, arXiv:2410.20293. https://doi.org/10.48550/arXiv.2410.20293

Powers, D. M. (2011). Evaluation: From precision, recall and F-measure to ROC, informedness, markedness and correlation. Journal of Machine Learning Technologies, 2(1), 37-63.

Shatte, A. B. R., Hutchinson, D. M., & Teague, S. J. (2019). Machine learning in mental health: A scoping review of methods and applications. Psychological Medicine, 49(9), 1426-1448. https://doi.org/10.1017/S0033291719000151

Tao, Y., Wang, Z., Zhang, H., & Wang, L. (2024). NEVLP: Noise-robust framework for efficient vision-language pre-training. https://arxiv.org/abs/2409.09582

Wei, Y., Gao, M., Xiao, J., Liu, C., Tian, Y., & He, Y. (2023). Research and implementation of cancer gene data classification based on deep learning. Journal of Software Engineering and Applications, 16, 155-169. https://doi.org/10.4236/jsea.2023.166009

WHO. (2023). Mental disorders [Retrieved February 9, 2025]. https://www.who.int/news-room/fact-sheets/detail/mental-disorders

Zhang, Y., Wang, Z., Ding, Z., Tian, Y., Dai, J., Shen, X., Liu, Y., & Cao, Y. (2025). Tutorial on using machine learning and deep learning models for mental illness detection. arXiv. https://arxiv.org/abs/2502.04342

| Type | Model | Binary (seconds) | Multiclass (seconds) |
| --- | --- | --- | --- |
| ML | SVM | 4681.96 | 22844.23 |
| ML | Logistic Regression | 7.33 | 181.86 |
| ML | Random Forest | 263.54 | 2895.43 |
| ML | LightGBM | 336.65 | 3968.33 |
| DL | ALBERT | 21244.18 | 20860.15 |
| DL | GRU | 1530.76 | 1567.24 |

Table 1. Training times (in seconds) for model optimization in binary and multi-class classification tasks.

| Model | F1 Score | AUROC |
| --- | --- | --- |
| Support Vector Machine (SVM) | 0.9401 | 0.93 |
| Logistic Regression | 0.9345 | 0.93 |
| Random Forest | 0.9359 | 0.92 |
| LightGBM | 0.9358 | 0.93 |
| ALBERT | 0.9576 | 0.95 |
| Gated Recurrent Units (GRU) | 0.9512 | 0.94 |

Table 2. F1 Scores and AUROC for Binary Classification Tasks.

| Model | F1 Score | Micro-Average AUROC |
| --- | --- | --- |
| Support Vector Machine (SVM) | 0.7610 | 0.95 |
| Logistic Regression | 0.7498 | 0.96 |
| Random Forest | 0.7478 | 0.96 |
| LightGBM | 0.7747 | 0.97 |
| ALBERT | 0.7841 | 0.97 |
| Gated Recurrent Units (GRU) | 0.7756 | 0.97 |

Table 3. F1 Scores and Micro-Average AUROC for Multi-Class Classification Tasks.


|
| 313 |
+
Figure 1 LightGBM Multi-Class Model Performance: Confusion Matrix.
|
| 314 |
+
|
| 315 |
+
The class labels are arranged as follows: Class 0: Anxiety, Class 1: Normal, Class 2: Depression, Class 3: Suicidal, Class 4: Stress, Class 5: Bipolar, and Class 6: Personality Disorder.
|
| 316 |
+
|
| 317 |
+

|
| 318 |
+
Figure 2 LightGBM Multi-Class Model Performance: Area Under the Receiver Operating Characteristic Curve.
|
| 319 |
+
|
| 320 |
+
The class labels are arranged as follows: Class 0: Anxiety, Class 1: Normal, Class 2: Depression, Class 3: Suicidal, Class 4: Stress, Class 5: Bipolar, and Class 6: Personality Disorder.
|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
(a) Logistic Regression (Binary)
|
| 324 |
+
|
| 325 |
+

|
| 326 |
+
(b) Logistic Regression (Multiclass)
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
(c) Random Forest (Binary)
|
| 330 |
+
|
| 331 |
+

|
| 332 |
+
(d) Random Forest (Multiclass)
|
| 333 |
+
|
| 334 |
+

|
| 335 |
+
(e) LightGBM (Binary)
|
| 336 |
+
|
| 337 |
+

|
| 338 |
+
(f) LightGBM (Multiclass)
|
| 339 |
+
Figure 3 Comparison of Feature Importance Across Different Models