Add MinerU batch fc685a2e-563a-4e1d-95ee-1752be146b95
This view is limited to 50 files because it contains too many changes.
- .gitattributes +32 -0
- data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_model.json +0 -0
- data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01597/full.md +715 -0
- data/2023/2308_01xxx/2308.01597/images.zip +3 -0
- data/2023/2308_01xxx/2308.01597/layout.json +0 -0
- data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_content_list.json +1617 -0
- data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_model.json +2107 -0
- data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01661/full.md +310 -0
- data/2023/2308_01xxx/2308.01661/images.zip +3 -0
- data/2023/2308_01xxx/2308.01661/layout.json +0 -0
- data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_model.json +0 -0
- data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01681/full.md +609 -0
- data/2023/2308_01xxx/2308.01681/images.zip +3 -0
- data/2023/2308_01xxx/2308.01681/layout.json +0 -0
- data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_model.json +0 -0
- data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01737/full.md +543 -0
- data/2023/2308_01xxx/2308.01737/images.zip +3 -0
- data/2023/2308_01xxx/2308.01737/layout.json +0 -0
- data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_model.json +0 -0
- data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01738/full.md +688 -0
- data/2023/2308_01xxx/2308.01738/images.zip +3 -0
- data/2023/2308_01xxx/2308.01738/layout.json +0 -0
- data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_content_list.json +681 -0
- data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_model.json +1138 -0
- data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01760/full.md +139 -0
- data/2023/2308_01xxx/2308.01760/images.zip +3 -0
- data/2023/2308_01xxx/2308.01760/layout.json +0 -0
- data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_model.json +0 -0
- data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01779/full.md +489 -0
- data/2023/2308_01xxx/2308.01779/images.zip +3 -0
- data/2023/2308_01xxx/2308.01779/layout.json +0 -0
- data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_content_list.json +0 -0
- data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_model.json +0 -0
- data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_origin.pdf +3 -0
- data/2023/2308_01xxx/2308.01825/full.md +360 -0
- data/2023/2308_01xxx/2308.01825/images.zip +3 -0
- data/2023/2308_01xxx/2308.01825/layout.json +0 -0
- data/2023/2308_01xxx/2308.01834/8f7c1f3a-8864-47d7-b9cd-e1b3128d4da8_content_list.json +750 -0
.gitattributes
CHANGED
@@ -2243,3 +2243,35 @@ data/2025/2508_10xxx/2508.10925/3284cd2f-5e9c-4699-a8ba-c0c685bdfb90_origin.pdf
 data/2025/2508_10xxx/2508.10934/710f6133-9fb9-4ddc-ac2b-de01aeef7363_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2508_11xxx/2508.11681/14f0b943-25fc-48fa-8405-a8a42c22b555_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2508_16xxx/2508.16610/4f4bfae3-fbaf-4366-a20e-cb467e799f7b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01834/8f7c1f3a-8864-47d7-b9cd-e1b3128d4da8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01861/f06ef930-faf1-4869-a457-1e460a479046_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01862/c48b27df-c391-4438-b7d9-79995a96a9e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01890/69fbb7f3-3b36-4f2e-b264-bbfa0e26e78b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01898/ecd68250-d4b9-4843-bace-133db34589e5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01899/c7258f07-b1fd-4ab0-a9d1-8663c7b2a249_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01906/2649c648-2107-4094-947e-418931e12237_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01907/4d597cac-2f23-473d-a9a4-3ca6535c18f5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01990/e5bd1fbe-7f78-4f36-95bb-48d2279a4807_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_01xxx/2308.01999/53f0a2b5-f79c-40ec-b49f-4f0096ad5506_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02019/ee8371c5-4478-4e7d-b898-e152a7fd206f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02053/06386cf4-c511-4066-8846-9d57fb78a924_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02062/c6b90d64-7e0f-405c-844f-9031c324c7d2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02097/3be81ada-480c-4dd0-9f26-3b45fd34a285_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02122/825bc916-12fe-4b98-bf88-e66b88b2a019_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02135/c50b9a86-0cc6-461a-8e1e-094acf923bc8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02149/da1034fa-c495-4fe5-8f99-968c2273ea94_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02151/4b6cffde-c112-49f3-b860-bade313f85a7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02299/9e4ad9d8-a725-4c3a-a2ef-888ec1aa443e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02565/9a4fd171-821b-4a80-933d-8c4b0c764367_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02575/5a69270d-7824-42d1-8391-a6a8bebb8659_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_02xxx/2308.02585/0e64277d-559a-42a4-a0bc-1a1eadf92664_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_03xxx/2308.03784/9ddc842e-ca7a-4a47-847a-0f6d561a895f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2023/2308_15xxx/2308.15483/5952c7f0-4ebb-4924-aa57-7cd4b5473be4_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_content_list.json
ADDED
The diff for this file is too large to render.
data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_model.json
ADDED
The diff for this file is too large to render.
data/2023/2308_01xxx/2308.01597/02afac10-8251-4a73-aa5f-400c9e5be391_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a53020450f779ca066d2059fca6343b8d17dcc3a1e29ea0f207cec24e97a753
+size 570396
data/2023/2308_01xxx/2308.01597/full.md
ADDED
@@ -0,0 +1,715 @@
# DOLCE: A Descriptive Ontology for Linguistic and Cognitive Engineering<sup>1</sup>
Stefano Borgo *, Roberta Ferrario, Aldo Gangemi, Nicola Guarino, Claudio Masolo, Daniele Porello, Emilio M. Sanfilippo and Laure Vieu
Abstract. DOLCE, the first top-level (foundational) ontology to be axiomatized, has remained stable for twenty years and today is broadly used in a variety of domains. DOLCE is inspired by cognitive and linguistic considerations and aims to model a commonsense view of reality, like the one human beings exploit in everyday life in areas as diverse as socio-technical systems, manufacturing, financial transactions and cultural heritage. DOLCE clearly lists the ontological choices it is based upon, relies on philosophical principles, is richly formalized, and is built according to well-established ontological methodologies, e.g. OntoClean. Because of these features, it has inspired most of the existing top-level ontologies and has been used to develop or improve standards and public domain resources (e.g. CIDOC CRM, DBpedia and WordNet). Being a foundational ontology, DOLCE is not directly concerned with domain knowledge. Its purpose is to provide the general categories and relations needed to give a coherent view of reality, to integrate domain knowledge, and to mediate across domains. In these 20 years DOLCE has shown that applied ontologies can be stable and that interoperability across reference and domain ontologies is a reality. This paper briefly introduces the ontology and shows how to use it on a few modeling cases.
Keywords: DOLCE, Foundational ontology, Ontological analysis, Formal ontology, Use cases
# Introduction
As a foundational ontology, $\mathrm{DOLCE}^2$ provides general categories and relations that can be reused in different application scenarios by specializing them to the specific domains to be modeled.
In order to rely on well-established modeling principles and theoretical bases, it is a common practice for the categories and relations of foundational ontologies to be philosophically grounded. This is one of the reasons why the ontological analysis preceding modeling is of paramount importance. A careful choice and characterization of categories and relations produces indeed ontologies that have higher chances of being interoperable, or at least of understanding potential obstacles to interoperability. In particular, when this strategy is applied to foundational ontologies, interoperability is possible also between the domain ontologies aligned to them.
From a philosophical perspective, DOLCE adopts a descriptive (rather than referentialist) metaphysics, as its main purpose is to make explicit already existing conceptualizations through the use of categories whose structure is influenced by natural language, the makeup of human cognition, and social practices. As a consequence, such categories are mostly situated at a mesoscopic level, and may change while scientific knowledge or social consensus evolve. Also, DOLCE's domain of discourse is formed by particulars, while properties and relations are taken to be universals.
Once the intended meaning of the terms denoting the relevant ontology categories has been analyzed, it should be expressed in a way that is as semantically transparent as possible. To this aim, DOLCE is equipped with a rich axiomatization in first-order modal logic. Such richness greatly enhances expressiveness but, on the other hand, it makes foundational ontologies non computable, due to the well-known trade-off between formal expressiveness and computability. For this reason, approximated and partial translations expressed in application-oriented languages are often provided, as is the case for DOLCE.<sup>3</sup>
# A bit of history of DOLCE
The first comprehensive presentation of DOLCE appeared in the deliverables of the WonderWeb project in the early 2000s, in particular (Masolo et al., 2003). Following this work, several application-oriented "lite" versions, including DOLCE-lite, DOLCE-ultralite, and DOLCE-zero (Paulheim and Gangemi, 2015), were later published and widely used; see (Presutti and Gangemi, 2016) for a summary (see also Sect. 4). The present article is mainly based on the work of Masolo et al. (2003), with the addition of concepts, e.g. roles, as introduced by Borgo and Masolo (2009).
The analysis underlying the formalization of DOLCE leverages the techniques of ontological engineering and the study of classes' meta-properties of the OntoClean methodology, firstly developed in the early 2000s by Guarino and Welty (2002) and later revised by Guarino and Welty (2009) and Guarino (2009).
A later work by Masolo et al. (2004) introduced social roles and concepts within DOLCE through a reification pattern, in this way allowing them to be introduced as particulars into the domain of discourse.
In 2009, DOLCE-CORE was introduced in Borgo and Masolo (2009). The main purpose behind this work was that of simplifying the whole system, making it more usable in applications, and at the same time acceptable under different philosophical stands. Such simplification was also intended to facilitate the task of further extending the ontology. In particular, some of the changes introduced by DOLCE-CORE are: the adoption of the notion of concept as an ontology category, a better explanation on how to distinguish and formalize properties, the formalization of the notion of resemblance to facilitate the use of qualities, and the possibility of having more quality spaces associated to the same quality. Further changes include the definition of different parthood relations depending on ontological categories, the introduction of a notion of time regularity, and a simplification concerning the most basic categories, which in DOLCE were called 'endurant' and 'perdurant' and which become 'object' and 'event' in DOLCE-CORE and can be distinguished based on whether they have space or time as main dimension, respectively.
Leaving aside these theoretical studies, DOLCE has remained fixed over the years, fulfilling the purpose of top-level ontologies to provide a solid and stable basis for modeling different domains, in this way ensuring interoperability of reference and domain ontologies that use DOLCE. Through the years, DOLCE has been enriched with modules to extend and specialize it. These modules facilitate the application and coherent use of the ontology. Some extensions tackle knowledge representation's specific issues, like the modeling of roles by Masolo et al. (2004), of artifacts by Vieu et al. (2008) and by Borgo et al. (2014), and of modules by Ferrario and Porello (2015). Others showed a possible integration with machine learning and in particular computer vision (Conigliaro et al., 2017). Extensions to the modeling of social (Bottazzi and Ferrario, 2009; Porello et al., 2013, 2014a) and cognitive aspects (Ferrario and Oltramari, 2004; Biccheri et al., 2020) have also been proposed. Today DOLCE is becoming part of the ISO 21838 standard, under development, and is available also in CLIF, a syntax of Common Logic ISO 24707 (2018).<sup>4</sup>
The remainder of the paper is organized as follows: section 1 introduces the most fundamental categories and relations of DOLCE, which are axiomatized in section 2. With the aim of enhancing understanding, section 3 shows the application of DOLCE's axioms to five modeling examples. Before looking at the structure of the ontology, we shall spend some words on its history.
# 1. Principles and structure of DOLCE
As depicted in the taxonomy in Figure 1, the basic categories of DOLCE are endurant (aka continuant), perdurant (occurrent), quality, and abstract.
Fig. 1. The taxonomy of DOLCE extended with the subcategories Concept, Role, and Artefact.
I. Continuant vs. occurrent.. The distinction between endurants and perdurants is inspired by the philosophical debate about change in time. In particular, while endurants may acquire and lose properties and parts through time, perdurants are fixed in time. Their fundamental difference concerns therefore their presence in time: endurants are wholly present (i.e., with all their parts) at any time in which they are present; differently, perdurants can be partially present, so that at any time in which they unfold only a part of them is present. Examples of endurants are a table, a person, a cat, or a planet, while examples of perdurants are a tennis match, a conference talk or a manufacturing process producing a certain item.
The relation connecting endurants and perdurants is called participation. An endurant can be in time by participating in a perdurant, and perdurants happen in time by having endurants as participants. For instance, a person is in time by participating in her own life, and a conference talk happens if at least one presenter (or attendant) participates in it.
II. Independent vs. dependent entity.. This distinction is found across the entire taxonomy of DOLCE. For instance, features (e.g., edges, holes, bumps, etc.) are endurants whose existence depends on some physical object (the feature bearer), while physical objects are independent entities, i.e., their existence does not require other endurants to exist. Note that if we take a notion of cross-categorical dependence, only abstract entities turn out to be independent in DOLCE. For instance, since a physical object necessarily participates in an event (namely, its life), every physical object requires the existence of at least one event (and vice versa).
III. Processes vs. events.. In DOLCE processes and events are special types of perdurants. As can be seen from Figure 1, DOLCE covers various classes of perdurant following taxonomic distinctions found in both philosophy and linguistics. In particular, a perdurant(-type) is stative or eventive according to whether it holds of the mereological sum of two of its instances, i.e. whether it is cumulative or not. Common examples of stative perdurants are states; e.g., a sitting state is stative because the sum of two sittings is still a sitting. Among stative perdurants, processes are cumulative but not homeomeric, namely, they have parts of different types; e.g., there are (even very short) temporal parts of a running that are not themselves runnings. Finally, eventive occurrences (events) are not cumulative, and they are called achievements if they are atomic, otherwise they are accomplishments.<sup>5</sup>
IV. Properties, qualities, quantities... DOLCE covers these entities through the general notion of quality.<sup>6</sup> Qualities are, roughly speaking, what can be perceived and measured; they are particulars inhering in endurants or perdurants. For example, when we talk about the red of a rose, we are talking about a particular quality (that specific red) which inheres in a particular endurant (that specific rose). See also Section 3.3.1. Qualities are therefore specific to their bearers (this is why they are called individual qualities in DOLCE), and they are present at each time in which their bearers are present. Depending on the entities in which they inhere (qualities are dependent entities indeed), DOLCE identifies qualities of different types, namely, physical, temporal or abstract qualities. Moreover, since complex qualities can have qualities themselves, DOLCE includes a notion of direct quality to distinguish qualities of endurants, perdurants and abstracts, from qualities of qualities.
To compare qualities of the same kind, e.g., the color of a rose and the color of a book cover, the category of quale is introduced. A quale is the position occupied by an individual quality within a quality space.<sup>7</sup> In our example, if the rose and the book cover exhibit the same shade of red, their individual colors occupy the same position (quale) in the color space. Hence, the two qualities are distinct but they have the same quale (within the same color space).
V. Function and Role.. DOLCE does not formalize functions and roles, although these have been widely investigated and represented in DOLCE-driven approaches (Borgo et al., 2010; Masolo et al., 2004). Roles are represented as (social) concepts, which are connected to other entities (like endurants, perdurants, and abstracts) by the relation of classification. In particular, roles are concepts that are anti-rigid and founded, meaning that (i) they have dynamic properties<sup>8</sup> and (ii) they have a relational nature, i.e. they depend on other roles and on contexts.
VI. Relations.. An important relation in DOLCE is parthood, which is time-indexed when connecting endurants and a-temporal when holding between perdurants or abstracts, i.e. between entities that do not change in time. Constitution is another temporalized relation in DOLCE, holding between either endurants or perdurants. It is often used to single out entities that are spatio-temporally co-located but nonetheless distinguishable for their histories, persistence conditions, or relational properties. A typical example of constitution is the relation between a statue and the amount of matter it is built with. The former started to exist at a later moment with respect to the latter; the latter can survive the destruction of the former and only for the former the existence of a sculptor is a necessary condition of existence.
The last basic category of the ontology is that of abstracts. These are entities that have neither spatial nor temporal qualities and are not qualities themselves. We will not deal with them in the current paper, so it should suffice to give a few examples: quality regions (and therefore also quality spaces), sets, and facts. Also, although DOLCE has other important categories and relations, in the present paper we will focus especially on those just presented, as they will be discussed in the following in the light of their axiomatization and used for the formalization of the cases in Section 3.
# 2. The formalization of DOLCE in First-Order Logic
The formal theory of DOLCE is written in the first-order quantified modal logic QS5, including the Barcan and the converse Barcan formula, cf. (Fitting and Mendelsohn, 2012). These assumptions entail a possibilistic view of the entities: the domain of quantification contains all possible entities, regardless of their actual existence.
Here we present an excerpt of the axiomatization that provides a general view of the DOLCE approach, focusing on the axioms required for the subsequent examples. An exhaustive presentation of DOLCE was given by Masolo et al. (2003) and a proof of consistency was provided by Kutz and Mossakowski (2011). In the following paragraphs, next to each axiom and definition we report the label of that formula in the primary presentation, cf. (Masolo et al., 2003). DOLCE is here extended to include the categories of Concepts (C) and Roles (RL) and the relation of classification (CF), as we shall see below; their formalization is taken from (Masolo et al., 2004).
# 2.1. Taxonomy
As said, the taxonomy of DOLCE is shown in Figure 1. We omit in the following the taxonomic axioms which can be found in (Masolo et al., 2003). With respect to the original version, we include in this paper the categories Concept and Role as specializations of Non-Agentive Social Object, and the category Artefact as specialization of Non-Agentive Physical Object. These will be used in the formalization of the examples.
# 2.2. Mereology
DOLCE assumes two primitive parthood relations: atemporal ($\mathsf{P}(x,y)$ for '$x$ is part of $y$') and time-dependent ($\mathsf{P}(x,y,t)$ for '$x$ is part of $y$ at time $t$') parthood. The same predicate symbol $\mathsf{P}$ is used for both relations. The first follows the principles of General Extensional Mereology (GEM), whereas temporary parthood drops the antisymmetry axioms, cf. (Masolo et al., 2003, p.33).
Here we give some axioms and definitions relative to temporary parthood, which we will use in Section 3.1 (in the rest of this section $\mathrm{Ddn}$ and $\mathrm{Adn}$ are the labels of definitions and axioms, respectively, used in (Masolo et al., 2003)). In the formulas, $\mathsf{PRE}(x,t)$ reads '$x$ is present at time $t$'; $\mathsf{PP}(x,y,t)$ reads '$x$ is a proper part of $y$ at $t$'; and $\mathsf{O}(x,y,t)$ reads '$x$ and $y$ overlap at time $t$'. The expression $x +_{te} y$ reads 'the temporary sum of $x$ and $y$', and $\sigma_{te} x\,\phi(x)$ reads 'the temporary fusion of each $x$ that satisfies $\phi$'. After the formulas we give a description in natural language.
a1 $\mathsf{P}(x,y,t) \to \mathsf{ED}(x) \wedge \mathsf{ED}(y) \wedge \mathsf{T}(t)$ (Temporary part typing, cf. Ad10)

a2 $\mathsf{P}(x,y,t) \to \mathsf{PRE}(x,t) \wedge \mathsf{PRE}(y,t)$ (cf. Ad17)

d1 $\mathsf{PP}(x,y,t) \stackrel{def}{=} \mathsf{P}(x,y,t) \wedge \neg\mathsf{P}(y,x,t)$ (Temporary proper part, cf. Dd20)

d2 $\mathsf{O}(x,y,t) \stackrel{def}{=} \exists z(\mathsf{P}(z,x,t) \wedge \mathsf{P}(z,y,t))$ (Temporary overlap, cf. Dd21)

d3 $x +_{te} y \stackrel{def}{=} \iota z\,\forall w,t(\mathsf{O}(w,z,t) \leftrightarrow (\mathsf{O}(w,x,t) \vee \mathsf{O}(w,y,t)))$ (Temporary binary sum, cf. Dd26)

d4 $\sigma_{te} x\,\phi(x) \stackrel{def}{=} \iota z\,\forall y,t(\mathsf{O}(y,z,t) \leftrightarrow \exists w(\phi(w) \wedge \mathsf{O}(y,w,t)))$ (Temporary sum, cf. Dd27)
Axiom (a1) states that temporary parthood holds only between two endurants at some time; axiom (a2) states that for a parthood relationship to hold both the part and the whole must be present; and (d1) states that a proper part is any part which does not contain the whole itself. (d2) defines overlap as a relation that holds on a pair of entities at the time when they have a common part. Using overlap, one can define binary and unrestricted sums, cf. (d3) and (d4). These definitions characterize new entities: the sum of two entities and the fusion (sum of possibly infinite entities) of all the entities that satisfy a given formula $\phi$, where $\phi$ does not contain time variables. Finally, note that in DOLCE sum (fusion) is defined also on events and on abstracts, thus including the sum (fusion) of times. We do not report these latter definitions since they are standard (cf. Dd18 and Dd19). We use the same notation $(+)$ and $(\sigma)$ for sum and fusion with or without the temporal parameter depending on the entities to which it applies.
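To make the temporary-parthood axioms above concrete, the following is a minimal Python sketch over a hand-built finite extension of $\mathsf{P}$; all identifiers and data are illustrative and not part of DOLCE.

```python
# Minimal finite model of temporary parthood; data are illustrative only.
P = {
    ("leg1", "table", "t"), ("leg1", "table", "t2"),
    ("top",  "table", "t"), ("top",  "table", "t2"),
}

PRE = {(e, tt) for (x, y, tt) in P for e in (x, y)}          # derived presence
DOMAIN = {e for (x, y, _) in P for e in (x, y)}

def part(x, y, t):          # P(x, y, t), taken reflexive for present entities
    return (x, y, t) in P or (x == y and (x, t) in PRE)

def proper_part(x, y, t):   # d1: part of, but not vice versa
    return part(x, y, t) and not part(y, x, t)

def overlap(x, y, t):       # d2: some z is part of both at t
    return any(part(z, x, t) and part(z, y, t) for z in DOMAIN)

# a2: both relata of a temporary parthood are present at that time.
assert all((x, t) in PRE and (y, t) in PRE for (x, y, t) in P)
print(proper_part("leg1", "table", "t"), overlap("leg1", "top", "t"))  # True False
```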
# 2.3. Quality and quale
The relation being a quality of (qt) is primitive in DOLCE. Its full characterization is in (Masolo et al., 2003, p.35). To be able to say that $x$ is a quality of $y$ of type $\phi$ we extend it relative to a type as follows:
d5 $\mathsf{qt}(\phi,x,y) \stackrel{def}{=} \mathsf{qt}(x,y) \wedge \phi(x) \wedge \mathsf{SBL}_X(Q,\phi)$ (Quality of type $\phi$, cf. Dd29)
where $\mathsf{SBL}_X(Q,\phi)$ is an abbreviation for the statement that $\phi$ is a leaf in the DOLCE hierarchy of qualities (i.e. it is a minimal category in the quality branch of Fig.1, cf. (Masolo et al., 2003, p.27)).
Then, DOLCE defines the temporal quale (relation ql), i.e., the position occupied by an individual quality within a quality space, as follows (recall that TL is the temporal location category, see Fig. 1):
d6 $\mathsf{ql}_{T,PD}(t,x) \stackrel{def}{=} \mathsf{PD}(x) \wedge \exists z(\mathsf{qt}(\mathsf{TL},z,x) \wedge \mathsf{ql}(t,z))$ (Temporal quale of perdurants, cf. Dd30)

d7 $\mathsf{ql}_{T,ED}(t,x) \stackrel{def}{=} \mathsf{ED}(x) \wedge t = \sigma t'(\exists y(\mathsf{PC}(x,y,t')))$ (Temporal quale of endurants, cf. Dd31)

d8 $\mathsf{ql}_{T}(t,x) \stackrel{def}{=} \mathsf{ql}_{T,ED}(t,x) \vee \mathsf{ql}_{T,PD}(t,x) \vee \mathsf{ql}_{T,Q}(t,x)$ (Temporal quale, cf. Dd35)
From (d6) the temporal quale of a perdurant is the quale associated to the time location quality (TL) of the perdurant, and from (d7) the temporal quale of an endurant is the sum of all the times during which the endurant participates (PC) in some perdurant. (The participation relation is formally introduced below.) The temporal quale of a quality ($\mathsf{ql}_{T,Q}$) is defined in a similar way (Masolo et al., 2003, p.28). Finally, the temporal quale of an entity is given by the collection of all the previous definitions, (d8).
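As a toy illustration of (d7) (not an official DOLCE serialization), the temporal quale of an endurant can be computed as the union of the times at which it participates in some perdurant, with times modeled as sets of atomic instants:

```python
# Temporal quale of an endurant (d7); times are frozensets of atomic
# instants, so mereological fusion becomes set union. Data are illustrative.
PC = {  # participation: (endurant, perdurant, time)
    ("rose", "blooming", frozenset({1, 2, 3})),
    ("rose", "wilting",  frozenset({4, 5})),
}

def temporal_quale_ed(x):
    times = [t for (e, _, t) in PC if e == x]
    return frozenset().union(*times) if times else frozenset()

print(sorted(temporal_quale_ed("rose")))   # [1, 2, 3, 4, 5]
```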
Qualities are classified in DOLCE as physical, temporal, and abstract qualities, as stated below; the formulas add that a quality inheres in one and only one entity ($\mathsf{qt}(x,y)$ reads '$x$ is a quality of $y$'):
a3 $\mathsf{PQ}(x) \to \exists! y\,(\mathsf{qt}(x,y) \wedge \mathsf{PED}(y))$ (Physical quality, cf. Ad47)

a4 $\mathsf{TQ}(x) \to \exists! y\,(\mathsf{qt}(x,y) \wedge \mathsf{PD}(y))$ (Temporal quality, cf. Ad46)

a5 $\mathsf{AQ}(x) \to \exists! y\,(\mathsf{qt}(x,y) \wedge \mathsf{NPED}(y))$ (Abstract quality, cf. Ad48)
# 2.4. Time and existence
Actual existence in DOLCE is represented by means of the being present at (PRE) relation. The assumption here is that things exist if they have a temporal quale.
d9 $\mathsf{PRE}(x,t)\stackrel {\mathsf{def}}{=}\exists t^{\prime}(\mathsf{ql}_{T}(t^{\prime},x)\wedge \mathsf{P}(t,t^{\prime}))$ (Being Present at $t$ , cf. Dd40)
Further properties of PRE are described in (Masolo et al., 2003), Section 4.3.8.
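Continuing the toy encoding, (d9) can be read as: an entity is present at $t$ exactly when $t$ is part of its temporal quale. A minimal sketch, assuming times are sets of atomic instants:

```python
# Being present at t (d9): t is part (subset) of the temporal quale of x.
QL_T = {"rose": frozenset({1, 2, 3, 4, 5}), "party": frozenset({2, 3})}

def present(x, t):
    return x in QL_T and frozenset(t) <= QL_T[x]

print(present("party", {2}))      # True
print(present("party", {4, 5}))   # False
```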
# 2.5. Participation
The participation (PC) relation connects endurants, perdurants, and times, i.e. endurants participate in perdurants at a certain time (a6). Here we write $\mathsf{PC}(x,y,t)$ for '$x$ participates in $y$ at time $t$'. (a7) states that a perdurant has at least one participant at any time at which it is present, and (a8) that an endurant participates in at least one perdurant. Axiom (a9) says that for an endurant to participate in a perdurant they must be present at the same time. We also introduce the relation of constant participation ($\mathsf{PC}_{\mathbb{C}}$), cf. (a10), i.e., participation during the whole perdurant, which we will use in sections 3.4 and 3.5.
a6 $\mathsf{PC}(x,y,t)\to \mathsf{ED}(x)\wedge \mathsf{PD}(y)\wedge \mathsf{T}(t)$ (Participation typing, cf. Ad33)
a7 $\mathrm{PD}(x)\wedge \mathrm{PRE}(x,t)\to \exists y(\mathrm{PC}(y,x,t))$ (cf. Ad34)
a8 $\operatorname {ED}(x)\to \exists y,t(\mathsf{PC}(x,y,t))$ (cf. Ad35)
a9 $\mathsf{PC}(x,y,t)\to \mathsf{PRE}(x,t)\wedge \mathsf{PRE}(y,t)$ (cf. Ad36)
a10 $\mathsf{PC}_{\mathbb{C}}(x,y)\stackrel {\text{def}}{=}\exists t(\mathsf{PRE}(y,t))\wedge \forall t(\mathsf{PRE}(y,t)\to \mathsf{PC}(x,y,t))$ (Const. Participation, cf. Dd63)
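The following sketch checks (a6), (a7) and (a9) on a small hand-built extension and derives constant participation in the sense of (a10); the entities and times are illustrative only.

```python
# Toy extension for participation.
ED, PD, T = {"speaker", "listener"}, {"talk"}, {"t1", "t2"}
PRE = {(e, t) for e in ED | PD for t in T}           # everything present at t1, t2
PC = {("speaker", "talk", "t1"), ("speaker", "talk", "t2"),
      ("listener", "talk", "t1")}

# a6 (typing) and a9 (co-presence) hold in this toy model.
assert all(x in ED and y in PD and t in T for (x, y, t) in PC)
assert all((x, t) in PRE and (y, t) in PRE for (x, y, t) in PC)

# a7: every perdurant has a participant whenever it is present.
assert all(any((x, y, t) in PC for x in ED)
           for y in PD for t in T if (y, t) in PRE)

def constant_participant(x, y):                      # a10: PC_C(x, y)
    times = [t for t in T if (y, t) in PRE]
    return bool(times) and all((x, y, t) in PC for t in times)

print(constant_participant("speaker", "talk"))   # True
print(constant_participant("listener", "talk"))  # False
```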
# 2.6. Constitution
The constitution relation $\mathsf{K}$ is mainly used here to model the scenario in Section 3.1. We report only a few axioms required to model the scenario ($\mathsf{K}(x,y,t)$ reads '$x$ constitutes $y$ at time $t$').
a11 $\mathsf{K}(x,y,t)\to ((\mathsf{ED}(x)\lor \mathsf{PD}(x))\land (\mathsf{ED}(y)\lor \mathsf{PD}(y))\land \mathsf{T}(t))$ (Constitution typing, cf. Ad20)
a12 $\mathsf{K}(x,y,t)\to (\mathsf{PED}(x)\leftrightarrow \mathsf{PED}(y))$ (cf. Ad21)
a13 $\mathsf{K}(x,y,t)\to \neg \mathsf{K}(y,x,t)$ (cf. Ad24)
(a11) states that $\mathsf{K}$ applies to pairs of endurants or of perdurants and a time. (a12) states that a physical endurant can be constituted only by another physical endurant (and vice versa). (a13) states that constitution is asymmetric.
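A minimal sketch of (a12) and (a13) on the statue/amount-of-matter example discussed in Section 1; names are illustrative.

```python
# Constitution K as (constituent, constituted, time) triples (toy data).
PED = {"clay", "statue"}                 # physical endurants
K = {("clay", "statue", "t1"), ("clay", "statue", "t2")}

# a12: across K, being a physical endurant transfers both ways.
assert all((x in PED) == (y in PED) for (x, y, t) in K)

# a13: constitution is asymmetric at every time.
assert all((y, x, t) not in K for (x, y, t) in K)
print("toy constitution model satisfies (a12) and (a13)")
```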
# 2.7. Concepts, roles, and classification
As anticipated, the relation of classification (CF) is not in (Masolo et al., 2003), as it applies to the category Concept (C) and to its subcategories, including Role (RL), which informally collects particulars that classify, as introduced in (Masolo et al., 2004). We thus take the following axioms from the latter work ($\mathsf{CF}(x,y,t)$ stands for 'at time $t$, $x$ is classified by the concept $y$'):
a14 $\mathsf{CF}(x,y,t) \to \mathsf{ED}(x) \wedge \mathsf{C}(y) \wedge \mathsf{T}(t)$ (cf. A11 in (Masolo et al., 2004)<sup>10</sup>)

a15 $\mathsf{CF}(x,y,t) \to \mathsf{PRE}(x,t)$ (cf. A12 in (Masolo et al., 2004))

a16 $\mathsf{CF}(x,y,t) \to \neg\mathsf{CF}(y,x,t)$ (cf. A14 in (Masolo et al., 2004))

a17 $\mathsf{CF}(x,y,t) \wedge \mathsf{CF}(y,z,t) \to \neg\mathsf{CF}(x,z,t)$ (cf. A15 in (Masolo et al., 2004))

d10 $\mathsf{AR}(x) \stackrel{def}{=} \forall y,t(\mathsf{CF}(y,x,t) \to \exists t'(\mathsf{PRE}(y,t') \wedge \neg\mathsf{CF}(y,x,t')))$ (cf. D1 in (Masolo et al., 2004))

d11 $\mathsf{RL}(x) \stackrel{def}{=} \mathsf{AR}(x) \wedge \mathsf{FD}(x)$ (cf. D3 in (Masolo et al., 2004))
The classification relationship CF applies to an endurant, a concept and a time (a14), requires the endurant to be present when it is classified (a15), and is asymmetric (a16). A concept can classify other concepts but not what the latter classify; this is stated to avoid circularity (a17). Roles (RL) are defined (d11) as concepts that are anti-rigid (d10) and founded. Informally, the foundation property (FD) holds for a concept that is defined by means of another concept such that the instances of the latter are all external to (not part of) the instances of the former (Masolo et al., 2004).
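The sketch below encodes a toy classification relation and tests (a16), (a17) and the anti-rigidity condition of (d10) on a finite set of times; the data are illustrative.

```python
# Classification CF as (classified, concept, time) triples (toy data).
T = {"t1", "t2"}
PRE = {("Potter", "t1"), ("Potter", "t2")}
CF = {("Potter", "teacher", "t1")}          # Potter is not a teacher at t2
CONCEPTS = {c for (_, c, _) in CF}

# a16 and a17: no symmetric and no two-level classification loops.
assert all((y, x, t) not in CF for (x, y, t) in CF)
assert all(not ((y, z, t) in CF and (x, z, t) in CF)
           for (x, y, t) in CF for z in CONCEPTS)

def anti_rigid(concept):
    """d10 on a finite model: whatever the concept ever classifies is
    present at some time at which it is not so classified."""
    return all(any((x, t2) in PRE and (x, concept, t2) not in CF for t2 in T)
               for (x, c, t1) in CF if c == concept)

print(anti_rigid("teacher"))   # True: Potter exists at t2 without the role
```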
# 3. Analysis and formalization in DOLCE: examples
We present in the following sections how to formalize the five given cases according to DOLCE. Since to model some cases it is helpful to use a temporal ordering relation, and since DOLCE does not formalize any, we introduce one here as follows: $<$ is an ordering relation over atomic and convex regions of time (usually, these are understood as time instants and time intervals) such that if $t_1 < t_2$ holds, then $t_1$ and $t_2$ are ordered and non-overlapping, i.e., $\neg\mathsf{O}(t_1, t_2)$. We write $t_1 \leqslant t_2$ to mean that $t_1$ and $t_2$ are ordered, may properly overlap (i.e., they overlap but none is completely included in the other), and, given $t$ their overlapping region, $t_1 - t < t_2 - t$ holds.
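A possible reading of this ordering over convex time regions, modeled here as closed intervals (an assumption of this sketch, not a DOLCE commitment):

```python
# Time regions as closed intervals (start, end); illustrative only.
def overlaps(a, b):
    return max(a[0], b[0]) <= min(a[1], b[1])

def strictly_before(a, b):      # t1 < t2: ordered and non-overlapping
    return a[1] < b[0]

def weakly_before(a, b):        # t1 <= t2: ordered, may properly overlap
    return a[0] <= b[0] and a[1] <= b[1]

t, t2, t3 = (0, 3), (4, 8), (9, 12)
print(strictly_before(t, t2), strictly_before(t2, t3))   # True True
print(weakly_before((0, 5), (2, 8)), overlaps((0, 5), (2, 8)))  # True True
```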
# 3.1. Case 1: Composition/Constitution
"There is a four-legged table made of wood. Some time later, a leg of the table is replaced. Even later, the table is demolished so it ceases to exist although the wood is still there after the demolition."
DOLCE provides two ways to model this and similar examples. The first option, which we call artifact-based and follow here, considers entities like tables and legs as ontological entities on their own because of their artifactual status, namely, the fact that tables and legs are intentionally produced products. The second option, called role-based, considers table and leg as roles of objects. In this view, indeed, some objects play the role of table and leg in a given context but not necessarily. We do not use this second modeling approach for Case 1 and exemplify it for Case 2 (see next section), where the adoption of the role perspective is more natural. Note that DOLCE is neutral with respect to the choice between these two modeling approaches: it entirely depends on what one takes as essential properties of an entity, that is, how one answers the question: is 'being a table' an essential property for that object or is it only an accidental condition? In this way, by using DOLCE, the knowledge engineer is free to choose the option that best matches their modeling purposes and application concerns.
Tables and legs are objects whose kinds provide criteria for their persistence in time. We shall assume that a table remains the same object whenever it has a suitable shape and the right functionalities, even though some of its legs may be substituted. For simplicity, let us assume that a table is identified by a tabletop, i.e., no matter what happens, a table remains the same entity provided that its tabletop is not substituted or destroyed. Clearly, when a leg is substituted, the quantity of wood that constitutes the table changes. It follows that the existence of the table does not imply that it is made of the same matter throughout its whole life. Allowing the possibility that some entities keep existing while some of their parts change (or even cease to exist) is a design characteristic of DOLCE. More precisely, the ontology allows distinguishing between quantities of matter (e.g., the wood of which a table is made), the object constituted by the matter (that object made of that wood), and the artifact (the table, i.e., the functional object (Mizoguchi et al., 2016)).
The constitution and composition relations in DOLCE capture distinct forms of dependence: the former is the dependence holding between entities with different essential properties (intercategorical), like the dependence of a table on the matter it is made of; the latter holds between entities with the same essential properties (intracategorical), like the dependence of a table on the tabletop and the legs. It follows that constitution connects elements belonging to distinct categories and that are related by an existential co-temporal dependence. Here, it holds between elements of the category Matter (the considered amount of wood) and elements of the category Physical Object (the object made of that wood), since a material object exists at time $t$ only if there exists at $t$ a quantity of matter that constitutes it. The composition relation (expressed in DOLCE by parthood restricted to the category at stake) holds instead among elements of the same category which are bound to form a more complex element. These are generally called composing parts or components. In this case, composition implies that the existence of the composed object requires the co-temporal existence of its composing objects.
The DOLCE categories that we use for the artifact-based modeling of this case are: matter (M), physical object (POB), and Time (T). We will also use the Artefact category, as introduced by Borgo and Vieu (2009), and two new subclasses of it introduced specifically for this scenario, i.e., Table and Leg. In terms of relations we use: being subclass (IS_A), parthood (P), constitution (K) and being present (PRE). We also use the sum operator (+), and the order relation (<) for time.
Figure 2 depicts the portion of the DOLCE taxonomy and relationships considered in this case. For the sake of simplicity, relationships like parthood (P) and constitution (K) are restricted in the figure to the classes relevant for the representation of the example. Also, in all figures, ternary relations are shown in a simplified manner (e.g., K at t).
Formally, Case 1 can be expressed as follows.
Fig. 2. Fragment of the DOLCE taxonomy and relevant relationships for Case 1.
Taxonomic claims:
$$
\mathsf{Artefact}(x) \rightarrow \mathsf{POB}(x) \tag{1}
$$

$$
\mathsf{Table}(x) \rightarrow \mathsf{Artefact}(x) \tag{2}
$$

$$
\mathsf{Tabletop}(x) \rightarrow \mathsf{Artefact}(x) \tag{3}
$$

$$
\mathsf{Leg}(x) \rightarrow \mathsf{Artefact}(x) \tag{4}
$$

$$
\mathsf{Wood}(x) \rightarrow \mathsf{M}(x) \tag{5}
$$
The previous formulas state that an artifact is a physical object, that tables, tabletops and legs are artifacts, and that wood is matter. Formula (6) represents the elements and the temporal constraints ($L_{4'}$ and $W_{4'}$ are the elements which are substituted for the original table parts).
$$
\begin{array}{l}
\mathsf{Table}(T) \wedge \mathsf{Tabletop}(Tp) \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{Leg}(L_i) \wedge \mathsf{Leg}(L_{4'}) \wedge \mathsf{Wood}(W_{top}) \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{Wood}(W_i) \wedge {} \\
\mathsf{Wood}(W_{4'}) \wedge \mathsf{T}(t) \wedge \mathsf{T}(t') \wedge \mathsf{T}(t'') \wedge t < t' \wedge t' < t'' \tag{6}
\end{array}
$$
The formula above states that $T$ is a table; $L_i$ are legs and so is $L_{4'}$; $W_{top}$ is an amount of wood and so are $W_i$ and $W_{4'}$ (informally, these are the amounts of wood of which the tabletop, the legs, and the new leg are made, respectively); $t$, $t'$, and $t''$ are temporal instants or intervals such that $t$ is earlier than $t'$ and $t'$ is earlier than $t''$.
Stating the elements' presence:
$$
\begin{array}{l}
\mathsf{PRE}(T,t) \wedge \mathsf{PRE}(T,t') \wedge \mathsf{PRE}(Tp,t) \wedge \mathsf{PRE}(Tp,t') \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{PRE}(L_i,t) \wedge \bigwedge_{1 \leqslant i \leqslant 3} \mathsf{PRE}(L_i,t') \wedge {} \\
\mathsf{PRE}(L_{4'},t') \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{PRE}(W_i,t) \wedge \bigwedge_{1 \leqslant i \leqslant 3} \mathsf{PRE}(W_i,t') \wedge \mathsf{PRE}(W_{4'},t') \wedge \bigwedge_{1 \leqslant i \leqslant 3} \mathsf{PRE}(W_i,t'') \wedge {} \\
\mathsf{PRE}(W_{4'},t'') \wedge \neg\mathsf{PRE}(T,t'') \wedge \bigwedge_{1 \leqslant i \leqslant 3} \neg\mathsf{PRE}(L_i,t'') \wedge \neg\mathsf{PRE}(L_{4'},t'') \tag{7}
\end{array}
$$
Formula (7) states that the table $T$ is present at $t$ and $t'$ ; the legs $L_i$ are present at $t$ and $t'$ except for $L_4$ which is not present at $t'$ ; $L_{4'}$ is present at $t'$ ; $W_{top}$ and $W_i$ are present at $t, t'$ and $t''$ except $W_4$ for which nothing is said about $t'$ and $t''$ ; $W_{4'}$ is present at $t'$ and $t''$ .
Relational claims:
$$
\begin{array}{l}
\mathsf{P}(Tp,T,t+t') \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{P}(L_i,T,t) \wedge \bigwedge_{1 \leqslant i \leqslant 3} \mathsf{P}(L_i,T,t') \wedge \mathsf{P}(L_{4'},T,t') \wedge \neg\mathsf{P}(L_4,T,t') \wedge {} \\
\mathsf{K}(W_{top},Tp,t+t') \wedge \bigwedge_{1 \leqslant i \leqslant 4} \mathsf{K}(W_i,L_i,t) \wedge \bigwedge_{1 \leqslant i \leqslant 3} \mathsf{K}(W_i,L_i,t') \wedge \mathsf{K}(W_{4'},L_{4'},t') \tag{8}
\end{array}
$$
Formula (8) states that the tabletop $Tp$ is a component of the table $T$ at $t$ and $t'$ ; the legs $L_i$ are components of $T$ at $t$ ; the legs $L_1, L_2, L_3$ and $L_{4'}$ are components of $T$ at $t'$ ; $W_{top}$ and $W_1, W_2, W_3$ are constituents of the tabletop and legs (respectively) at $t$ and $t'$ ; $W_4$ is a constituent of $L_4$ at $t$ ; $W_{4'}$ is a constituent of $L_{4'}$ at $t'$ .
Since constitution is transitive and distributes over parthood, it follows that the table $T$ is constituted by the sum of $W_{top}$ , $W_1$ , $W_2$ , $W_3$ and $W_4$ at $t$ , and by that of $W_{top}$ , $W_1$ , $W_2$ , $W_3$ and $W_{4'}$ at $t'$ .
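As an illustration, the facts of formulas (6)-(8) can be encoded as sets of tuples and the matter constituting the table at each time derived by distributing constitution over the parthood facts; `t2` stands for $t'$ and `L4b`/`W4b` for $L_{4'}$/$W_{4'}$ (illustrative names).

```python
# Case 1 facts (formulas (6)-(8)) as Python sets; names are illustrative.
P = ({("Tp", "T", "t"), ("Tp", "T", "t2")}
     | {(f"L{i}", "T", "t") for i in range(1, 5)}
     | {(f"L{i}", "T", "t2") for i in range(1, 4)} | {("L4b", "T", "t2")})

K = ({("Wtop", "Tp", "t"), ("Wtop", "Tp", "t2")}
     | {(f"W{i}", f"L{i}", "t") for i in range(1, 5)}
     | {(f"W{i}", f"L{i}", "t2") for i in range(1, 4)} | {("W4b", "L4b", "t2")})

def matter_of(whole, t):
    """Constituents of the components of `whole` at t (constitution
    distributes over parthood, as noted in the text)."""
    parts = {x for (x, y, tt) in P if y == whole and tt == t}
    return {m for (m, obj, tt) in K if obj in parts and tt == t}

print(sorted(matter_of("T", "t")))    # ['W1', 'W2', 'W3', 'W4', 'Wtop']
print(sorted(matter_of("T", "t2")))   # ['W1', 'W2', 'W3', 'W4b', 'Wtop']
```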
The modeling presented above is mainly focused on objects: the table as a whole and the legs and tabletop as its components. In this view, the perdurants during which the table changes are not modeled. In DOLCE one can explicitly introduce such perdurants, like the replacement and the demolition accomplishments. This second approach would make explicit the modeling of how and why the changes happen. The two views can be integrated in a single model since the essential relationships between the whole, its components and the material they are made of remain unchanged. Other modeling views, like the functional or the role-based modelings, are also possible in DOLCE.
# 3.2. Case 2: Roles
"Mr. Potter is the teacher of class 2C at Shapism School and resigns at the beginning of the spring break. After the spring break, Mrs. Bumblebee replaces Mr. Potter as the teacher of 2C. Also, student Mary left the class at the beginning of the break and a new student, John, joins in when the break ends."
This case requires modeling social roles; thus we follow the role-based modeling approach briefly mentioned in discussing Case 1. Roles are properties that an entity can have temporarily (roles can be acquired and lost at will), and they depend on an external entity, often indicated as the context, which (perhaps implicitly) defines them. In this example, the roles of student and teacher are defined within a school system, which we shall assume to stand for the context of the example.
To model Case 2, we need four instances of Person, namely Mr. Potter, Mrs. Bumblebee, Mary, and John, as well as two instances of Object, namely, class 2C and Shapism School. $^{12}$
At first, say at time $t_1$ , we have that Mr. Potter has the role of teacher (at the Shapism School's class 2C), technically writing that such role property holds for Mr. Potter at $t_1$ . At the same time, $t_1$ , the property does not hold for Mrs. Bumblebee. During the spring break period, say at $t_2$ , the property holds for neither, even though the role property continues to exist, since the entities that define it (the Shapism School and the Shapism School's class 2C) continue to exist during the break. After the spring break, at $t_3$ , Mrs. Bumblebee has the (Shapism School's class 2C) teacher role and Mr. Potter has not. The role teacher is played by a person at $t_1$ , by nobody at $t_2$ , and by another person at $t_3$ . The Shapism School's class 2C teacher role exists and does not change during the whole period. Since the teacher role can be played by one person at a time, usually one says that Mrs. Bumblebee replaced Mr Potter in that teacher role.
Similarly, at first Mary has the student role (at the Shapism School's class 2C) and John has not. Only the persons who are students before the break and do not leave the class have the student role during the break. Those people, now including John, have the Shapism School's class 2C student role after the break. In this case, however, one cannot say that John substituted Mary since, differently from teacher roles, which are characterized by individual rights and duties (an English teacher and a math teacher must satisfy different requirements and have duties tailored to the discipline they are hired for), the class 2C student role does not differentiate among players.
The DOLCE categories that we need for modeling this case are: agentive physical object (APO), non-agentive social object (NASO), and Time (T). We will also use the Teacher and Student roles as specializations of the Role category (RL, a subcategory of NASO) from (Masolo et al., 2004). In terms of relations we use: being subclass (IS_A), being present (PRE), time order (<), mereological sum (+), and the classify relation (CF) also introduced in (Masolo et al., 2004). Figure 3 depicts some relevant classes and relationships for this case.
Formally, Case 2 can be expressed as follows.
Taxonomic claims:
$$
\mathsf{Person}(x) \rightarrow \mathsf{APO}(x) \tag{9}
$$

$$
\mathsf{FunctRL}(x) \rightarrow \mathsf{RL}(x) \tag{10}
$$

$$
\mathsf{RL}(x) \rightarrow \mathsf{NASO}(x) \tag{11}
$$
The previous formulas state that a person is an agentive physical object, a functional role is a role and a role is a non-agentive social object.
Functional role characterization:
$$
\mathsf{FunctRL}(y) \wedge \mathsf{CF}(x,y,t) \wedge \mathsf{CF}(x',y,t) \rightarrow x = x' \tag{12}
$$
Fig. 3. Fragment of the DOLCE taxonomy and relevant relationships for Case 2.
|
| 278 |
+
|
| 279 |
+
Formula (12) states that a functional role $(y)$ can classify only one entity at each time.
|
| 280 |
+
|
| 281 |
+
The elements and the temporal constraints:
|
| 282 |
+
|
| 283 |
+
$$
\operatorname{Person}(Potter) \wedge \operatorname{Person}(Bumblebee) \wedge \operatorname{Person}(Mary) \wedge \operatorname{Person}(John) \wedge
$$

$$
\operatorname{RL}(2CStudent) \wedge \operatorname{FunctRL}(2CTeacher) \wedge \neg\operatorname{FunctRL}(2CStudent) \wedge
$$

$$
\mathrm{T}(t_1) \wedge \mathrm{T}(t_2) \wedge \mathrm{T}(t_3) \wedge t_1 < t_2 < t_3 \tag{13}
$$
|
| 290 |
+
|
| 291 |
+
Formula (13) states that Potter, Bumblebee, Mary, and John are persons; that 2CTeacher and 2CStudent are roles and that the first of these is a functional role. Finally, the formula says that $t_i$ are times and indicates their ordering.
|
| 292 |
+
|
| 293 |
+
Stating the elements' presence:
|
| 294 |
+
|
| 295 |
+
$$
\operatorname{PRE}(Potter, t_1) \wedge \operatorname{PRE}(Bumblebee, t_2 + t_3) \wedge \operatorname{PRE}(Mary, t_1) \wedge \operatorname{PRE}(John, t_3) \tag{14}
$$
|
| 300 |
+
|
| 301 |
+
Formula (14) states that Potter, Bumblebee, Mary, and John exist at least at the listed times.
|
| 302 |
+
|
| 303 |
+
Relational claims:
|
| 304 |
+
|
| 305 |
+
$$
|
| 306 |
+
\begin{array}{l} \forall x \neg \mathrm {C F} (x, 2 C T e a c h e r, t _ {2}) \wedge \\ \mathrm {C F} (\text {P o t t e r}, 2 C T \text {e a c h e r}, t _ {1}) \wedge \mathrm {C F} (\text {B u m b l e b e e}, 2 C T \text {e a c h e r}, t _ {3}) \wedge \\ \mathrm {C F} (M a r y, 2 C S t u d e n t, t _ {1}) \wedge \neg \mathrm {C F} (J o h n, 2 C S t u d e n t, t _ {1}) \wedge \\ \neg \mathsf {C F} (M a r y, 2 C S t u d e n t, t _ {2}) \wedge \neg \mathsf {C F} (J o h n, 2 C S t u d e n t, t _ {2}) \wedge \\ \neg \mathsf {C F} (M a r y, 2 C S t u d e n t, t _ {3}) \wedge \mathsf {C F} (J o h n, 2 C S t u d e n t, t _ {3}) \tag {15} \\ \end{array}
|
| 307 |
+
$$
|
| 308 |
+
|
| 309 |
+
Formula (15) states that: 2CTeacher holds for nobody at $t_2$ ; Potter satisfies 2CTeacher at $t_1$ only; Bumblebee satisfies 2CTeacher at $t_3$ only; Mary satisfies 2CStudent at $t_1$ only; John satisfies 2CStudent at $t_3$ only; neither Mary nor John satisfies 2CStudent at $t_2$ .
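
The temporal pattern of formulas (13)-(15) can be made concrete with a small executable sketch. The following Python fragment is only an illustration of the intended models (the names `cf`, `functional_roles`, and `violates_functional_role` are ours, not DOLCE vocabulary): it stores the classification facts of formula (15) and checks that they respect the functional-role constraint of formula (12).

```python
# A minimal, illustrative encoding of Case 2 (not part of DOLCE itself).
# CF(x, y, t) facts are stored as (player, role, time) triples.

from itertools import combinations

functional_roles = {"2CTeacher"}                 # formulas (10) and (13)

cf = {                                           # formula (15)
    ("Potter", "2CTeacher", "t1"),
    ("Bumblebee", "2CTeacher", "t3"),
    ("Mary", "2CStudent", "t1"),
    ("John", "2CStudent", "t3"),
}

def violates_functional_role(facts, funct_roles):
    """Return pairs of facts that break formula (12): a functional role
    classifying two distinct players at the same time."""
    bad = []
    for (x1, y1, t1), (x2, y2, t2) in combinations(facts, 2):
        if y1 == y2 and t1 == t2 and y1 in funct_roles and x1 != x2:
            bad.append(((x1, y1, t1), (x2, y2, t2)))
    return bad

assert not violates_functional_role(cf, functional_roles)

# The teacher role keeps existing at t2 even though nobody plays it then:
players_at_t2 = {x for (x, y, t) in cf if y == "2CTeacher" and t == "t2"}
print(players_at_t2)   # -> set(): the role is present but unplayed at t2
```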
|
| 310 |
+
|
| 311 |
+
The model presented here is the most natural approach for this kind of scenario in DOLCE.
|
| 312 |
+
|
| 313 |
+
# 3.3. Property change
|
| 314 |
+
|
| 315 |
+
# 3.3.1. Case 3.1: color change
|
| 316 |
+
|
| 317 |
+
"A flower is red in the summer. As time passes, the color changes. In autumn the flower is brown."
|
| 318 |
+
|
| 319 |
+
We have seen how to understand and model essential properties in Case 1 and roles (dynamic, contextual properties) in Case 2. To model Case 3.1, we use individual qualities, that is, properties as manifested by an object. These are properties that an object must have; they are necessary for its existence. For instance, in the case of material objects, these include mass, color, and speed. Having qualities is necessary for objects, although the values they take may change over time.
|
| 320 |
+
|
| 321 |
+
The DOLCE categories needed to model Case 3.1 are: physical object (POB), physical quality (PQ), physical (quality) space (PR), and time (T). We will also use Flower as specialization of the POB category, ColorQuality as specialization of the PQ category, and ColorSpace as specialization of the PR category. For relations we use: being subclass (IS_A), inheritance (qt), being present (PRE), parthood (P), time order (<), and (the relation) quale (ql). Figure 4 depicts some relevant classes and relations used for representing Case 3.1.
|
| 322 |
+
|
| 323 |
+
Formally, Case 3.1 can be expressed as follows.
|
| 324 |
+
|
| 325 |
+
Taxonomic claims:
|
| 326 |
+
|
| 327 |
+
$$
|
| 328 |
+
F l o w e r (x) \rightarrow \mathrm {P O B} (x) \tag {16}
|
| 329 |
+
$$
|
| 330 |
+
|
| 331 |
+
$$
|
| 332 |
+
\operatorname {C o l o r Q u a l i t y} (x) \rightarrow \mathrm {P Q} (x) \tag {17}
|
| 333 |
+
$$
|
| 334 |
+
|
| 335 |
+
$$
|
| 336 |
+
\operatorname{ColorSpace}(x) \rightarrow \operatorname{PR}(x) \tag{18}
|
| 337 |
+
$$
|
| 338 |
+
|
| 339 |
+
The previous formulas state that a flower is a physical object, a color quality is a quality of physical endurants, and a color space is a physical (quality) region.
|
| 340 |
+
|
| 341 |
+
The elements we need to model this case are:
|
| 342 |
+
|
| 343 |
+
$$
|
| 344 |
+
F l o w e r (F) \wedge C o l o r Q u a l i t y (q) \wedge \mathrm {T} (S u m m e r) \wedge \mathrm {T} (\text {A u t u m n}) \wedge \mathrm {T} (t _ {0}) \wedge \mathrm {T} (t _ {1}) \tag {19}
|
| 345 |
+
$$
|
| 346 |
+
|
| 347 |
+

|
| 348 |
+
Fig. 4. Fragment of the DOLCE taxonomy and relevant relationships for Case 3.1.
|
| 349 |
+
|
| 350 |
+
Formula (19) states that $F$ is a flower, $q$ is a color quality, Summer and Autumn are times (thus, these are not modeled as seasons in this example) and so are $t_0$ and $t_1$ . The following formula states that the flower $F$ is present during the Summer and the Autumn.
|
| 351 |
+
|
| 352 |
+
Stating the elements' presence:
|
| 353 |
+
|
| 354 |
+
$$
|
| 355 |
+
\operatorname {P R E} (F, \text {S u m m e r}) \wedge \operatorname {P R E} (F, \text {A u t u m n}) \tag {20}
|
| 356 |
+
$$
|
| 357 |
+
|
| 358 |
+
Relational claims:
|
| 359 |
+
|
| 360 |
+
$$
|
| 361 |
+
\mathfrak {q t} (q, F) \wedge \mathfrak {q l} (l, q, t _ {0}) \wedge \mathsf {P} (t _ {0}, S u m m e r) \wedge \mathfrak {q l} (l ^ {\prime}, q, t _ {1}) \wedge \mathsf {P} (t _ {1}, A u t u m n) \wedge
|
| 362 |
+
$$
|
| 363 |
+
|
| 364 |
+
$$
|
| 365 |
+
\mathsf {P} (l, \text {R e d R e g i o n}) \wedge \mathsf {P} (l ^ {\prime}, \text {B r o w n R e g i o n}) \wedge
|
| 366 |
+
$$
|
| 367 |
+
|
| 368 |
+
$$
|
| 369 |
+
\mathsf {P} (\text {R e d R e g i o n}, \text {C o l o r S p a c e}) \wedge \mathsf {P} (\text {B r o w n R e g i o n}, \text {C o l o r S p a c e}) \wedge
|
| 370 |
+
$$
|
| 371 |
+
|
| 372 |
+
$$
|
| 373 |
+
\text {S u m m e r} < \text {A u t u m n} \tag {21}
|
| 374 |
+
$$
|
| 375 |
+
|
| 376 |
+
Formula (21) states that: $q$ is the color quality of flower $F$ ; $q$ has value $l$ at time $t_0$ in the summer and has value $l'$ at time $t_1$ in the autumn where $l$ is located in the red region and $l'$ in the brown region (both regions in the color space). Finally, it states that Summer is before Autumn.
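
To see the quality pattern of formula (21) at work, the following sketch (an illustration with made-up names, not DOLCE syntax) encodes the inherence relation qt, the time-indexed quale ql, and the two color regions, and checks that the flower's color quality sits in the red region in the summer and in the brown region in the autumn.

```python
# Illustrative sketch of the quality pattern in Case 3.1 (our own encoding):
# the flower F has one color quality q, whose quale (value) sits in different
# regions of the color space at different times.

color_space = {"RedRegion": {"red", "scarlet"},
               "BrownRegion": {"brown", "light_brown"}}

qt = {"q": "F"}                       # q is the color quality of F
ql = {("q", "t0"): "red",             # value of q at t0 (a time in Summer)
      ("q", "t1"): "brown"}           # value of q at t1 (a time in Autumn)

def region_of(value):
    """Return the color-space region containing a quale, if any."""
    for region, values in color_space.items():
        if value in values:
            return region
    return None

assert region_of(ql[("q", "t0")]) == "RedRegion"
assert region_of(ql[("q", "t1")]) == "BrownRegion"
print("the flower's color quality moved from",
      region_of(ql[("q", "t0")]), "to", region_of(ql[("q", "t1")]))
```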
|
| 377 |
+
|
| 378 |
+
One can model that the flower takes all the shades from red to brown by adding the following formula (here $SC$ stands for the property of being a self-connected region, a property defined from the connection relation $C$ in the standard way, cf. (Casati and Varzi, 1996)):
|
| 379 |
+
|
| 380 |
+
$$
|
| 381 |
+
\exists p (S C (p) \wedge \mathsf {P} (p, C o l o r S p a c e) \wedge \mathsf {P} (l, p) \wedge \mathsf {P} (l ^ {\prime}, p) \wedge
|
| 382 |
+
$$
|
| 383 |
+
|
| 384 |
+
$$
|
| 385 |
+
\forall l ^ {*} (\mathsf {P} (l ^ {*}, p) \rightarrow \exists t (\mathsf {P} (t, \text {S u m m e r} + \text {A u t u m n}) \land \mathsf {q l} (l ^ {*}, q, t)))) \tag {22}
|
| 386 |
+
$$
|
| 387 |
+
|
| 388 |
+
Formula (22), combined with the earlier formulas, states that there exists a path $(p)$ in the space of colors which has the given red and brown colors of the flower as endpoints, and such that the flower takes all the colors in the path during the Summer and Autumn. In a similar way, one can also model that the change of color has no jumps, for instance preventing the flower from suddenly jumping from red to light brown and then back to scarlet.
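
A discretized reading of formula (22) may help: below, an ordered list of shades stands for the self-connected path $p$ with the red and brown quales as endpoints, and a time-indexed map records the quale of the color quality; the check verifies that every shade on the path is taken at some time. The shade names and times are invented for the example.

```python
# A discretized reading of formula (22) (an illustration, not the axiom):
# the list `path` plays the role of the self-connected region p, and
# `color_at` records the quale of the flower's color quality at each time.

path = ["red", "orange_red", "reddish_brown", "brown"]   # endpoints: red, brown

color_at = {            # time -> shade taken by the flower's color quality
    1: "red",
    2: "orange_red",
    3: "reddish_brown",
    4: "brown",
}

# Formula (22): every location on the path is the quale at some time
# within Summer + Autumn (here, simply: at some recorded time).
covered = all(shade in color_at.values() for shade in path)
print("every shade on the path is taken at some time:", covered)
```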
|
| 389 |
+
|
| 390 |
+
The model presented here follows the approach that best exploits DOLCE's treatment of qualities.
|
| 391 |
+
|
| 392 |
+
# 3.3.2. Case 3.2: speed change
|
| 393 |
+
|
| 394 |
+
"A man is walking when suddenly he starts walking faster and then breaks into a run."
|
| 395 |
+
|
| 396 |
+
This example focuses on a change that occurs during an event. The event is divided into three parts. In the first part the man is walking, that is, there is a movement based on a repeated regular movement, which is a process in DOLCE. In the second part, there is again a movement which is repeated at an increasing frequency until the desired speed is reached.$^{13}$ For this reason, we model the second part of the event as an accomplishment whose completion point is the achievement of the desired speed. Finally, the third part is a movement based on a repeated regular movement (running), which is similar to the first movement but with different characteristics. From this analysis, we model Case 3.2 as an event composed of three ordered subevents.
|
| 397 |
+
|
| 398 |
+
The DOLCE categories that we need for modeling Case 3.2 are: agentive physical object (APO), process (PRO), time quality (TQ), temporal region (TR), accomplishment (ACC), and time (T). In terms of relations we use: being subclass (IS_A), constant participation $(\mathsf{PC}_{\mathsf{C}})$, parthood (P), quality of (qt), being present (PRE), time order (<), mereological sum (+), and (the relation) quale (ql). Figure 5 depicts (some of) the classes and relationships relevant for representing Case 3.2.
|
| 399 |
+
|
| 400 |
+

|
| 401 |
+
Fig. 5. Fragment of the DOLCE taxonomy and relevant relationships for Case 3.2.
|
| 402 |
+
|
| 403 |
+
Formally, Case 3.2 can be expressed as follows.
|
| 404 |
+
|
| 405 |
+
Taxonomic claims:
|
| 406 |
+
|
| 407 |
+
$$
|
| 408 |
+
\operatorname {P e r s o n} (x) \rightarrow \operatorname {A P O} (x) \tag {23}
|
| 409 |
+
$$
|
| 410 |
+
|
| 411 |
+
$$
|
| 412 |
+
\operatorname {S p e e d Q u a l i t y} (x) \rightarrow \mathrm {T Q} (x) \tag {24}
|
| 413 |
+
$$
|
| 414 |
+
|
| 415 |
+
$$
|
| 416 |
+
\operatorname {S p e e d S p a c e} (x) \rightarrow \operatorname {T R} (x) \tag {25}
|
| 417 |
+
$$
|
| 418 |
+
|
| 419 |
+
$$
|
| 420 |
+
\operatorname {W a l k} (x) \rightarrow \operatorname {P R O} (x) \tag {26}
|
| 421 |
+
$$
|
| 422 |
+
|
| 423 |
+
$$
|
| 424 |
+
\operatorname {R u n} (x) \rightarrow \operatorname {P R O} (x) \tag {27}
|
| 425 |
+
$$
|
| 426 |
+
|
| 427 |
+
$$
|
| 428 |
+
\operatorname {S p e e d} U p (x) \rightarrow \operatorname {A C C} (x) \tag {28}
|
| 429 |
+
$$
|
| 430 |
+
|
| 431 |
+
The formulas above state that a person is an agentive physical object, speed is a quality of perdurants, a speed space is a (temporal) region for such qualities, walking and running are processes, and speeding up is an accomplishment.

The elements and the temporal constraints:
|
| 432 |
+
|
| 433 |
+
$$
|
| 434 |
+
\operatorname {P e r s o n} (p) \wedge \operatorname {P D} (e) \wedge \operatorname {W a l k} (e _ {1}) \wedge \operatorname {S p e e d U p} (e _ {2}) \wedge \operatorname {R u n} (e _ {3}) \wedge
|
| 435 |
+
$$
|
| 436 |
+
|
| 437 |
+
$$
|
| 438 |
+
S p e e d Q u a l i t y (s) \wedge S p e e d Q u a l i t y (s _ {1}) \wedge S p e e d Q u a l i t y (s _ {2}) \wedge S p e e d Q u a l i t y (s _ {3}) \wedge
|
| 439 |
+
$$
|
| 440 |
+
|
| 441 |
+
$$
|
| 442 |
+
\mathrm {T} \left(t _ {e}\right) \wedge \mathrm {T} \left(t _ {e 1}\right) \wedge \mathrm {T} \left(t _ {e 2}\right) \wedge \mathrm {T} \left(t _ {e 3}\right) \tag {29}
|
| 443 |
+
$$
|
| 444 |
+
|
| 445 |
+
The formula says that $p$ is a person, that there is a perdurant $e$ , a walking perdurant $e_1$ , a speeding-up perdurant $e_2$ , a running perdurant $e_3$ , that $s$ and $s_i$ are speed qualities, and that $t_e, t_{e1}, t_{e2}, t_{e3}$ are times.
|
| 446 |
+
|
| 447 |
+
The following formula states that $p$ exists during the time $t_e$ :
|
| 448 |
+
|
| 449 |
+
$$
|
| 450 |
+
\operatorname {P R E} \left(p, t _ {e}\right) \tag {30}
|
| 451 |
+
$$
|
| 452 |
+
|
| 453 |
+
Relational claims (note that DOLCE already ensures that the quale "l" is in the speed space):
|
| 454 |
+
|
| 455 |
+
$$
|
| 456 |
+
\mathsf {P} (l, S p e e d S p a c e) \wedge \mathsf {P} (l _ {1}, S p e e d S p a c e) \wedge
|
| 457 |
+
$$
|
| 458 |
+
|
| 459 |
+
$$
|
| 460 |
+
\mathsf {P} \left(l _ {2}, S p e e d S p a c e\right) \wedge \mathsf {P} \left(l _ {3}, S p e e d S p a c e\right) \wedge
|
| 461 |
+
$$
|
| 462 |
+
|
| 463 |
+
$$
|
| 464 |
+
\mathfrak {q t} (s, e) \wedge \mathfrak {q l} (l, s, t _ {e}) \wedge \mathfrak {q t} (s _ {1}, e _ {1}) \wedge \mathfrak {q l} (l _ {1}, s _ {1}, t _ {e 1}) \wedge
|
| 465 |
+
$$
|
| 466 |
+
|
| 467 |
+
$$
|
| 468 |
+
\mathfrak {q t} \left(s _ {2}, e _ {2}\right) \wedge \mathfrak {q l} \left(l _ {2}, s _ {2}, t _ {e 2}\right) \wedge \mathfrak {q t} \left(s _ {3}, e _ {3}\right) \wedge \mathfrak {q l} \left(l _ {3}, s _ {3}, t _ {e 3}\right) \wedge
|
| 469 |
+
$$
|
| 470 |
+
|
| 471 |
+
$$
|
| 472 |
+
e = e _ {1} + e _ {2} + e _ {3} \wedge \mathrm {P C} _ {\mathrm {C}} (p, e) \tag {31}
|
| 473 |
+
$$
|
| 474 |
+
|
| 475 |
+
This formula says that $l, l_1, l_2$ and $l_3$ are locations in SpeedSpace. It also states that $s, s_1, s_2$ and $s_3$ are qualities of the perdurants $e, e_1, e_2$ and $e_3$ , respectively, and have locations $l, l_1, l_2$ and $l_3$ . Finally, it states that $p$ constantly participates in the perdurant $e$ which is the sum of the perdurants $e_1, e_2, e_3$ .
|
| 476 |
+
|
| 477 |
+
We can now characterize the core property of walking and of running: these are events across which the speed of the participant is qualitatively stable. This is what formula (32) states, by requiring the speed quality of a walking (or running) perdurant to remain in the same position during the perdurant, say within the range for walking or for running.$^{14}$ A speeding-up event is an event in which the frequency of a process increases; in the specific case, the change leads from a walking process to a running process. To characterize events in which speed regularly changes, we introduce formula (33): this formula states that there is at least one speed change during the event, and that any speed change during the event can only increase the speed (here $<_{speed}$ is the ordering in the speed quality space).
|
| 480 |
+
|
| 481 |
+
$$
|
| 482 |
+
\begin{array}{l} \left(\mathbf {q t} (s, x) \wedge \left(W a l k (x) \vee R u n (x)\right)\right)\rightarrow \forall l _ {i}, l _ {j}, t _ {i}, t _ {j} \left(\mathbf {q l} \left(l _ {i}, s, t _ {i}\right) \wedge \right. \\ \mathfrak {q l} \left(l _ {j}, s, t _ {j}\right) \wedge \mathsf {P} \left(t _ {i}, t _ {x}\right) \wedge \mathsf {P} \left(t _ {j}, t _ {x}\right)\rightarrow l _ {i} = l _ {j}) \tag {32} \\ \end{array}
|
| 483 |
+
$$
|
| 484 |
+
|
| 485 |
+
$$
|
| 486 |
+
q t (s, x) \wedge S p e e d U p (x) \rightarrow
|
| 487 |
+
$$
|
| 488 |
+
|
| 489 |
+
$$
|
| 490 |
+
\exists l _ {i}, l _ {j}, t _ {i}, t _ {j} \left(\mathrm {P} \left(t _ {i}, t _ {x}\right) \wedge \mathrm {P} \left(t _ {j}, t _ {x}\right) \wedge \mathrm {q l} \left(l _ {i}, s, t _ {i}\right) \wedge \mathrm {q l} \left(l _ {j}, s, t _ {j}\right) \wedge l _ {i} \neq l _ {j}\right) \wedge
|
| 491 |
+
$$
|
| 492 |
+
|
| 493 |
+
$$
|
| 494 |
+
\forall l _ {i}, l _ {j}, t _ {i}, t _ {j} \left(\mathrm {P} \left(t _ {i}, t _ {x}\right) \wedge \mathrm {P} \left(t _ {j}, t _ {x}\right) \wedge \mathrm {q l} \left(l _ {i}, s, t _ {i}\right) \wedge \mathrm {q l} \left(l _ {j}, s, t _ {j}\right)\rightarrow \left(l _ {i} \leqslant_ {\text {s p e e d}} l _ {j} \leftrightarrow t _ {i} < t _ {j}\right)\right) \tag {33}
|
| 495 |
+
$$
|
| 496 |
+
|
| 497 |
+
DOLCE, together with the formulas above that are specific to Case 3.2, suffices to model the example of this section. To model continuity in speed change, one can use the approach exploited in formula (22).
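
The intent of formula (33) can be illustrated with a small check over sampled speed quales of a speeding-up perdurant: speed may never decrease over time and at least one change must occur. The sample values and function names below are ours, for illustration only.

```python
# Illustrative check of formula (33) for a SpeedUp event: speed quales are
# recorded at sample times inside the event; speeds must never decrease and
# at least two distinct values must occur. Values are made up for the example.

speed_quale = [            # (time, speed in m/s) samples during the event e2
    (0.0, 1.4),            # walking pace
    (1.0, 1.9),
    (2.0, 2.6),
    (3.0, 3.3),            # running pace reached
]

def is_speed_up(samples):
    """True iff speed is non-decreasing over time and changes at least once."""
    ordered = sorted(samples)                       # order by time
    speeds = [v for _, v in ordered]
    monotone = all(a <= b for a, b in zip(speeds, speeds[1:]))
    changes = len(set(speeds)) > 1                  # at least one speed change
    return monotone and changes

print(is_speed_up(speed_quale))            # True for the sampled event
print(is_speed_up([(0, 1.4), (1, 1.4)]))   # False: no change at all
```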
|
| 498 |
+
|
| 499 |
+
As for the previous case, the model presented here shows the most natural modeling approach for this kind of scenario in DOLCE.
|
| 500 |
+
|
| 501 |
+
# 3.4. Case 4: Event Change
|
| 502 |
+
|
| 503 |
+
"A man is walking to the station, but before he gets there, he turns around and goes home."
|
| 504 |
+
|
| 505 |
+
Following the viewpoint of DOLCE, this case is composed of (sub)events that correspond to the execution of distinct plans: reaching the station and reaching home. The first event (a man walking to the station) and the third (a man going home) are processes that are intended to be parts of a plan execution, that is, parts of distinct accomplishments. The intermediate event is an accomplishment (turning towards a direction) which is part of the second plan, namely, reaching home. To model this case, we need to include in the formalization the purpose of the (sub) events.
|
| 506 |
+
|
| 507 |
+
The DOLCE categories that we need for modeling Case 4 are: physical object (POB), agentive physical object (APO), concept (C), process (PRO), accomplishment (ACC), temporal quality (TQ), and time (T). We will also use DirectionQuality and SpeedQuality as specializations of the quality category. In terms of relations we use: subsumption (IS_A), constant participation $(\mathsf{PC}_{\mathsf{C}})$, being present (PRE), mereological sum (+), parthood (P), quale (ql), inheritance (qt), classification (CF), and temporal order (<). In addition, we introduce the new relationship ExecutesPlan to connect a perdurant to a plan. This relation is used to state that an event complies with the plan requirements. For instance, if a plan $p$ states that a person must go first to point A and then to point B, then any event $e$ that takes that person to point A satisfies ExecutesPlan(e,p) because it executes the plan even though it does not complete it. Figure 6 depicts some relevant classes and relationships.
|
| 508 |
+
|
| 509 |
+

|
| 510 |
+
Fig. 6. Fragment of the DOLCE taxonomy and relevant relationships for Case 4
|
| 511 |
+
|
| 512 |
+
Formally, Case 4 can be expressed as follows.
|
| 513 |
+
|
| 514 |
+
Taxonomic claims:
|
| 515 |
+
|
| 516 |
+
$$
|
| 517 |
+
\operatorname {P e r s o n} (x) \rightarrow \operatorname {A P O} (x) \tag {34}
|
| 518 |
+
$$
|
| 519 |
+
|
| 520 |
+
$$
|
| 521 |
+
\operatorname {D i r e c t i o n Q u a l i t y} (x) \rightarrow \mathrm {T Q} (x) \tag {35}
|
| 522 |
+
$$
|
| 523 |
+
|
| 524 |
+
$$
|
| 525 |
+
\operatorname {S p e e d Q u a l i t y} (x) \rightarrow \operatorname {T Q} (x) \tag {36}
|
| 526 |
+
$$
|
| 527 |
+
|
| 528 |
+
$$
|
| 529 |
+
\operatorname {W a l k} (x) \rightarrow \operatorname {P R O} (x) \tag {37}
|
| 530 |
+
$$
|
| 531 |
+
|
| 532 |
+
$$
|
| 533 |
+
\operatorname {T u r n} (x) \rightarrow \operatorname {A C C} (x) \tag {38}
|
| 534 |
+
$$
|
| 535 |
+
|
| 536 |
+
$$
|
| 537 |
+
\operatorname {P l a n} (x) \rightarrow \mathrm {C} (x) \tag {39}
|
| 538 |
+
$$
|
| 539 |
+
|
| 540 |
+
The previous formulas state that a person is an agentive physical object, that direction and speed qualities are qualities of perdurants, that walking is a process and turning is an accomplishment, and that a plan is a concept.
|
| 541 |
+
|
| 542 |
+
The elements we need to model this case are a person, a perdurant, two walking events and one turning event, two plans, and three times:
|
| 543 |
+
|
| 544 |
+
$$
|
| 545 |
+
\operatorname {P e r s o n} (a) \wedge \operatorname {P D} (e) \wedge \operatorname {W a l k} (e _ {1}) \wedge \operatorname {T u r n} (e _ {2}) \wedge
|
| 546 |
+
$$
|
| 547 |
+
|
| 548 |
+
$$
|
| 549 |
+
\operatorname {W a l k} \left(e _ {3}\right) \wedge \operatorname {P l a n} \left(p _ {1}\right) \wedge \operatorname {P l a n} \left(p _ {2}\right) \wedge \mathrm {T} \left(t _ {e 1}\right) \wedge \mathrm {T} \left(t _ {e 2}\right) \wedge \mathrm {T} \left(t _ {e 3}\right) \tag {40}
|
| 550 |
+
$$
|
| 551 |
+
|
| 552 |
+
Stating the temporal constraints and the elements' presence:
|
| 553 |
+
|
| 554 |
+
$$
t_{e1} < t_{e2} < t_{e3} \wedge \mathsf{ql}_{T}(t_{e1}, e_1) \wedge \mathsf{ql}_{T}(t_{e2}, e_2) \wedge \mathsf{ql}_{T}(t_{e3}, e_3) \wedge \operatorname{PRE}(a, t_e) \wedge \operatorname{PRE}(p_1, t_{e1}) \wedge
$$

$$
\operatorname{PRE}(p_2, t_{e2}) \wedge \operatorname{PRE}(p_2, t_{e3}) \wedge \neg\operatorname{PRE}(p_1, t_{e2}) \wedge \neg\operatorname{PRE}(p_1, t_{e3}) \wedge \neg\operatorname{PRE}(p_2, t_{e1}) \tag{41}
$$
|
| 561 |
+
|
| 562 |
+
Formula (41) states the ordering of the times, that $t_{ei}$ is the time of perdurant $e_i$, that person $a$ is present at all the times, and that plan $p_1$ is present during $e_1$ while plan $p_2$ is present during $e_2$ and $e_3$. It also says that plan $p_1$ is not present during $e_2$ and $e_3$, while plan $p_2$ is not present during $e_1$.
|
| 563 |
+
|
| 564 |
+
The following formula binds the use of the execution relation to pairs consisting of one perdurant and one concept; we do not characterize it further:
|
| 565 |
+
|
| 566 |
+
$$
|
| 567 |
+
E x e c u t e s P l a n (x, y) \rightarrow \mathrm {P D} (x) \wedge \mathrm {C} (y) \tag {42}
|
| 568 |
+
$$
|
| 569 |
+
|
| 570 |
+
We now write $t_{2i}$ and $t_{2f}$ for the initial and final time of event $e_2$ :
|
| 571 |
+
|
| 572 |
+
$$
\begin{array}{l}
\operatorname{DirectionQuality}(s) \wedge \mathsf{qt}(s, e) \wedge \mathsf{ql}(l_1, s, t_{e1}) \wedge \mathsf{ql}(l_2, s, t_{e2}) \wedge \mathsf{ql}(l_3, s, t_{e3}) \wedge \\
\mathsf{ql}(l_1, s, t_{2i}) \wedge \mathsf{ql}(l_3, s, t_{2f}) \wedge l_1 \neq l_3 \wedge e = e_1 + e_2 + e_3 \wedge \mathsf{PC}_{\mathsf{C}}(a, e) \wedge \\
\operatorname{ExecutesPlan}(e_1, p_1) \wedge \operatorname{ExecutesPlan}(e_2 + e_3, p_2) \tag{43}
\end{array}
$$
|
| 579 |
+
|
| 580 |
+
Formula (43) states that the direction quality $s$ of the event $e$ changes during the turning subevent $e_2$, and that event $e_1$ executes plan $p_1$ and event $e_2 + e_3$ executes plan $p_2$. Finally, it states that $e_1$, $e_2$ and $e_3$ span the whole event $e$ and that person $a$ participates in the whole event.
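
The following sketch (with invented identifiers, not DOLCE syntax) encodes the sub-events, plans and ExecutesPlan facts of formulas (40)-(43) and checks the typing constraint of formula (42), namely that ExecutesPlan only relates perdurants to concepts, together with the fact that the two plans are present at disjoint times.

```python
# Illustrative encoding of Case 4 (our own names, not DOLCE syntax).
# Perdurants, plans, and ExecutesPlan facts from formulas (40)-(43).

perdurants = {"e1", "e2", "e3", "e1+e2+e3"}     # e = e1 + e2 + e3
concepts = {"p1", "p2"}                          # reach-the-station, reach-home

executes_plan = {                                # formula (43)
    ("e1", "p1"),
    ("e2+e3", "p2"),
}

# "e2+e3" is the mereological sum of the turn and the walk home; we register
# it explicitly so the typing check below can treat it as a perdurant.
perdurants.add("e2+e3")

def well_typed(facts, pds, cs):
    """Formula (42): ExecutesPlan only relates a perdurant to a concept."""
    return all(x in pds and y in cs for x, y in facts)

print(well_typed(executes_plan, perdurants, concepts))   # True

# Plan presence from formula (41): p1 is around only during e1,
# p2 only during e2 and e3.
present = {"p1": {"te1"}, "p2": {"te2", "te3"}}
print(present["p1"].isdisjoint(present["p2"]))            # True
```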
|
| 581 |
+
|
| 582 |
+
To state that an event $x$ is a walking event, we can use a formula similar to the one introduced in Case 3.2, reported below as (44). To characterize the core property of a turning event $y$, we use formula (45), where $l_{1}$ and $l_{3}$ are as in formula (43) and we write $t_y$, $t_{yi}$ and $t_{yf}$ for the temporal interval of event $y$ and for its initial and final instants, respectively.$^{15}$
|
| 583 |
+
|
| 584 |
+
$$
|
| 585 |
+
\operatorname {S p e e d Q u a l i t y} (s) \wedge \mathfrak {q t} (s, x) \wedge \operatorname {W a l k} (x) \rightarrow
|
| 586 |
+
$$
|
| 587 |
+
|
| 588 |
+
$$
|
| 589 |
+
\forall l _ {i}, l _ {j}, t _ {i}, t _ {j} (\mathsf {q l} (l _ {i}, s, t _ {i}) \wedge \mathsf {q l} (l _ {j}, s, t _ {j}) \wedge \mathsf {P} (t _ {i}, t _ {x}) \wedge \mathsf {P} (t _ {j}, t _ {x}) \rightarrow l _ {i} = l _ {j}) \tag {44}
|
| 590 |
+
$$
|
| 591 |
+
|
| 592 |
+
$$
|
| 593 |
+
\begin{array}{l}
\operatorname{DirectionQuality}(s) \wedge \mathsf{qt}(s, y) \wedge \operatorname{Turn}(y) \wedge \mathsf{ql}(l_1, s, t_{yi}) \wedge \mathsf{ql}(l_3, s, t_{yf}) \wedge t_i < t_j \wedge \\
l_1 < l_3 \wedge \mathsf{P}(t_i, t_y) \wedge \mathsf{P}(t_j, t_y) \wedge \mathsf{ql}(l_i, s, t_i) \wedge \mathsf{ql}(l_j, s, t_j) \wedge l_i + r_i = l_j + r_j = l_3 \rightarrow \\
0 \leqslant r_j < r_i \tag{45}
\end{array}
|
| 594 |
+
$$
|
| 595 |
+
|
| 596 |
+
The modeling approach we followed here is the preferred one in DOLCE for this kind of scenarios.
|
| 597 |
+
|
| 598 |
+
# 3.5. Case 5: Concept Evolution
|
| 599 |
+
|
| 600 |
+
Background: marriage is a contract between two people that is present in most social and cultural systems, and it can change in major (e.g., gender constraints) and minor (e.g., procedures for ending a marriage) aspects. "Marriage is a contract that is regulated by civil and social constraints. These constraints can change but the meaning of marriage continues over time."
|
| 601 |
+
|
| 602 |
+
There is disagreement about the nature of concepts, including whether concepts can change in time while preserving identity. Some argue that concepts have a stable nature (their characterizations cannot change in time), while others argue the opposite (Masolo et al., 2019). Similarly to the case of artifacts presented in Sect. 3.1, DOLCE does not prescribe the adoption of one or the other view, allowing in this way the knowledge engineer to select the approach that better fits their modeling needs and world-view. For instance, the example mentioned above assumes that concepts can persist through time while partially changing in their characterization. In particular, it points to a social scenario where the concepts characterizing a socio-cultural system are associated with different rules across time because of the legal and cultural evolution of the society. We shall therefore take this perspective for the sake of this case.
|
| 605 |
+
|
| 606 |
+
The DOLCE categories that we need for modeling Case 5 are: social object (SOB), concept (C), and time (T). In terms of relations, we use: subsumption (IS_A), being present (PRE), and classification (CF). Figure 7 depicts the DOLCE classes and relationships used for Case 5.
|
| 607 |
+
|
| 608 |
+

|
| 609 |
+
Fig. 7. Fragment of the DOLCE taxonomy and relevant relationships for Case 5.
|
| 610 |
+
|
| 611 |
+
Formally, Case 5 can be expressed as follows.
|
| 612 |
+
|
| 613 |
+
Taxonomic claims (a social relationship, SocRelationship, holds for various types of unions between people; the notions of social marriage and legal marriage are intended to be elements in the DOLCE category of concepts):
|
| 614 |
+
|
| 615 |
+
$$
|
| 616 |
+
\operatorname {S o c M a r r i a g e} (x) \rightarrow \mathrm {C} (x) \tag {46}
|
| 617 |
+
$$
|
| 618 |
+
|
| 619 |
+
$$
|
| 620 |
+
\operatorname {L e g M a r r i a g e} (x) \rightarrow \mathrm {C} (x) \tag {47}
|
| 621 |
+
$$
|
| 622 |
+
|
| 623 |
+
$$
|
| 624 |
+
\operatorname {S o c R e l a t i o n s h i p} (x) \rightarrow \operatorname {S O B} (x) \tag {48}
|
| 625 |
+
$$
|
| 626 |
+
|
| 627 |
+
The elements and the temporal constraints that we need are a social relationship $M$, a social concept of marriage $sm$, two legal concepts of marriage ($lm$ and $lm'$), and two times:
|
| 628 |
+
|
| 629 |
+
$$
\operatorname{SocRelationship}(M) \wedge \operatorname{SocMarriage}(sm) \wedge \operatorname{LegMarriage}(lm) \wedge \operatorname{LegMarriage}(lm') \wedge \mathrm{T}(t) \wedge \mathrm{T}(t') \tag{49}
$$
|
| 634 |
+
|
| 635 |
+
The social relationship is present at both times and so is the social marriage; one legal marriage concept exists at $t$, the other at $t'$. The elements' presence is then as follows:
|
| 636 |
+
|
| 637 |
+
$$
|
| 638 |
+
\operatorname {P R E} (M, t) \wedge \operatorname {P R E} (M, t ^ {\prime}) \wedge \operatorname {P R E} (s m, t) \wedge \operatorname {P R E} (s m, t ^ {\prime}) \wedge
|
| 639 |
+
$$
|
| 640 |
+
|
| 641 |
+
$$
|
| 642 |
+
\mathrm {P R E} (l m, t) \wedge \neg \mathrm {P R E} (l m, t ^ {\prime}) \wedge \neg \mathrm {P R E} (l m ^ {\prime}, t) \wedge \mathrm {P R E} (l m ^ {\prime}, t ^ {\prime}) \tag {50}
|
| 643 |
+
$$
|
| 644 |
+
|
| 645 |
+
The relational claims are simple: first the two legal concepts are different; second if the social relationship is classified by the social marriage concept at a time, then it has to satisfy the legal concept existing at that very time.
|
| 646 |
+
|
| 647 |
+
$$
|
| 648 |
+
lm \neq lm' \wedge (\mathrm{CF}(sm, M, t) \rightarrow \mathrm{CF}(lm, M, t)) \wedge (\mathrm{CF}(sm, M, t') \rightarrow \mathrm{CF}(lm', M, t')) \tag{51}
|
| 649 |
+
$$
|
| 650 |
+
|
| 651 |
+
The same concept of social marriage ($sm$) persists through time, from $t$ to $t'$, while changing its legal characterization (from $lm$ to $lm'$). For $sm$ to classify a marriage relationship $M$ at $t$, it is necessary that $M$ is also classified by the legal marriage concept $lm$ (so satisfying concept $lm$ is necessary at $t$ for $sm$), while at $t'$ it is the new legal concept $lm'$ that $M$ must satisfy in order to be classified by $sm$.
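
The interplay of formulas (49)-(51) can be illustrated as follows: classification facts are stored as (concept, entity, time) triples, the legal-marriage concept present at each time is recorded separately, and a simple check verifies that whenever $sm$ classifies $M$ at a time, the legal concept existing at that time classifies $M$ as well. All identifiers below are illustrative.

```python
# Illustrative check of formula (51). Classification facts CF(concept, x, t)
# are triples; `legal_at` records which legal-marriage concept is present
# at each time (formula (50)). Names are ours, for illustration only.

legal_at = {"t": "lm", "t_prime": "lm_prime"}    # PRE(lm, t), PRE(lm', t')

cf = {
    ("sm", "M", "t"),        ("lm", "M", "t"),
    ("sm", "M", "t_prime"),  ("lm_prime", "M", "t_prime"),
}

def respects_formula_51(facts, legal):
    """If sm classifies M at time t, the legal concept present at t must too."""
    for concept, x, t in facts:
        if concept == "sm" and (legal[t], x, t) not in facts:
            return False
    return True

print(respects_formula_51(cf, legal_at))    # True: sm's classification tracks
                                            # the legal concept of each time
```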
|
| 652 |
+
|
| 653 |
+
The model presented here is quite natural in DOLCE for this kind of scenario. By changing the assumptions we made in the initial discussion of this case, other approaches can be put forward, e.g., the use of role theory applied to concepts. Note also that these modeling approaches are not limited to purely social concepts. They apply to technology-dependent concepts such as that of road, which has had different qualifications across history (e.g., in ancient Rome, during the 19th century, or today).
|
| 654 |
+
|
| 655 |
+
# 4. Ontology usage and community impact
|
| 656 |
+
|
| 657 |
+
Foundational ontologies enjoy a double-edged reputation in several communities, spanning conceptual modeling, the semantic web, natural language processing, and so on. They are intuitively needed by most data-intensive applications, but their precise utility at different steps of design methodologies is not widely agreed upon, and certainly not for the same reasons. As a consequence, the wide application of DOLCE ranges from the simple reuse of a few categories to delving into full-fledged axiomatic versions. We provide here a quick description of the OWL version of DOLCE and a list of application areas and specific reuse cases, with a few comments on the current opportunity for foundational approaches to ontology design. (For the new CLIF and OWL versions of DOLCE produced for the ISO 21838 standard under development, see http://www.loa.istic.cnr.it/index.php/dolce/.)
|
| 658 |
+
|
| 659 |
+
DOLCE "lite" versions take into account the requirements from semantic web modeling practices, and the need for simplified semantics as in natural language processing lexicons. They also address the need for some extensions of DOLCE categories, by reusing the D&S (Description and Situations) ontology pattern framework, which was early designed to overcome the expressivity limits of OWL, later much facilitated by punning in OWL2 W3C OWL Working Group (2012) (i.e. the ability to use a constant as the name for a class, an individual, or a binary relation).
|
| 660 |
+
|
| 661 |
+
In particular, the DOLCE+D&S Ultralite$^{16}$ (DUL) OWL ontology was intended to popularize DOLCE to the Semantic Web community. DUL uses DOLCE, D&S, and a few more ontology design patterns (Plan$^{17}$, Information Object$^{18}$, and Collection) that extend DOLCE. Presutti and Gangemi (2016) give an account of DUL as an architecture of ontology design patterns inspired by those integrated theories, and Gangemi (2008) offers an integrated axiomatization of plans, information objects and collections in D&S. DUL is the result of various refinements and integrations of the OWL versions of those theories. The main motivations for conceiving DUL include: (i) intuitive terminology (e.g. substituting Endurant and Perdurant with Object and Event), (ii) lighter axiomatization (e.g. giving up some predicate indexing), (iii) integration of other theories, and (iv) semantic-web-oriented OWL2 modeling styles.
|
| 664 |
+
|
| 665 |
+
As reported in (Presutti and Gangemi, 2016), even a non-exhaustive search makes one stumble upon the great variety of DUL reuse, citing 25 large ontology projects covering: e-learning systems; water quality systems; multimedia (annotation facets, content annotation, audiovisual formal descriptions); medicine (modelling intracranial aneurysms, annotating medical images and neuroimages, and modelling biomedical research); law; events; geo-spatial data; robotics and automation; industry and smart products; textile manufacturing; cybersecurity; enterprise integration; process mining; disaster management; semantic sensor networks; and customer relationship management.
|
| 666 |
+
|
| 667 |
+
In addition, DUL has been applied as a tool to improve existing semantic resources. This has happened for example in identifying and fixing millions of inconsistencies in DBpedia, discovering on the go modelling anti-patterns that were completely opaque to the axioms of the DBpedia ontology (Paulheim and Gangemi, 2015). Another example is the application of DUL to improve the quality of lexical resources, from the very inception of DOLCE, when it was used to reorganize the WordNet top level and led Princeton WordNet developers to include the individual/class distinction in their lexicon (Gangemi et al., 2003), to the recent massive Framester knowledge graph (Gangemi et al., 2016), which unifies many different linguistic databases under a frame semantics and maps them to widely used ontologies under a common DUL hat. Several other standards, or de facto standards, are based on or compatible with DUL, e.g., CIDOC CRM (CIDOC Conceptual Reference Model)<sup>19</sup>, SSN (Semantic Sensor Network Ontology)<sup>20</sup> and SAREF (Smart REFerence Ontology)<sup>21</sup>.
|
| 668 |
+
|
| 669 |
+
An important lesson learnt is that DOLCE can be used to foster different design approaches:
|
| 670 |
+
|
| 671 |
+
(1) as an upper ontology, in order to support a minimal agreement about a few distinctions;
|
| 672 |
+
(2) as an expressive axiomatic theory, in order to associate one's ontological commitment to well-defined criteria, and to perform (detailed) meaning negotiation;
|
| 673 |
+
(3) as a coherence/consistency stabilizer, able to reveal problems in a conceptualization against both its domain schema, and the data. This approach could also be used to reveal unwanted inferences, even when no inconsistency emerges;
|
| 674 |
+
(4) as a source of patterns that improve the quality of ontologies by applying the good practices encoded in DOLCE, and eventually ameliorating semantic interoperability.
|
| 675 |
+
|
| 676 |
+
Especially (3) and (4) are central to the current needs of the huge knowledge graphs maintained by the Web stakeholders, but (2) is also finally emerging as a potential tool to help clarify the underlying semantics in domains that have been less prone to formalization in the past (e.g. sociology).
|
| 677 |
+
|
| 678 |
+
# Acknowledgements
|
| 679 |
+
|
| 680 |
+
Over the years many people have contributed to the application of DOLCE, to the discussion of the best modeling approaches, and to the development of DOLCE's modular extensions. We take the opportunity to thank in particular Emanuele Bottazzi, Francesco Compagno and Alessandro Oltramari. DOLCE was conceptualized and developed as part of the European project WonderWeb (IST-2001-33052) and many public and industrial projects reused it since then. Among these, the authors thank the European project OntoCommons (GA 958371) for co-funding the writing of this paper.
|
| 681 |
+
|
| 682 |
+
# References
|
| 683 |
+
|
| 684 |
+
Biccheri, L., Ferrario, R. & Porello, D. (2020). Needs and Intentionality. In B. Brodaric and F. Neuhaus (Eds.), Formal Ontology in Information Systems - Proceedings of the 11th International Conference, FOIS 2020. Frontiers in Artificial Intelligence and Applications (Vol. 330, pp. 125-139). Amsterdam, The Netherlands: IOS Press.
|
| 685 |
+
Borgo, S. & Masolo, C. (2009). Foundational Choices in DOLCE. In S. Staab and R. Studer (Eds.), Handbook on Ontologies (2nd ed.). Springer.
|
| 686 |
+
Borgo, S. & Vieu, L. (2009). Artifacts in Formal Ontology. In A. Meijers (Ed.), Handbook of the Philosophy of the Technological Sciences. Technology and Engineering Sciences (Vol. 9, pp. 273-307). Elsevier.
|
| 687 |
+
Borgo, S., Carrara, M., Garbacz, P. & Vermaas, P.E. (2010). Formalizations of functions within the DOLCE ontology. In Proceedings of the Eighth International Symposium on Tools and Methods of Competitive Engineering TMCE (Vol. 1, pp. 113-126). CiteSeer.
|
| 688 |
+
Borgo, S., Franssen, M., Garbacz, P., Kitamura, Y., Mizoguchi, R. & Vermaas, P.E. (2014). Technical artifacts: An integrated perspective. Appl. Ontology, 9(3-4), 217-235.
|
| 689 |
+
Bottazzi, E. & Ferrario, R. (2009). Preliminaries to a DOLCE Ontology of Organizations. International Journal of Business Process Integration and Management, Special Issue on Vocabularies, Ontologies and Business Rules for Enterprise Modeling, 4(4), 225-238.
|
| 690 |
+
Casati, R. & Varzi, A.C. (Eds.) (1996). Events. Aldershot: Dartmouth.
|
| 691 |
+
Conigliaro, D., Ferrario, R., Hudelot, C. & Porello, D. (2017). Integrating Computer Vision Algorithms and Ontologies for Spectator Crowd Behavior Analysis. In V. Murino, M. Cristani, S. Shah and S. Savarese (Eds.), Group and Crowd Behavior for Computer Vision, 1st Edition (pp. 297-319). Academic Press. doi:10.1016/B978-0-12-809276-7.00016-3.
|
| 692 |
+
Ferrario, R. & Oltramari, A. (2004). Towards a Computational Ontology of Mind. In A.C. Varzi and L. Vieu (Eds.), Formal Ontology in Information Systems, Proceedings of the Intl. Conf. FOIS 2004 (pp. 287-297). IOS Press.
|
| 693 |
+
Ferrario, R. & Porello, D. (2015). Towards a Conceptualization of Sociomaterial Entanglement. In H. Christiansen, I. Stojanovic and G.A. Papadopoulos (Eds.), Modeling and Using Context - 9th International and Interdisciplinary Conference, CONTEXT 2015, Larnaca, Cyprus, November 2-6, 2015. Proceedings. Lecture Notes in Computer Science (Vol. 9405, pp. 32-46). Springer. doi:10.1007/978-3-319-25591-0_3.
|
| 694 |
+
Fitting, M. & Mendelsohn, R.L. (2012). First-order modal logic (Vol. 277). Springer Science & Business Media.
|
| 695 |
+
Gangemi, A. (2008). Norms and plans as unification criteria for social collectives. Autonomous Agents and Multi-Agent Systems, 17(1), 70-112. doi:10.1007/s10458-008-9038-9.
|
| 696 |
+
Gangemi, A., Guarino, N., Masolo, C. & Oltramari, A. (2003). Sweetening WORDNET with DOLCE. AI Mag., 24(3), 13-24. doi:10.1609/aimag.v24i3.1715.
|
| 697 |
+
Gangemi, A., Alam, M., Asprino, L., Presutti, V. & Recupero, D.R. (2016). Framester: A Wide Coverage Linguistic Linked Data Hub. In EKAW.
|
| 698 |
+
Gärdenfors, P. (2000). Conceptual Spaces: the Geometry of Thought. Cambridge, Massachusetts: MIT Press.
|
| 699 |
+
Guarino, N. (2009). The Ontological Level: Revisiting 30 Years of Knowledge Representation. In A. Borgida, V.K. Chaudhri, P. Giorgini and E.S.K. Yu (Eds.), Conceptual Modeling: Foundations and Applications - Essays in Honor of John Mylopoulos. Lecture Notes in Computer Science (Vol. 5600, pp. 52-67). Springer. doi:10.1007/978-3-642-02463-4_4.
|
| 700 |
+
Guarino, N. & Welty, C.A. (2002). Evaluating ontological decisions with OntoClean. Commun. ACM, 45(2), 61-65. doi:10.1145/503124.503150.
|
| 701 |
+
Guarino, N. & Welty, C. (2009). An Overview of OntoClean. In S. Staab and R. Studer (Eds.), Handbook on Ontologies. International Handbooks on Information Systems (pp. 201-220). Springer Berlin Heidelberg. doi:10.1007/978-3-540-92673-3_9.
|
| 702 |
+
ISO 24707 (2018). ISO/IEC-24707:2018 - Information technology — Common Logic (CL): a framework for a family of logic-based languages. International Organization for Standardization.
|
| 703 |
+
|
| 704 |
+
Kutz, O. & Mossakowski, T. (2011). A Modular Consistency Proof for DOLCE. In W. Burgard and D. Roth (Eds.), Proceedings of the Twenty-Fifth AAAI Conference on Artificial Intelligence, AAAI 2011, San Francisco, California, USA, August 7-11, 2011. AAAI Press. http://www.aaai.org/ocs/index.php/AAAI/AAAI11/paper/view/3754.
|
| 705 |
+
Masolo, C., Borgo, S., Gangemi, A., Guarino, N. & Oltramari, A. (2003). WonderWeb Deliverable D18. Technical report, CNR.
|
| 706 |
+
Masolo, C., Vieu, L., Bottazzi, E., Catenacci, C., Ferrario, R., Gangemi, A. & Guarino, N. (2004). Social Roles and their Descriptions. In Proceedings of the 9th International Conference on the Principles of Knowledge Representation and Reasoning (KR-2004) (pp. 267-277).
|
| 707 |
+
Masolo, C., Sanfilippo, E., Lamé, M. & Pittet, P. (2019). Modeling Concept Drift for Historical Research in the Digital Humanities. In 1st International Workshop on Ontologies for Digital Humanities and their Social Analysis (WODHSA).
|
| 708 |
+
Mizoguchi, R., Kitamura, Y. & Borgo, S. (2016). A unifying definition for artifact and biological functions. Appl. Ontology, 11(2), 129-154.
|
| 709 |
+
Paulheim, H. & Gangemi, A. (2015). Serving DBpedia with DOLCE - More than Just Adding a Cherry on Top. In *The Semantic Web - ISWC 2015*. ISWC 2015, Part I (pp. 180-196). Springer, Cham. doi:10.1007/978-3-319-25007-6_11.
|
| 710 |
+
Porello, D., Bottazzi, E. & Ferrario, R. (2014a). The Ontology of Group Agency. In P. Garbacz and O. Kutz (Eds.), Formal Ontology in Information Systems, Proceedings of the Intl. Conf. FOIS 2014. Frontiers in Artificial Intelligence and its Applications (Vol. 267, pp. 183-196). IOS Press.
|
| 711 |
+
Porello, D., Bottazzi, E. & Ferrario, R. (2014b). The Ontology of Group Agency. In FOIS (pp. 183-196).
|
| 712 |
+
Porello, D., Setti, F., Ferrario, R. & Cristani, M. (2013). Multiagent Socio-technical Systems. An Ontological Approach. In T. Balke, F. Dignum, M.B. Riemsdijk and A.K. Chopra (Eds.), Coordination, Organisations, Institutions and Norms in Agent Systems IX (COIN 2013). LNAI (Vol. 8368, pp. 42–63). Springer Verlag.
|
| 713 |
+
Presutti, V. & Gangemi, A. (2016). Dolce+D&S Ultralite and its main Ontology Design Patterns. Ontology Engineering with Ontology Design Patterns, 25, 81-103.
|
| 714 |
+
Vieu, L., Borgo, S. & Masolo, C. (2008). Artefacts and Roles: Modelling Strategies in a Multiplicative Ontology. In C. Eschenbach and M. Grüninger (Eds.), Formal Ontology in Information Systems, Proceedings of the Fifth International Conference, FOIS 2008, Saarbrücken, Germany, October 31st - November 3rd, 2008. Frontiers in Artificial Intelligence and Applications (Vol. 183, pp. 121-134). IOS Press. doi:10.3233/978-1-58603-923-3-121.
|
| 715 |
+
W3C OWL Working Group (2012). OWL 2 Web Ontology Language Document Overview (Second Edition) - W3C Recommendation 11 December 2012. World Wide Web Consortium (W3C). http://www.w3.org/TR/owl2-overview/.
|
data/2023/2308_01xxx/2308.01597/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:60951162ed0c7960d90e5b749bff625960d188953b3d863037fe6b72dc7e8659
|
| 3 |
+
size 717177
|
data/2023/2308_01xxx/2308.01597/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_content_list.json
ADDED
|
@@ -0,0 +1,1617 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "BEVControl: Accurately Controlling Street-view Elements with Multi-perspective Consistency via BEV Sketch Layout",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
163,
|
| 8 |
+
130,
|
| 9 |
+
805,
|
| 10 |
+
176
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Kairui Yang $^{1*}$ Enhui Ma $^{1*}$ Jibin Peng $^{1}$ Qing Guo $^{2}$ Jianping Wu $^{3}$ Di Lin $^{1\\dagger}$ Kaicheng Yu $^{4}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
119,
|
| 19 |
+
203,
|
| 20 |
+
908,
|
| 21 |
+
222
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ Tianjin University $^{2}$ IHPC and CFAR, Agency for Science, Technology and Research, Singapore $^{3}$ Tsinghua University $^{4}$ Westlake University",
|
| 28 |
+
"bbox": [
|
| 29 |
+
88,
|
| 30 |
+
223,
|
| 31 |
+
937,
|
| 32 |
+
239
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "image",
|
| 38 |
+
"img_path": "images/909faaa65a25ad478cf38500ee0b30017b2f0e7df11d4f7fcc8f0013a6d8f468.jpg",
|
| 39 |
+
"image_caption": [
|
| 40 |
+
"(a) Vanilla generative method"
|
| 41 |
+
],
|
| 42 |
+
"image_footnote": [],
|
| 43 |
+
"bbox": [
|
| 44 |
+
98,
|
| 45 |
+
273,
|
| 46 |
+
872,
|
| 47 |
+
407
|
| 48 |
+
],
|
| 49 |
+
"page_idx": 0
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"type": "image",
|
| 53 |
+
"img_path": "images/6e35f0d9d12c027ac00d71fb64c805ebc626d9240c85981c56c452237191b154.jpg",
|
| 54 |
+
"image_caption": [
|
| 55 |
+
"(b) Two-stage BEVControl",
|
| 56 |
+
"Figure 1. Comparison between different generative networks hinted by Bird's Eye View (BEV) segmentation layout v.s. sketch layout. (a) Vanilla generative pipeline feeds a semantic segmentation style input into a generative network and outputs reasonable multi-view images. However, we discover that it fails to generate accurate object-level details. For example, we show a common failure of a state-of-the-art algorithm where the generated vehicle has reversed heading compared to the target 3D bounding box. In addition, editing the semantic segmentation style input is a hard task and requires non-trivial human effort. (b) To this end, we propose a two-stage method that provides finer background and foreground geometry control, dubbed BEVControl. It supports sketch style input that enables fast and easy editing. In addition, our BEVControl decouples visual consistency into two sub-goals: achieving geometry consistency between street and bird's-eye views through the Controller; and achieving appearance consistency between street views through the Coordinator."
|
| 57 |
+
],
|
| 58 |
+
"image_footnote": [],
|
| 59 |
+
"bbox": [
|
| 60 |
+
101,
|
| 61 |
+
422,
|
| 62 |
+
875,
|
| 63 |
+
545
|
| 64 |
+
],
|
| 65 |
+
"page_idx": 0
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"type": "text",
|
| 69 |
+
"text": "Abstract",
|
| 70 |
+
"text_level": 1,
|
| 71 |
+
"bbox": [
|
| 72 |
+
233,
|
| 73 |
+
689,
|
| 74 |
+
310,
|
| 75 |
+
704
|
| 76 |
+
],
|
| 77 |
+
"page_idx": 0
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"type": "text",
|
| 81 |
+
"text": "Using synthesized images to boost the performance of perception models is a long-standing research challenge in computer vision. It becomes more eminent in visual-centric autonomous driving systems with multi-view cameras as some long-tail scenarios can never be collected. Guided by the BEV segmentation layouts, the existing generative networks seem to synthesize photo-realistic street-view images when evaluated solely on scene-level metrics. However, once zoom-in, they usually fail to produce accurate foreground and background details such as heading. To this end, we pro",
|
| 82 |
+
"bbox": [
|
| 83 |
+
75,
|
| 84 |
+
709,
|
| 85 |
+
470,
|
| 86 |
+
859
|
| 87 |
+
],
|
| 88 |
+
"page_idx": 0
|
| 89 |
+
},
|
| 90 |
+
{
|
| 91 |
+
"type": "text",
|
| 92 |
+
"text": "pose a two-stage generative method, dubbed BEVControl, that can generate accurate foreground and background contents. In contrast to segmentation-like input, it also supports sketch style input, which is more flexible for humans to edit. In addition, we propose a comprehensive multi-level evaluation protocol to fairly compare the quality of the generated scene, foreground object, and background geometry. Our extensive experiments show that our BEVControl surpasses the state-of-the-art method, BEVGen, by a significant margin, from 5.89 to 26.80 on foreground segmentation mIoU. In addition, we show that using images generated by BEVControl to train the downstream perception model, it achieves on average 1.29 improvement in NDS score.",
"bbox": [
496,
690,
893,
886
],
"page_idx": 0
},
{
"type": "aside_text",
"text": "arXiv:2308.01661v4 [cs.CV] 23 Sep 2023",
"bbox": [
22,
263,
60,
705
],
"page_idx": 0
},
{
"type": "page_footnote",
"text": "*Co-first authors.",
"bbox": [
78,
875,
171,
886
],
"page_idx": 0
},
{
"type": "page_footnote",
"text": "†Corresponding author.",
"bbox": [
81,
887,
204,
898
],
"page_idx": 0
},
{
"type": "page_number",
"text": "1",
"bbox": [
480,
924,
488,
935
],
"page_idx": 0
},
{
"type": "text",
"text": "1. Introduction",
"text_level": 1,
"bbox": [
80,
90,
207,
104
],
"page_idx": 1
},
{
"type": "text",
"text": "BEV perception for autonomous driving has become popular. It requires understanding the objects in the streets captured from multiple cameras' views, where the things should correspond to the positions from the bird's-eye perspective. The street and bird's-eye views allow the autonomous driving car to broadly sense the objects, thus advancing the progress on an array of downstream applications (e.g., street-view object recognition [15, 37] and traffic flow prediction [1, 11, 35]).",
"bbox": [
78,
114,
468,
250
],
"page_idx": 1
},
{
"type": "text",
"text": "In today's age of deep learning, reliable BEV perception heavily relies on deep networks trained on many street-view images and the corresponding BEV segmentation layouts, to enable the autonomous car's self-control. To achieve large-scale data for BEV perception, someone may employ autonomous vehicles to travel around the city while recording the street-view images by multiple cameras and mapping the objects to the BEV segmentation layout. Undoubtedly, this solution reduces the human effort for data collection. Yet, autonomous cars without perfect self-control may give rise to traffic congestion or even fatal accident. Moreover, someone must annotate objects across the street and bird's-eye views at an expensive cost. Extra effort is needed to double-check the consistency of annotation across various views.",
"bbox": [
78,
251,
468,
460
],
"page_idx": 1
},
{
"type": "text",
"text": "Rather than laboriously collecting street-view images from the natural environment and annotating multi-view photos, there are many works [24] resort to the fast-growing family of generative networks [6, 27] for creating new street-view images with a realistic style, which augment the training data for BEV perception. As illustrated in Figure 1(a), these methods feed the BEV layout into the generative network. The BEV segmentation layout provides the semantic categories and spatial distribution of the objects in the street for the generative network, thus controlling the content of the generated images. Even with an identical BEV layout, the generative network can randomly associate diverse appearances to the objects already appearing in the street.",
"bbox": [
78,
463,
468,
657
],
"page_idx": 1
},
{
"type": "text",
"text": "In spite of the success of the generative networks, the current methods consider less about two critical issues when generating street-view images based on the BEV segmentation layouts. First, the BEV segmentation layout can be analogy to the panoptic segmentation map, where the background stuff and foreground objects unanimously have the pixel-wise annotations in details. It is inconvenient to edit the details of the BEV segmentation layout, thus disallowing many layouts with diversity to be produced for further enriching the street-view data. Second, the existing methods generally focus on improving the visual consistency between various street views and the geometric correspondence between the street and bird's-eye views. Nevertheless, only reasonable visual consistency and geometric correspondence are inadequate for the data augmentation, which also requires a diversity of visual elements (e.g., road layout, lane line,",
"bbox": [
78,
659,
468,
900
],
"page_idx": 1
},
{
"type": "text",
"text": "and vehicle position/heading) in the street-view images to enhance the generalization power of the BEV perception models. For this purpose, the generative network should accurately control the visual elements to achieve data diversity.",
"bbox": [
501,
90,
892,
151
],
"page_idx": 1
},
{
"type": "text",
"text": "This paper proposes BEVControl, which has a strong power for controlling the visual elements of the generated street-view images based on the BEV sketch layout. We illustrate the architecture of BEVControl in Figure 1(b). BEVControl has the controller and coordinator. The controller relies on the sketches of the background (e.g., road layout and lane line) and foreground elements (e.g., vehicle and pedestrian), which are easier to be edited than the pixel-wise annotations on the segmentation layout, to control the appearances and geometric properties of these two kinds of elements in the generated street-view images separately. The coordinator attends to the underlying correlation between the background and foreground elements, whose visual consistency across different views is preserved.",
"bbox": [
503,
152,
892,
362
],
"page_idx": 1
},
{
"type": "text",
"text": "The controller regards the background and foreground elements' sketches as hints. Here, the sketch and bounding boxes mainly represent the geometric shapes of the background and foreground elements. They are mapped from the identical BEV sketch layout, thus preserving the geometric correspondence between the elements across the street and bird's-eye views. With the hints attending to the background and foreground elements respectively, the controller employs the diffusion model to compute the latent feature maps of the street-view images, which represent various perspectives captured by multiple vehicle cameras. We feed these street-view feature maps to the coordinator. The coordinator uses a novel cross-view-cross-element attention mechanism to comprehensively model the context of visual elements in different views. It uses the context to enhance the visual consistency between the visual elements from multiple street perspectives, eventually producing street-view images.",
"bbox": [
503,
363,
892,
619
],
"page_idx": 1
},
{
"type": "text",
"text": "We extract the BEV sketches from the public dataset, nuScenes [5], to drive BEVControl to generate the street-view images for the classical object detection task. In contrast to the current methods that primarily mind the usefulness of the generated data for improving the performances on down-stream tasks, we extensively evaluate the controlling power of BEVControl, which helps to yield richer training data and achieve state-of-the-art object detection performance on nuScenes. We brief our contributions below:",
"bbox": [
503,
619,
892,
753
],
"page_idx": 1
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- We use the cost-effective BEV sketch layouts to easier produce a large amount of street-view images.",
"- We propose the sketch-based BEVControl, which has a strong control of the background and foreground elements in the generated street-view images.",
"- BEVControl remarkably augments the training dataset, which helps to achieve state-of-the-art object detection results on nuScenes."
],
"bbox": [
521,
762,
890,
898
],
"page_idx": 1
},
{
"type": "page_number",
"text": "2",
"bbox": [
480,
925,
488,
935
],
"page_idx": 1
},
{
"type": "image",
"img_path": "images/d6c24163ae812db58aa6e336d426412e3cdd446d38aae54e37dc6e8f530f8c29.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [
86,
85,
890,
271
],
"page_idx": 2
},
{
"type": "image",
"img_path": "images/3cd76990b532767c2980fcc290b05c762b4abe29f5f935989338ccc19639e4a8.jpg",
"image_caption": [
"Figure 2. (a) Overview of BEVControl. It takes inputs as an edit-friendly BEV sketch $S$ , multi-view noisy images $\\mathcal{Z}_t$ and text prompt, generating multi-view images $\\mathcal{Z}_0$ . BEVControl is a UNet structure generative network composed of a sequence of modules. Each module has two elements, controller and coordinator. Each controller takes input from BEV sketch features extracted from the projection module. See Fig. 3 for more details. Text features are encoded cross-attention as in [22]. (b) Details of Controller. A controller module takes in the foreground and background location information of the camera views sketch in a self-attention manner and outputs the geometry-consistent street view features $\\mathcal{G}_t$ concerning the BEV sketch $S$ . (c) Details of Coordinator. A coordinator module leverages a novel cross-view-cross-element attention mechanism that enables context interaction across views, outputting the appearance-consistent street view features $\\mathcal{A}_t$ ."
],
"image_footnote": [],
"bbox": [
83,
273,
883,
426
],
"page_idx": 2
},
{
"type": "text",
"text": "2. Related Work",
"text_level": 1,
"bbox": [
76,
585,
218,
601
],
"page_idx": 2
},
{
"type": "text",
"text": "The literature on image generation is vast [22, 26]. We mainly survey the approaches to the conditional generation of images with visual consistency. These approaches are closely relevant to our work because they also leverage various types of image information to control the image contents.",
"bbox": [
75,
617,
470,
694
],
"page_idx": 2
},
{
"type": "text",
"text": "Image Generation via Multi-modal Information The recent progress in image generation is primarily attributed to the generative networks pre-trained on large-scale image data. Amidst a broad range of generative networks, the family of diffusion models [19, 21-23] lead a fashion of using multi-modal information for generating the image contents. The latent diffusion model [22] is a framework for producing images based on text. Note that the text-based information roughly specifies the image contents, disallowing a fine-grained control of the image contents. To address the above problem, the multi-modal image information like layout images [7, 14, 32, 36], semantic segmentation maps [2, 3, 9, 18, 29, 31], object sketches [4, 12, 18, 28, 33],",
"bbox": [
75,
704,
472,
901
],
"page_idx": 2
},
{
"type": "text",
"text": "and depth images [12, 33] have been involved for hinting the image generation.",
"bbox": [
498,
585,
890,
617
],
"page_idx": 2
},
{
"type": "text",
"text": "Generally, the above methods concentrate on generating a single image, where the image contents' semantic categories are aligned with the hints. This paper considers a more complex setting where the street-view image contents of multiple perspectives are generated. In addition to the semantic categories, we should accurately control the geometric properties of the generated street-view images. This goal is non-trivial, especially when the geometric patterns of the foreground and background contents are diverse. To achieve this goal, we resort to the appropriate modalities for controlling the foreground and background contents, thus enhancing the controlling power of the generative network.",
"bbox": [
496,
621,
893,
801
],
"page_idx": 2
},
{
"type": "text",
"text": "Multi-view Image Generation with Visual Consistency The visual consistency is a natural property of the authentic images of multiple views. Similarly, we should preserve the visual consistency across multi-view images generated by the deep network. For this purpose, MVDiffusion [26] uses the cross-view attention mechanism to",
"bbox": [
496,
809,
893,
900
],
"page_idx": 2
},
{
"type": "page_number",
"text": "3",
"bbox": [
478,
924,
490,
936
],
"page_idx": 2
},
{
"type": "text",
"text": "create panoramic images from text, maintaining the global correspondence of multi-view images. The video generation methods [8, 13, 30, 34] use the temporal cross-frame attention to preserve the visual consistency across distinct views of image contents at different moments. BEVGen [24] is a contemporary work that generates multi-view images of the street based on the BEV segmentation layout. It employs an auto-regressive transformer with cross-view attention to maintain visual consistency across multi-view images.",
"bbox": [
75,
90,
472,
226
],
"page_idx": 3
},
{
"type": "text",
"text": "The above methods usually work well when the global appearances of multi-view images coincide. But they are less effective for preserving the multi-view consistency when more accurate control of the individual contents (e.g., the orientations of different cars) is desired. This is because the independent operations of content control easily lead to inconsistency across other contents in the same venue. In contrast to the existing methods, we propose cross-view cross-object attention, which remarkably augments the visual consistency of the generated multi-view images.",
"bbox": [
75,
227,
473,
380
],
"page_idx": 3
},
{
"type": "text",
"text": "3. BEVControl",
"text_level": 1,
"bbox": [
76,
391,
209,
407
],
"page_idx": 3
},
{
"type": "text",
"text": "We illustrate the overall architecture of BEVControl in Figure 2. Following the LDM [22], BEVControl is a classic UNet structure consisting of an encoder and a decoder. They are composed of three modules stacked multiple times: controller, coordinator, and text cross-attention. We process all image features in the latent space, so the image features below specifically refer to those in the latent space.",
"bbox": [
75,
416,
472,
521
],
"page_idx": 3
},
{
"type": "text",
"text": "At first, BEVControl takes the edit-friendly BEV sketch $S \\in \\mathbb{R}^{K \\times K \\times 5}$ , text description, and street-view noisy images $\\mathcal{Z}_t = \\{\\mathbf{Z}_t^v \\in \\mathbb{R}^{H \\times W \\times C} \\mid v = 1, \\dots, V\\}$ as input. Here, $V$ denotes the number of perspective views. All sets denoted by $\\{\\cdot\\}$ represent $V$ viewpoints in the method below to foster better readability. $S$ is an editable canvas, which supports editing the objects within a $160 \\times 160$ -meter range around the ego car. The five channels of $S$ represent the background sketch (road line), pixel coordinates of the box's center point, text label, and heading of foreground objects, respectively. In training, $\\mathcal{Z}_t$ is a noisy version of street-view authentic images $\\mathcal{Z}_0$ by forward diffusion process of [22]. In inference, $\\mathcal{Z}_t$ is street-view noise sampled from $\\mathcal{N}(0, \\mathbf{I})$ . $H, W$ , and $C$ expressly represent the spatial resolutions and channels of latent features.",
"bbox": [
75,
523,
473,
748
],
"page_idx": 3
},
{
"type": "text",
"text": "BEVControl first projects the BEV sketch $\\mathcal{S}$ onto the 2D camera space as shown in Figure 3, computing a set of camera foreground conditions $\\mathcal{M} = \\{\\mathbf{b}^v,\\mathbf{l}^v,\\mathbf{h}^v\\}$ and background conditions $\\mathcal{R} = \\{\\mathbf{R}^v\\}$ of all view, which details see Sec. 3.1. Then we encode camera foreground and background conditions into a set of camera foreground and background embedding $\\mathcal{F} = \\{\\mathbf{F}^v\\in \\mathbb{R}^{N\\times C}\\}$ and $\\mathcal{B} = \\{\\mathbf{B}^v\\in \\mathbb{R}^{(H\\times W)\\times C}\\}$ , where $N$ denotes the number of bounding boxes in each view. Through the Controller, each perspective can obtain semantic control in",
"bbox": [
75,
750,
473,
901
],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/50cc1dbf98b87b17c43482a4ed1798d75ba2903085a03425241b68752d188494.jpg",
"image_caption": [
"Figure 3. The camera projection process from BEV sketch."
],
"image_footnote": [],
"bbox": [
501,
88,
890,
327
],
"page_idx": 3
},
{
"type": "text",
"text": "formation from the foreground and the background embedding of the corresponding camera view. This process results in the generation of geometry-consistent street-view latent features $\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}$ . Next, the geometry-consistent features $\\mathcal{G}_t$ are fed into Coordinator. The Coordinator employs a novel cross-view-cross-element attention mechanism to enhance adjacent views' consistency, yielding the appearance-consistent street-view latent features $\\mathcal{A}_t = \\{\\mathbf{A}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}$ .",
"bbox": [
496,
375,
893,
511
],
"page_idx": 3
},
{
"type": "text",
"text": "Then BEVControl employs the cross-attention mechanism of the diffusion model to handle the text prompt, allowing us to control the generated images' environmental factors (e.g., weather and lighting conditions). Then BEVControl repeats the execution of this UNet formed by stacking these three blocks $T$ times. Eventually, the output is the generated street-view images $\\mathcal{Z}_0 = \\{\\mathbf{Z}_0^v \\in \\mathbb{R}^{H \\times W \\times C}\\}$ , which are geometry-consistent, appearance-consistent and caption-aligned.",
"bbox": [
496,
512,
895,
648
],
"page_idx": 3
},
{
"type": "text",
"text": "3.1. Controller",
"text_level": 1,
"bbox": [
500,
661,
617,
676
],
"page_idx": 3
},
{
"type": "text",
"text": "Based on the internal and external parameters of different cameras, we project the foreground and background classes of the BEV sketch $S$ onto the corresponding pixel coordinate system to obtain the camera foreground conditions $\\mathcal{M}$ and background conditions $\\mathcal{R}$ .",
"bbox": [
496,
686,
892,
760
],
"page_idx": 3
},
{
"type": "text",
"text": "We define the camera foreground conditions as $\\mathcal{M} = \\{\\mathbf{b}^v,\\mathbf{l}^v,\\mathbf{h}^v\\}$ , where $\\mathbf{b}^v\\in [0,1]^{N\\times 4},\\mathbf{l}^v$ and $\\mathbf{h}^v\\in [-180,180)^{N\\times 1}$ denotes the normalized pixel coordinates of the upper left and lower right corners, text label and heading degree of $N$ boxes in the current perspective. The camera background conditions $\\mathcal{R} = \\{\\mathbf{R}^v\\in \\mathbb{R}^{H\\times W\\times 3}\\}$ , which are spatially aligned with the authentic camera images, representing the trend of the road. Then we extract the camera foreground embedding $\\mathcal{F} = \\{\\mathbf{F}^v\\in \\mathbb{R}^{N\\times C}\\}$ and background",
"bbox": [
496,
762,
893,
902
],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [
478,
924,
491,
936
],
"page_idx": 3
},
{
"type": "text",
"text": "embedding $\\mathcal{B} = \\{\\mathbf{B}^v\\in \\mathbb{R}^{(H\\times W)\\times C}\\}$ as below:",
"bbox": [
76,
89,
392,
107
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{F}^{v} = \\operatorname{linear}(\\operatorname{fe}(\\mathbf{b}^{v}) + \\operatorname{cte}(\\mathbf{l}^{v}) + \\operatorname{fe}(\\mathbf{h}^{v})),\n$$\n",
"text_format": "latex",
"bbox": [
138,
118,
405,
135
],
"page_idx": 4
},
{
"type": "text",
"text": "(1)",
"bbox": [
447,
132,
468,
146
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{B}^{v} = \\operatorname{cnn}\\left(\\mathbf{R}^{v}\\right),\n$$\n",
"text_format": "latex",
"bbox": [
220,
146,
325,
161
],
"page_idx": 4
},
{
"type": "text",
"text": "where fe denotes Fourier Embedder [17], cte denotes CLIP Text Encoder [20], and cnn denotes a pre-trained CNN network [16]. Based on the existing extensive pre-trained diffusion model [14,22], we inject the foreground and background embedding $\\mathcal{F}$ and $\\mathcal{B}$ by adding two trainable self-attention layer to the UNet architecture. The calculation formula is shown below:",
"bbox": [
75,
172,
470,
277
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{G}_{t}^{v} = \\mathbf{Z}_{t}^{v} + \\alpha \\cdot \\operatorname{sa}([\\mathbf{Z}_{t}^{v}, \\mathbf{F}^{v}]) + \\beta \\cdot \\operatorname{sa}([\\mathbf{Z}_{t}^{v}, \\mathbf{B}^{v}]), \\tag{2}\n$$\n",
"text_format": "latex",
"bbox": [
99,
291,
470,
309
],
"page_idx": 4
},
{
"type": "text",
"text": "where $[\\cdot]$ denotes the concatenation operation. sa denotes the self-attention block. $\\alpha$ and $\\beta$ are trainable parameters initialized to 0. The introduced self-attention layer can effectively find the mapping relationship between visual latent features and various camera condition embedding. Therefore, the controller can utilize spatial hints to output a set of latent features $\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}$ , which geometry is consistent with the corresponding camera foreground and background conditions.",
"bbox": [
75,
320,
470,
455
],
"page_idx": 4
},
{
"type": "text",
"text": "3.2. Coordinator",
"text_level": 1,
"bbox": [
76,
465,
209,
481
],
"page_idx": 4
},
{
"type": "text",
"text": "Taking $\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\}$ as input, we employ the Coordinator to enhance the consistency of different views and make them look as if they were captured from the same scene.",
"bbox": [
75,
489,
468,
535
],
"page_idx": 4
},
{
"type": "text",
"text": "Specifically, we propose a novel cross-view-cross-element attention mechanism that enables context interaction between the different views. Sufficient context interaction makes the semantics of visual elements in various perspectives uniform. According to the characteristics of ring-shaped cameras, each camera has the highest correlation with its adjacent cameras. Therefore, we carefully design each view to interact only with the contextual information of adjacent views, reducing the demand for computing resources. In particular, we let all camera views learn the context of their adjacent views in parallel. The context comprises two layers of information: global level and local level. The global level represents the entire latent feature of the previous perspective, while the local level refers to the specific element feature. Taking adjacent view $v$ and $u$ as an example, the learning context is $\\mathbf{k}$ , $\\mathbf{v}$ as shown below for view $v$ :",
"bbox": [
75,
536,
470,
777
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{q} = \\operatorname{linear}\\left(\\mathbf{G}_{t}^{v}\\right),\n$$\n",
"text_format": "latex",
"bbox": [
215,
789,
328,
806
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{k} = \\operatorname{linear}\\left(\\left[ \\mathbf{G}_{t}^{u}, \\mathbf{F}^{u}, \\mathbf{B}^{u} \\right]\\right), \\tag{3}\n$$\n",
"text_format": "latex",
"bbox": [
184,
815,
468,
832
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{v} = \\operatorname{linear}\\left(\\left[ \\mathbf{G}_{t}^{u}, \\mathbf{F}^{u}, \\mathbf{B}^{u} \\right]\\right),\n$$\n",
"text_format": "latex",
"bbox": [
184,
842,
359,
859
],
"page_idx": 4
},
{
"type": "text",
"text": "where linear modules above are independent of each other, and we set $u = 1$ , $v = V$ or $v = u + 1$ to enforce $u$ and",
"bbox": [
75,
869,
470,
898
],
"page_idx": 4
},
{
"type": "text",
"text": "$v$ as the adjacent views. The context interaction process of adjacent views is formulated as:",
"bbox": [
500,
90,
893,
121
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{A}_{t}^{v} = \\mathbf{G}_{t}^{v} + \\mathbf{v}^{\\top} \\cdot \\operatorname{softmax}(\\mathbf{k} \\cdot \\mathbf{q}^{\\top}). \\tag{4}\n$$\n",
"text_format": "latex",
"bbox": [
578,
128,
893,
147
],
"page_idx": 4
},
{
"type": "text",
"text": "We perform the above operation on all views in parallel, resulting in a set of street-view latent feature $\\mathcal{A}_t = \\{\\mathbf{A}_t^v\\in$ $\\mathbb{R}^{H\\times W\\times C}\\}$ . By interacting between the global and local levels, global information, such as environmental conditions and weather, and local information, such as object appearance and identity, can be transmitted from the previous to the following perspective. Thus the cross-view-cross-element attention effectively improves the appearance consistency of the street-view images.",
"bbox": [
496,
154,
893,
290
],
"page_idx": 4
},
{
"type": "text",
"text": "3.3. Training Objective",
"text_level": 1,
"bbox": [
500,
297,
679,
314
],
"page_idx": 4
},
{
"type": "text",
"text": "By repeatedly applying the above UNet $\\epsilon_{\\theta}$ in the latent space, we can obtain street-view images with gradually reduced noise. By adding $t$ step noise $\\epsilon$ to the original clear images $\\mathcal{Z}_0$ , we obtain a noisy version $\\mathcal{Z}_t$ of the images. We train $\\epsilon_{\\theta}$ to predict the noise we added. Following the training objective of the original LDM [22], we finetune the pretrained diffusion model [14] to adapt to new conditions $c$ (e.g. BEV sketch and text prompt):",
"bbox": [
496,
321,
893,
443
],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\min_{\\theta} \\mathcal{L} = \\mathbb{E}_{\\mathcal{Z}_{0}, \\epsilon \\sim \\mathcal{N}(\\mathbf{0}, \\mathbf{I}), t, c} \\left[ \\| \\epsilon - \\epsilon_{\\theta}\\left(\\mathcal{Z}_{t}, t, c\\right) \\|_{2}^{2} \\right], \\tag{5}\n$$\n",
"text_format": "latex",
"bbox": [
537,
450,
892,
472
],
"page_idx": 4
},
{
"type": "text",
"text": "where time step $t$ is uniformly sampled from $[1, T]$ , and $\\theta$ refers to the newly added layer in the UNet. We only train the newly introduced layer while freezing the layers of the original diffusion model. This approach reduces memory consumption and avoids knowledge forgetting and model collapsing.",
"bbox": [
496,
479,
893,
571
],
"page_idx": 4
},
{
"type": "text",
"text": "4. Evaluation Metrics for Content Controlling",
"text_level": 1,
"bbox": [
498,
584,
888,
601
],
"page_idx": 4
},
{
"type": "text",
"text": "Recent street view image generation works [24] only evaluate the generation quality based on scene-level metrics such as FID, vehicle mIoU, and road mIoU. However, we found that using only these metrics cannot evaluate the true generation ability of the generative network. As shown in the Figure 4, the reported qualitative and quantitative results simultaneously indicate that several sets of generated street view images with similar FID scores have vastly different fine-grained control abilities over foreground and background. Therefore in this section, we introduce the evaluation metrics in our experiment for measuring the controlling power of the generative network.",
"bbox": [
496,
609,
893,
790
],
"page_idx": 4
},
{
"type": "text",
"text": "Evaluation Metrics for Realism, Diversity, and Consistency Given the street-view images $\\{\\mathbf{X}_v' \\mid v = 1, \\dots, V\\}$ from $V$ perspectives output by the image decoder of CLIP [20], we use the Frechet Inception Distance (FID) [10] to measure the realism and diversity of the generated street-view images. We compute the FID between the latent features of the generated and real images, which capture the",
"bbox": [
496,
794,
893,
900
],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [
480,
924,
488,
936
],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/cc0a7eed6a1d970ed64294cc79f18f5efd307e69d222f3e3511fc7f5e7ba84aa.jpg",
"image_caption": [
"Figure 4. Comparison of detail evaluation metrics."
],
"image_footnote": [],
"bbox": [
83,
89,
467,
497
],
"page_idx": 5
},
{
"type": "text",
"text": "same perspective's foreground and background contents. Here, we employ the Inception-V3 network [25] to extract the latent features of the generated and real images. We compute the average FID score $S_{\\mathrm{FID}} \\in \\mathbb{R}$ as:",
"bbox": [
76,
541,
468,
602
],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\nS_{\\mathrm{FID}} = \\frac{1}{V} \\sum_{v=1}^{V} \\operatorname{fid}\\left(\\sigma\\left(\\mathbf{X}_{v}\\right), \\sigma\\left(\\mathbf{X}_{v}^{\\prime}\\right)\\right). \\tag{6}\n$$\n",
"text_format": "latex",
"bbox": [
156,
609,
468,
651
],
"page_idx": 5
},
{
"type": "text",
"text": "$\\{\\mathbf{X}_v \\in \\mathbb{R}^{H \\times W \\times 3} \\mid v = 1, \\dots, V\\}$ are the real images. We denote $\\sigma$ as the Inception-V3 network. $\\sigma(\\mathbf{X}_v), \\sigma(\\mathbf{X}_v') \\in \\mathbb{R}^C$ are the latent features of the $v^{th}$ perspective's generated and real images. A lower FID score $S_{\\mathrm{FID}}$ means that the generated contents are more realistic and diverse.",
"bbox": [
76,
659,
468,
736
],
"page_idx": 5
},
{
"type": "text",
"text": "To evaluate the visual consistency between the generated street-view images, we compute the CLIP score [20], based on the latent features of the overlap between the adjacent perspectives of the generated street-view images. We calculate the CLIP score $S_{\\mathrm{CLIP}} \\in \\mathbb{R}$ as:",
"bbox": [
75,
737,
468,
811
],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\nS_{\\mathrm{CLIP}} = \\frac{1}{V} \\sum_{u, v} \\operatorname{clip}\\left(\\psi\\left(\\mathbf{X}_{u}^{\\prime}\\right), \\psi\\left(\\mathbf{X}_{v}^{\\prime}\\right)\\right), \\tag{7}\n$$\n",
"text_format": "latex",
"bbox": [
147,
819,
468,
854
],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\n\\mathrm{s.t.}\\ u = 1, v = V \\text{ or } v = u + 1,\n$$\n",
"text_format": "latex",
"bbox": [
168,
859,
393,
875
],
"page_idx": 5
},
{
"type": "text",
"text": "where $\\mathbf{X}_u^{\\prime},\\mathbf{X}_v^{\\prime}$ are the generated street-view images of the",
"bbox": [
76,
885,
468,
901
],
"page_idx": 5
},
{
"type": "text",
"text": "adjacent perspectives. We denote $\\psi (\\mathbf{X}_{v - 1}^{\\prime}),\\psi (\\mathbf{X}_v^{\\prime})\\in \\mathbb{R}^{C}$ as latent features of the overlap between $\\mathbf{X}_u^\\prime ,\\mathbf{X}_v^\\prime$ . A higher CLIP score means satisfactory visual consistency.",
"bbox": [
498,
90,
890,
137
],
"page_idx": 5
},
{
"type": "text",
"text": "Evaluation Metrics for Foreground and Background Controlling We employ the official detection metrics of the nuScenes dataset, i.e., the mean average precision (mAP), the nuScenes detection score (NDS), and the mean average orientation error (mAOE), for measuring the foreground controlling score. We denote the scores of mAP, NDS, and mAOE as $S_{\\mathrm{AP}}$ , $S_{\\mathrm{NDS}}$ , and $S_{\\mathrm{AOE}}$ . Specifically, based on the generated street-view images $\\{\\mathbf{X}_v' \\mid v = 1,\\dots,V\\}$ , we use BEVFormer [15] trained on the nuScenes dataset to detect the foreground objects on the BEV layouts. We achieve the scores $S_{\\mathrm{AP}}$ , $S_{\\mathrm{NDS}}$ , and $S_{\\mathrm{AOE}}$ of foreground object detection by comparing the detection results with the BEV layout used for generating the street-view images. We use CVT [37] trained on the nuScenes dataset to segment the foreground on the BEV layouts and report the foreground mean intersection-over-union (fIoU) performance denoted as $S_{\\mathrm{floU}}$ . To evaluate the background controlling power of the generative network, we employ CVT to segment the background contents on the BEV layouts and report the performance in terms of the background mean intersection-over-union (bIoU) denoted as $S_{\\mathrm{bloU}}$ .",
"bbox": [
498,
140,
892,
455
],
"page_idx": 5
},
{
"type": "text",
"text": "We remark that higher scores of $S_{\\mathrm{AP}}$ , $S_{\\mathrm{NDS}}$ , $S_{\\mathrm{fIoU}}$ , $S_{\\mathrm{bIoU}}$ , and a lower score of $S_{\\mathrm{AOE}}$ mean a good controlling power of the generative network, which produces the foreground and background contents corresponding to the ground-truth annotations in the BEV layouts.",
"bbox": [
498,
458,
893,
532
],
"page_idx": 5
},
{
"type": "text",
"text": "Overall Evaluation Metric We propose a combinatorial metric to summarize the above metrics that measure the controlling power of the generative network from separate aspects. We name this combinatorial metric as the overall controlling score (OCS) denoted $S_{\\mathrm{OCS}}$ . We compute $S_{\\mathrm{OCS}}$ as:",
"bbox": [
498,
537,
890,
627
],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\nS_{\\mathrm{OCS}} = \\frac{U_{\\mathrm{FID}}}{S_{\\mathrm{FID}}} + \\frac{S_{\\mathrm{CLIP}}}{U_{\\mathrm{CLIP}}} + \\frac{S_{\\mathrm{NDS}}}{U_{\\mathrm{NDS}}} + \\frac{S_{\\mathrm{fIoU}}}{U_{\\mathrm{fIoU}}} + \\frac{S_{\\mathrm{bIoU}}}{U_{\\mathrm{bIoU}}}. \\tag{8}\n$$\n",
"text_format": "latex",
"bbox": [
519,
633,
890,
666
],
"page_idx": 5
},
{
"type": "text",
"text": "The scores $\\{S_{\\mathrm{FID}}, S_{\\mathrm{CLIP}}, S_{\\mathrm{NDS}}, S_{\\mathrm{floU}}, S_{\\mathrm{bIoU}}\\}$ are achieved by using BEVFormer to detect and CVT to segment street-view contents on the BEV layouts, according to the generated images. We define another set of reference scores $\\{U_{\\mathrm{FID}}, U_{\\mathrm{CLIP}}, U_{\\mathrm{NDS}}, U_{\\mathrm{floU}}, U_{\\mathrm{bIoU}}\\}$ , which are detection and segmentation performances based on the authentic images. A high score of $S_{\\mathrm{OCS}}$ means the entire controlling power is strong.",
"bbox": [
498,
672,
893,
794
],
"page_idx": 5
},
{
"type": "text",
"text": "5. Experimental Results",
"text_level": 1,
"bbox": [
500,
806,
705,
823
],
"page_idx": 5
},
{
"type": "text",
"text": "5.1. Dataset",
"text_level": 1,
"bbox": [
500,
832,
594,
845
],
"page_idx": 5
},
{
"type": "text",
"text": "We use the public nuScenes dataset [5] to examine the effectiveness of our method. nuScenes contains 1,000 examples of street-view scenes. There are 700/150/150 train",
"bbox": [
498,
854,
893,
900
],
"page_idx": 5
},
{
"type": "page_number",
"text": "6",
"bbox": [
478,
925,
490,
936
],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/b2b730014d148446ea6c885ad736eb48bd4eb8b564385c87817e03382a8b3425.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\">Method</td><td>Real & Diverse</td><td>Consistency</td><td colspan=\"4\">Foreground Control</td><td>Background Control</td><td rowspan=\"3\">\\( S_{OCS} \\uparrow \\)</td></tr><tr><td rowspan=\"2\">\\( S_{FID} \\downarrow \\)</td><td rowspan=\"2\">\\( S_{CLIP} \\uparrow \\)</td><td colspan=\"3\">Detection</td><td>Segmentation</td><td>Segmentation</td></tr><tr><td>\\( S_{AP} \\uparrow \\)</td><td>\\( S_{NDS} \\uparrow \\)</td><td>\\( S_{AOE} \\downarrow \\)</td><td>\\( S_{floU} \\uparrow \\)</td><td>\\( S_{bIoU} \\uparrow \\)</td></tr><tr><td>Reference-score</td><td>0.01</td><td>87.96</td><td>36.04</td><td>44.10</td><td>0.42</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>BEVGen [24]</td><td>25.54</td><td>-</td><td>-</td><td>-</td><td>-</td><td>5.89</td><td>50.20</td><td>-</td></tr><tr><td>LayoutDiffusion [36]</td><td>29.64</td><td>79.80</td><td>3.68</td><td>14.68</td><td>1.31</td><td>15.51</td><td>35.31</td><td>2.16</td></tr><tr><td>GLIGEN [14]</td><td>31.34</td><td>78.80</td><td>15.42</td><td>22.35</td><td>1.22</td><td>22.02</td><td>38.12</td><td>2.55</td></tr><tr><td>BEVControl</td><td>24.85(↓ 6.49)</td><td>82.70(↑ 3.9)</td><td>19.64(↑ 4.22)</td><td>28.68(↑ 6.33)</td><td>0.78(↓ 0.44)</td><td>26.80(↑ 4.78)</td><td>60.80(↑ 22.68)</td><td>3.18(↑ 0.63)</td></tr></table>",
"bbox": [
78,
88,
893,
243
],
"page_idx": 6
},
{
"type": "text",
"text": "Table 1. We compare BEVControl with state-of-the-art methods on the validation subset of nuScenes. The results measure the controlling power of different methods. $\\downarrow / \\uparrow$ means a smaller/larger value of the metric represents a better performance.",
"bbox": [
75,
253,
892,
282
],
"page_idx": 6
},
{
"type": "text",
"text": "ing/validation/testing examples. Each example records about 40 frames of BEV layouts. Each frame of the BEV layout is associated with six street-view RGB images, which are captured by an ego vehicle's side, front, and back cameras. We follow the convention [24] to sample 600 frames of BEV layouts from the validation set, forming a validation subset to evaluate our method.",
"bbox": [
75,
308,
470,
412
],
"page_idx": 6
},
{
"type": "text",
"text": "The objects in each BEV layout are annotated as the foreground and background. For object detection, the foreground includes ten categories (i.e., car, bus, truck, trailer, motorcycle, bicycle, construction vehicle, pedestrian, barrier, and traffic cone), while the background is the road. For object segmentation, the categories of car, bus, truck, trailer, motorcycle, bicycle, and construction vehicle are merged into the vehicle category. Thus, each BEV layout contains the binary categories of vehicle and road.",
"bbox": [
75,
415,
470,
551
],
"page_idx": 6
},
{
"type": "text",
"text": "5.2. Visual Element Control",
"text_level": 1,
"bbox": [
76,
566,
294,
580
],
"page_idx": 6
},
{
"type": "text",
"text": "In Table 1, we compare BEVControl with the recent methods [14,24,36], which can also generate the street-view images based on the BEV layout. Given a BEV layout, each method generates a set of street-view images from 6 perspectives. The results in Table 1 measure the quality of the generated images and the controlling power of the compared methods on the background and foreground objects. We also report the performance improvement of BEVControl relative to the GLIGEN [14] in the last row. BEVControl achieves a higher OCS than other methods (see the right-most column of Table 1). Below, we evaluate the detailed performances of controlling various visual elements.",
"bbox": [
75,
592,
470,
773
],
"page_idx": 6
},
{
"type": "text",
"text": "Realism and Diversity In Table 1 \"Real & Diverse\", we measure the realism and diversity of the image data generated by different methods in terms of the Frechet Inception Distance (FID). BEVControl achieves 24.85 FID, outperforming other methods. We also compare the street-view images generated differently in Figure 5 and 6, where BEVControl produces a higher image quality than the compared methods.",
"bbox": [
75,
779,
470,
900
],
"page_idx": 6
},
{
"type": "text",
"text": "Foreground Control In Table 1 \"Foreground Control\", we examine the controlling power of different methods on the foreground objects. In this examination, we use BEVFormer [15] to detect the ten categories of foreground objects on the BEV layouts, reporting the performance in terms of mAP, NDS, and mAOE. We use CVT [37] to segment the foreground (i.e., vehicle) on the BEV layouts, whose performance is reported in terms of mIoU. In the first row of Table 1 \"Reference-score\", we report the detection performances of BEVFormer and segmentation performances of CVT on the original validation subset. These results can be regarded as the Reference-score performance of BEVFormer and CVT for measuring the controlling power of BEVControl.",
"bbox": [
496,
308,
893,
505
],
"page_idx": 6
},
{
"type": "text",
"text": "Based on the data generated by BEVControl, BEVFormer and CVT achieve better detection and segmentation performances than other generative models. It demonstrates a more substantial controlling power of BEVControl on the foreground objects. We compare the generated foreground objects by different methods in Figure 5, where BEVControl satisfactorily yields the foreground objects according to the ground-truth annotations. Furthermore, Figure 7 demonstrates the generation capability of BEVControl for user-drawn BEV sketches of different vehicle orientations.",
"bbox": [
496,
526,
895,
678
],
"page_idx": 6
},
{
"type": "text",
"text": "Background Control In Table 1 \"Background Control\", we study the controlling power of different methods on the background objects. Again, we use the trained CVT, which uses the generated street-view images to segment the background (i.e., road) on the BEV layouts. We report the segmentation accuracy mIoU on the road category. We also compare the generated roads by different methods in Figure 6. Compared to the street-view images generated by other methods, those generated by BEVControl lead to a better segmentation accuracy, which means a more substantial controlling power of BEVControl on the background objects. Additionally, Figure 8 displays the generation capability of BEVControl for user-edited BEV sketches of different road traffic situations.",
"bbox": [
496,
704,
895,
900
],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [
478,
924,
491,
935
],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/d05976d9dcacbb8b1f7c6394f9fc66515044b877d367ae882438cd96ad29fc89.jpg",
"table_caption": [],
"table_footnote": [
"Table 2. Different strategies of using the foreground and background hints for controlling the visual elements. The evaluation metrics (i.e., FID, scores of foreground and background control) reported in this table are the same to those in Table 1."
],
"table_body": "<table><tr><td rowspan=\"2\">Controller</td><td colspan=\"2\">FC</td><td>BC</td><td rowspan=\"2\">Socs ↑</td></tr><tr><td>SNDS ↑</td><td>SfloU ↑</td><td>SbIoU ↑</td></tr><tr><td>Reference-score</td><td>44.10</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>foreground</td><td>25.23</td><td>22.50</td><td>41.70</td><td>2.69</td></tr><tr><td>background</td><td>3.70</td><td>3.53</td><td>49.71</td><td>1.74</td></tr><tr><td>both w/o separation</td><td>26.87</td><td>23.78</td><td>52.30</td><td>2.90</td></tr><tr><td>both w/ separation</td><td>28.68</td><td>26.80</td><td>60.80</td><td>3.18</td></tr></table>",
"bbox": [
78,
88,
470,
209
],
"page_idx": 7
},
{
"type": "text",
"text": "5.3. Ablation Study on Controller",
"text_level": 1,
"bbox": [
76,
301,
338,
316
],
"page_idx": 7
},
{
"type": "text",
"text": "The controller of BEVControl regards the bounding boxes and the road sketches as hints. The diffusion model uses these hints to generate the foreground and background objects in the street-view images. Here, we experiment with different strategies of using the foreground and background hints to examine their effect on controlling the visual elements in the generated street-view images. We report the quantitative results in Table 2.",
"bbox": [
75,
324,
468,
445
],
"page_idx": 7
},
{
"type": "text",
"text": "First, we use the foreground or background hint only, reporting the scores of FID, foreground control (FC), and background control (BC) in the second and third rows of Table 2. Without foreground or background hint, we degrade the realism and diversity of the generated images, while the score of foreground or background control also decreases consequently. These results demonstrate the importance of using the foreground and background hints together.",
"bbox": [
75,
445,
468,
566
],
"page_idx": 7
},
{
"type": "text",
"text": "Next, we compare various strategies for using both foreground and background hints. We evaluate an alternative method that employs the foreground and background hints without separately controlling the visual elements. We use a single attention layer to jointly embed the foreground and background hints into latent space. The controller uses the latent embedding of these hints for outputting an information-controlled feature map. The coordinator relies on the information-controlled feature map to generate the street-view images. We report the foreground or background control scores in the fourth row of Table 2. Though the scores are higher than those achieved using the foreground or background hint alone, they still lag behind the results of the entire controller in the fifth row. The complete controller uses separate network streams to enable a more focused control of the foreground and background objects.",
"bbox": [
75,
566,
470,
808
],
"page_idx": 7
},
{
"type": "text",
"text": "5.4. Ablation Study on Coordinator",
"text_level": 1,
"bbox": [
76,
816,
354,
832
],
"page_idx": 7
},
{
"type": "text",
"text": "The coordinator utilizes the CVCE attention to enhance the visual consistency between the generated street-view images. Here, we use the street-view images from various perspectives to compute the CLIP score for measuring visual",
|
| 1222 |
+
"bbox": [
|
| 1223 |
+
75,
|
| 1224 |
+
839,
|
| 1225 |
+
468,
|
| 1226 |
+
901
|
| 1227 |
+
],
|
| 1228 |
+
"page_idx": 7
|
| 1229 |
+
},
|
| 1230 |
+
{
|
| 1231 |
+
"type": "table",
|
| 1232 |
+
"img_path": "images/2ca28337aae9bf4f2efafba03c97a328716b1198b2553271a4015fc1d5df2d01.jpg",
|
| 1233 |
+
"table_caption": [],
|
| 1234 |
+
"table_footnote": [
|
| 1235 |
+
"Table 3. Different strategies of using the coordinator for yielding the street-view images from different perspectives. We report the results as CLIP scores, which measure the visual consistency of the street-view images."
|
| 1236 |
+
],
|
| 1237 |
+
"table_body": "<table><tr><td>Coordinator</td><td>\\( S_{CLIP} \\uparrow \\)</td></tr><tr><td>Reference-score</td><td>87.96</td></tr><tr><td>w/o coordinator</td><td>79.50</td></tr><tr><td>w/ CV, w/o CE</td><td>82.30</td></tr><tr><td>w/ CVCE</td><td>82.70</td></tr></table>",
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
552,
|
| 1240 |
+
88,
|
| 1241 |
+
841,
|
| 1242 |
+
183
|
| 1243 |
+
],
|
| 1244 |
+
"page_idx": 7
|
| 1245 |
+
},
|
| 1246 |
+
{
|
| 1247 |
+
"type": "text",
|
| 1248 |
+
"text": "consistency. In Table 3, we compare the CLIP scores for the street-view images generated by different alternatives.",
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
496,
|
| 1251 |
+
277,
|
| 1252 |
+
890,
|
| 1253 |
+
306
|
| 1254 |
+
],
|
| 1255 |
+
"page_idx": 7
|
| 1256 |
+
},
|
| 1257 |
+
{
|
| 1258 |
+
"type": "text",
|
| 1259 |
+
"text": "We remove the coordinator from BEVControl, which has the controller alone. This alternative significantly degrades the visual consistency of the generated street-view images (see the CLIP score in the second row of Table 3). We improve the CLIP score by adding the coordinator with the cross-view attention but without the cross-element attention (see the third row). This result demonstrates the positive impact of cross-view attention on visual consistency.",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
496,
|
| 1262 |
+
308,
|
| 1263 |
+
890,
|
| 1264 |
+
429
|
| 1265 |
+
],
|
| 1266 |
+
"page_idx": 7
|
| 1267 |
+
},
|
| 1268 |
+
{
|
| 1269 |
+
"type": "text",
|
| 1270 |
+
"text": "5.5. Data Augmentation for Object Detection",
|
| 1271 |
+
"text_level": 1,
|
| 1272 |
+
"bbox": [
|
| 1273 |
+
498,
|
| 1274 |
+
438,
|
| 1275 |
+
846,
|
| 1276 |
+
455
|
| 1277 |
+
],
|
| 1278 |
+
"page_idx": 7
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "text",
|
| 1282 |
+
"text": "Based on each BEV layout from the training set of the nuScenes dataset, we again employ BEVControl to generate a set of street-view images. Note that the BEV layout and the generated street-view images can be used together for augmenting the training set of nuScenes. We use the generated data for training BEVFormer for object detection on the BEV layout from the train subset. We report the performances of object detection in Table 4. Compared to the BEVFormer trained without data augmentation (see the first row), the counterpart with data augmentation yields better results (see the second row).",
|
| 1283 |
+
"bbox": [
|
| 1284 |
+
496,
|
| 1285 |
+
462,
|
| 1286 |
+
890,
|
| 1287 |
+
628
|
| 1288 |
+
],
|
| 1289 |
+
"page_idx": 7
|
| 1290 |
+
},
|
| 1291 |
+
{
|
| 1292 |
+
"type": "table",
|
| 1293 |
+
"img_path": "images/c3f817abed735ddd6600e9e5490684e4daa618f5a4707c81b4ba7ba5c18453f7.jpg",
|
| 1294 |
+
"table_caption": [],
|
| 1295 |
+
"table_footnote": [
|
| 1296 |
+
"Table 4. Application of using the generated street-view images for augmenting the training data. We report the detection performances of BEVFormer on the validation set."
|
| 1297 |
+
],
|
| 1298 |
+
"table_body": "<table><tr><td>Method</td><td>SAP↑</td><td>SNDS↑</td><td>SAOE↓</td></tr><tr><td>w/o augmentation</td><td>37.00</td><td>47.90</td><td>0.66</td></tr><tr><td>w/ augmentation</td><td>38.96</td><td>49.19</td><td>0.42</td></tr></table>",
|
| 1299 |
+
"bbox": [
|
| 1300 |
+
537,
|
| 1301 |
+
641,
|
| 1302 |
+
856,
|
| 1303 |
+
700
|
| 1304 |
+
],
|
| 1305 |
+
"page_idx": 7
|
| 1306 |
+
},
|
| 1307 |
+
{
|
| 1308 |
+
"type": "text",
|
| 1309 |
+
"text": "6. Conclusion",
|
| 1310 |
+
"text_level": 1,
|
| 1311 |
+
"bbox": [
|
| 1312 |
+
500,
|
| 1313 |
+
784,
|
| 1314 |
+
617,
|
| 1315 |
+
800
|
| 1316 |
+
],
|
| 1317 |
+
"page_idx": 7
|
| 1318 |
+
},
|
| 1319 |
+
{
|
| 1320 |
+
"type": "text",
|
| 1321 |
+
"text": "Given the BEV layout as the hint, the most advanced generative networks can synthesize the street-view images with realistic and diverse appearances, thus enriching the data for training the BEV perception model and profiting the autonomous driving. This paper advocates the significance of strengthening the controlling power of the generative",
|
| 1322 |
+
"bbox": [
|
| 1323 |
+
496,
|
| 1324 |
+
809,
|
| 1325 |
+
890,
|
| 1326 |
+
901
|
| 1327 |
+
],
|
| 1328 |
+
"page_idx": 7
|
| 1329 |
+
},
|
| 1330 |
+
{
|
| 1331 |
+
"type": "page_number",
|
| 1332 |
+
"text": "8",
|
| 1333 |
+
"bbox": [
|
| 1334 |
+
478,
|
| 1335 |
+
924,
|
| 1336 |
+
488,
|
| 1337 |
+
935
|
| 1338 |
+
],
|
| 1339 |
+
"page_idx": 7
|
| 1340 |
+
},
|
| 1341 |
+
{
|
| 1342 |
+
"type": "image",
|
| 1343 |
+
"img_path": "images/510608a762fa986295dbf2061ce7820fc02820d3d12e5165d1731bfc328472b7.jpg",
|
| 1344 |
+
"image_caption": [],
|
| 1345 |
+
"image_footnote": [],
|
| 1346 |
+
"bbox": [
|
| 1347 |
+
94,
|
| 1348 |
+
99,
|
| 1349 |
+
872,
|
| 1350 |
+
368
|
| 1351 |
+
],
|
| 1352 |
+
"page_idx": 8
|
| 1353 |
+
},
|
| 1354 |
+
{
|
| 1355 |
+
"type": "image",
|
| 1356 |
+
"img_path": "images/6980728c7014132b2b73e6bca5230f8096fed313f51d9cca242f905776427c71.jpg",
|
| 1357 |
+
"image_caption": [
|
| 1358 |
+
"Figure 5. The visualization of foreground controlling generation. Compared to other methods, ours can generate objects that correspond more closely to the bounding box sketch conditions, especially the accurate orientation."
|
| 1359 |
+
],
|
| 1360 |
+
"image_footnote": [],
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
106,
|
| 1363 |
+
378,
|
| 1364 |
+
848,
|
| 1365 |
+
654
|
| 1366 |
+
],
|
| 1367 |
+
"page_idx": 8
|
| 1368 |
+
},
|
| 1369 |
+
{
|
| 1370 |
+
"type": "text",
|
| 1371 |
+
"text": "network for BEV perception. We propose a novel generative network, BEVControl, which relies on the sketches of the BEV layout to synthesize the background and foreground elements in the street-view images. By depending on more focused hints, BEVControl enables accurate control of the background and foreground elements, whose visual consistency across multiple perspectives is maintained by the cross-view-cross-element attention. Compared to the contemporary methods, a better controlling power allows",
|
| 1372 |
+
"bbox": [
|
| 1373 |
+
75,
|
| 1374 |
+
756,
|
| 1375 |
+
472,
|
| 1376 |
+
893
|
| 1377 |
+
],
|
| 1378 |
+
"page_idx": 8
|
| 1379 |
+
},
|
| 1380 |
+
{
|
| 1381 |
+
"type": "text",
|
| 1382 |
+
"text": "BEVControl to yield richer data for BEV perception.",
|
| 1383 |
+
"bbox": [
|
| 1384 |
+
500,
|
| 1385 |
+
756,
|
| 1386 |
+
849,
|
| 1387 |
+
772
|
| 1388 |
+
],
|
| 1389 |
+
"page_idx": 8
|
| 1390 |
+
},
|
| 1391 |
+
{
|
| 1392 |
+
"type": "text",
|
| 1393 |
+
"text": "In future work, we will investigate how to better control more kinds of visual elements like lighting and weather in the generated images rather than the background and foreground only. In addition to generating street-view images, we will also study how to transfer the idea of BEVControl to create more general scenes.",
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
498,
|
| 1396 |
+
810,
|
| 1397 |
+
890,
|
| 1398 |
+
900
|
| 1399 |
+
],
|
| 1400 |
+
"page_idx": 8
|
| 1401 |
+
},
|
| 1402 |
+
{
|
| 1403 |
+
"type": "page_number",
|
| 1404 |
+
"text": "9",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
478,
|
| 1407 |
+
924,
|
| 1408 |
+
491,
|
| 1409 |
+
936
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 8
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "image",
|
| 1415 |
+
"img_path": "images/a3ba3b08ea73c31a727271239cdeb896cf3715780946280edfd43ef8809d76b6.jpg",
|
| 1416 |
+
"image_caption": [
|
| 1417 |
+
"Figure 6. The visualization of background controlling generation. Compared to other methods, ours can generate street views that correspond more closely to the road sketch conditions."
|
| 1418 |
+
],
|
| 1419 |
+
"image_footnote": [],
|
| 1420 |
+
"bbox": [
|
| 1421 |
+
94,
|
| 1422 |
+
99,
|
| 1423 |
+
872,
|
| 1424 |
+
657
|
| 1425 |
+
],
|
| 1426 |
+
"page_idx": 9
|
| 1427 |
+
},
|
| 1428 |
+
{
|
| 1429 |
+
"type": "text",
|
| 1430 |
+
"text": "References",
|
| 1431 |
+
"text_level": 1,
|
| 1432 |
+
"bbox": [
|
| 1433 |
+
78,
|
| 1434 |
+
755,
|
| 1435 |
+
173,
|
| 1436 |
+
770
|
| 1437 |
+
],
|
| 1438 |
+
"page_idx": 9
|
| 1439 |
+
},
|
| 1440 |
+
{
|
| 1441 |
+
"type": "list",
|
| 1442 |
+
"sub_type": "ref_text",
|
| 1443 |
+
"list_items": [
|
| 1444 |
+
"[1] Adil Kaan Akan and Fatma Güney. Stretchbev: Stretching future instance prediction spatially and temporally. In European Conference on Computer Vision, pages 444-460. Springer, 2022. 2",
|
| 1445 |
+
"[2] Omri Avrahami, Thomas Hayes, Oran Gafni, Sonal Gupta, Yaniv Taigman, Devi Parikh, Dani Lischinski, Ohad Fried, and Xi Yin. Spatext: Spatio-textual representation for controllable image generation. In Proceedings of the IEEE/CVF"
|
| 1446 |
+
],
|
| 1447 |
+
"bbox": [
|
| 1448 |
+
84,
|
| 1449 |
+
782,
|
| 1450 |
+
470,
|
| 1451 |
+
900
|
| 1452 |
+
],
|
| 1453 |
+
"page_idx": 9
|
| 1454 |
+
},
|
| 1455 |
+
{
|
| 1456 |
+
"type": "list",
|
| 1457 |
+
"sub_type": "ref_text",
|
| 1458 |
+
"list_items": [
|
| 1459 |
+
"Conference on Computer Vision and Pattern Recognition, pages 18370-18380, 2023. 3",
|
| 1460 |
+
"[3] Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. Multidiffusion: Fusing diffusion paths for controlled image generation. 2023. 3",
|
| 1461 |
+
"[4] Dina Bashkirova, José Lezama, Kihyuk Sohn, Kate Saenko, and Irfan Essa. Masksketch: Unpaired structure-guided masked image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1879-1889, 2023. 3"
|
| 1462 |
+
],
|
| 1463 |
+
"bbox": [
|
| 1464 |
+
508,
|
| 1465 |
+
757,
|
| 1466 |
+
893,
|
| 1467 |
+
900
|
| 1468 |
+
],
|
| 1469 |
+
"page_idx": 9
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "page_number",
|
| 1473 |
+
"text": "10",
|
| 1474 |
+
"bbox": [
|
| 1475 |
+
475,
|
| 1476 |
+
924,
|
| 1477 |
+
493,
|
| 1478 |
+
936
|
| 1479 |
+
],
|
| 1480 |
+
"page_idx": 9
|
| 1481 |
+
},
|
| 1482 |
+
{
|
| 1483 |
+
"type": "image",
|
| 1484 |
+
"img_path": "images/29928e9f1c25b5caec90c5fbe3bdc175d9f6d353ee9b639c9de06334bdc8c111.jpg",
|
| 1485 |
+
"image_caption": [
|
| 1486 |
+
"Figure 7. The visualization of foreground controlling generation in various vehicle orientation."
|
| 1487 |
+
],
|
| 1488 |
+
"image_footnote": [],
|
| 1489 |
+
"bbox": [
|
| 1490 |
+
96,
|
| 1491 |
+
99,
|
| 1492 |
+
872,
|
| 1493 |
+
420
|
| 1494 |
+
],
|
| 1495 |
+
"page_idx": 10
|
| 1496 |
+
},
|
| 1497 |
+
{
|
| 1498 |
+
"type": "image",
|
| 1499 |
+
"img_path": "images/023dcb21da603e92fe8e3ed20d2f7398d63199420149f3f34b14842e9e949cf2.jpg",
|
| 1500 |
+
"image_caption": [
|
| 1501 |
+
"Figure 8. The visualization of background controlling generation in various road sketch."
|
| 1502 |
+
],
|
| 1503 |
+
"image_footnote": [],
|
| 1504 |
+
"bbox": [
|
| 1505 |
+
94,
|
| 1506 |
+
506,
|
| 1507 |
+
875,
|
| 1508 |
+
806
|
| 1509 |
+
],
|
| 1510 |
+
"page_idx": 10
|
| 1511 |
+
},
|
| 1512 |
+
{
|
| 1513 |
+
"type": "page_number",
|
| 1514 |
+
"text": "11",
|
| 1515 |
+
"bbox": [
|
| 1516 |
+
475,
|
| 1517 |
+
924,
|
| 1518 |
+
491,
|
| 1519 |
+
935
|
| 1520 |
+
],
|
| 1521 |
+
"page_idx": 10
|
| 1522 |
+
},
|
| 1523 |
+
{
|
| 1524 |
+
"type": "list",
|
| 1525 |
+
"sub_type": "ref_text",
|
| 1526 |
+
"list_items": [
|
| 1527 |
+
"[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 6",
|
| 1528 |
+
"[6] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2",
|
| 1529 |
+
"[7] Jiaxin Cheng, Xiao Liang, Xingjian Shi, Tong He, Tianjun Xiao, and Mu Li. Layoutdiffuse: Adapting foundational diffusion models for layout-to-image generation. arXiv preprint arXiv:2302.08908, 2023. 3",
|
| 1530 |
+
"[8] Ernie Chu, Shuo-Yen Lin, and Jun-Cheng Chen. Video controlnet: Towards temporally consistent synthetic-to-real video translation using conditional image diffusion models. arXiv preprint arXiv:2305.19193, 2023. 4",
|
| 1531 |
+
"[9] Cusuh Ham, James Hays, Jingwan Lu, Krishna Kumar Singh, Zhifei Zhang, and Tobias Hinz. Modulating pretrained diffusion models for multimodal image synthesis. arXiv preprint arXiv:2302.12764, 2023. 3",
|
| 1532 |
+
"[10] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5",
|
| 1533 |
+
"[11] Anthony Hu, Zak Murez, Nikhil Mohan, Sofia Dudas, Jeffrey Hawke, Vijay Badrinarayanan, Roberto Cipolla, and Alex Kendall. Fiery: Future instance prediction in bird's-eye view from surround monocular cameras. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15273-15282, 2021. 2",
|
| 1534 |
+
"[12] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778, 2023. 3",
|
| 1535 |
+
"[13] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 4",
|
| 1536 |
+
"[14] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 3, 5, 7",
|
| 1537 |
+
"[15] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In European conference on computer vision, pages 1-18. Springer, 2022. 2, 6, 7",
|
| 1538 |
+
"[16] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11976-11986, 2022. 5"
|
| 1539 |
+
],
|
| 1540 |
+
"bbox": [
|
| 1541 |
+
78,
|
| 1542 |
+
90,
|
| 1543 |
+
470,
|
| 1544 |
+
898
|
| 1545 |
+
],
|
| 1546 |
+
"page_idx": 11
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "list",
|
| 1550 |
+
"sub_type": "ref_text",
|
| 1551 |
+
"list_items": [
|
| 1552 |
+
"[17] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 5",
|
| 1553 |
+
"[18] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3",
|
| 1554 |
+
"[19] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 3",
|
| 1555 |
+
"[20] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 5, 6",
|
| 1556 |
+
"[21] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3",
|
| 1557 |
+
"[22] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 3, 4, 5",
|
| 1558 |
+
"[23] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 3",
|
| 1559 |
+
"[24] Alexander Swerdlow, Runsheng Xu, and Bolei Zhou. Street-view image generation from a bird's-eye view layout. arXiv preprint arXiv:2301.04634, 2023. 2, 4, 5, 7",
|
| 1560 |
+
"[25] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2818-2826, 2016. 6",
|
| 1561 |
+
"[26] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multi-view image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 3",
|
| 1562 |
+
"[27] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2",
|
| 1563 |
+
"[28] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. arXiv preprint arXiv:2211.13752, 2022. 3",
|
| 1564 |
+
"[29] Weilun Wang, Jianmin Bao, Wengang Zhou, Dongdong Chen, Dong Chen, Lu Yuan, and Houqiang Li. Semantic image synthesis via diffusion models. arXiv preprint arXiv:2207.00050, 2022.3"
|
| 1565 |
+
],
|
| 1566 |
+
"bbox": [
|
| 1567 |
+
501,
|
| 1568 |
+
90,
|
| 1569 |
+
893,
|
| 1570 |
+
898
|
| 1571 |
+
],
|
| 1572 |
+
"page_idx": 11
|
| 1573 |
+
},
|
| 1574 |
+
{
|
| 1575 |
+
"type": "page_number",
|
| 1576 |
+
"text": "12",
|
| 1577 |
+
"bbox": [
|
| 1578 |
+
475,
|
| 1579 |
+
924,
|
| 1580 |
+
493,
|
| 1581 |
+
936
|
| 1582 |
+
],
|
| 1583 |
+
"page_idx": 11
|
| 1584 |
+
},
|
| 1585 |
+
{
|
| 1586 |
+
"type": "list",
|
| 1587 |
+
"sub_type": "ref_text",
|
| 1588 |
+
"list_items": [
|
| 1589 |
+
"[30] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Weixian Lei, Yuchao Gu, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. arXiv preprint arXiv:2212.11565, 2022.4",
|
| 1590 |
+
"[31] Han Xue, Zhiwu Huang, Qianru Sun, Li Song, and Wenjun Zhang. Freestyle layout-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14256-14266, 2023. 3",
|
| 1591 |
+
"[32] Zhengyuan Yang, Jianfeng Wang, Zhe Gan, Linjie Li, Kevin Lin, Chenfei Wu, Nan Duan, Zicheng Liu, Ce Liu, Michael Zeng, et al. Reco: Region-controlled text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14246-14255, 2023. 3",
|
| 1592 |
+
"[33] Lvmin Zhang and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. arXiv preprint arXiv:2302.05543, 2023. 3",
|
| 1593 |
+
"[34] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 4",
|
| 1594 |
+
"[35] Yunpeng Zhang, Zheng Zhu, Wenzhao Zheng, Junjie Huang, Guan Huang, Jie Zhou, and Jiwen Lu. **Reverse: Unified perception and prediction in birds-eye-view for vision-centric autonomous driving. arXiv preprint arXiv:2205.09743, 2022.2**",
|
| 1595 |
+
"[36] Guangcong Zheng, Xianpan Zhou, Xuewei Li, Zhongang Qi, Ying Shan, and Xi Li. Layoutdiffusion: Controllable diffusion model for layout-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22490-22499, 2023. 3, 7",
|
| 1596 |
+
"[37] Brady Zhou and Philipp Krahenbuhl. Cross-view transformers for real-time map-view semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13760-13769, 2022. 2, 6, 7"
|
| 1597 |
+
],
|
| 1598 |
+
"bbox": [
|
| 1599 |
+
78,
|
| 1600 |
+
90,
|
| 1601 |
+
468,
|
| 1602 |
+
585
|
| 1603 |
+
],
|
| 1604 |
+
"page_idx": 12
|
| 1605 |
+
},
|
| 1606 |
+
{
|
| 1607 |
+
"type": "page_number",
|
| 1608 |
+
"text": "13",
|
| 1609 |
+
"bbox": [
|
| 1610 |
+
477,
|
| 1611 |
+
924,
|
| 1612 |
+
493,
|
| 1613 |
+
936
|
| 1614 |
+
],
|
| 1615 |
+
"page_idx": 12
|
| 1616 |
+
}
|
| 1617 |
+
]
|
data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_model.json
ADDED
|
@@ -0,0 +1,2107 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.264,
|
| 8 |
+
0.061,
|
| 9 |
+
0.707
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2308.01661v4 [cs.CV] 23 Sep 2023"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.165,
|
| 18 |
+
0.131,
|
| 19 |
+
0.806,
|
| 20 |
+
0.177
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "BEVControl: Accurately Controlling Street-view Elements with Multi-perspective Consistency via BEV Sketch Layout"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.12,
|
| 29 |
+
0.204,
|
| 30 |
+
0.91,
|
| 31 |
+
0.223
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Kairui Yang\\(^{1*}\\) Enhui Ma\\(^{1*}\\) Jibin Peng\\(^{1}\\) Qing Guo\\(^{2}\\) Jianping Wu\\(^{3}\\) Di Lin\\(^{1\\dagger}\\) Kaicheng Yu\\(^{4}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.089,
|
| 40 |
+
0.224,
|
| 41 |
+
0.938,
|
| 42 |
+
0.24
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "\\(^{1}\\)Tianjin University \\(^{2}\\)IHPC and CFAR, Agency for Science, Technology and Research, Singapore \\(^{3}\\)Tsinghua University \\(^{4}\\)Westlake University"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "image",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.099,
|
| 51 |
+
0.274,
|
| 52 |
+
0.873,
|
| 53 |
+
0.409
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": null
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "image_caption",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.404,
|
| 62 |
+
0.41,
|
| 63 |
+
0.603,
|
| 64 |
+
0.422
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "(a) Vanilla generative method"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "image",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.102,
|
| 73 |
+
0.424,
|
| 74 |
+
0.877,
|
| 75 |
+
0.546
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": null
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "image_caption",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.418,
|
| 84 |
+
0.547,
|
| 85 |
+
0.596,
|
| 86 |
+
0.561
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "(b) Two-stage BEVControl"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "image_caption",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.075,
|
| 95 |
+
0.565,
|
| 96 |
+
0.894,
|
| 97 |
+
0.677
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Figure 1. Comparison between different generative networks hinted by Bird's Eye View (BEV) segmentation layout v.s. sketch layout. (a) Vanilla generative pipeline feeds a semantic segmentation style input into a generative network and outputs reasonable multi-view images. However, we discover that it fails to generate accurate object-level details. For example, we show a common failure of a state-of-the-art algorithm where the generated vehicle has reversed heading compared to the target 3D bounding box. In addition, editing the semantic segmentation style input is a hard task and requires non-trivial human effort. (b) To this end, we propose a two-stage method that provides finer background and foreground geometry control, dubbed BEVControl. It supports sketch style input that enables fast and easy editing. In addition, our BEVControl decouples visual consistency into two sub-goals: achieving geometry consistency between street and bird's-eye views through the Controller; and achieving appearance consistency between street views through the Coordinator."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "title",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.235,
|
| 106 |
+
0.69,
|
| 107 |
+
0.312,
|
| 108 |
+
0.705
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Abstract"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.076,
|
| 117 |
+
0.71,
|
| 118 |
+
0.471,
|
| 119 |
+
0.86
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "Using synthesized images to boost the performance of perception models is a long-standing research challenge in computer vision. It becomes more eminent in visual-centric autonomous driving systems with multi-view cameras as some long-tail scenarios can never be collected. Guided by the BEV segmentation layouts, the existing generative networks seem to synthesize photo-realistic street-view images when evaluated solely on scene-level metrics. However, once zoom-in, they usually fail to produce accurate foreground and background details such as heading. To this end, we pro"
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.498,
|
| 128 |
+
0.691,
|
| 129 |
+
0.895,
|
| 130 |
+
0.887
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "pose a two-stage generative method, dubbed BEVControl, that can generate accurate foreground and background contents. In contrast to segmentation-like input, it also supports sketch style input, which is more flexible for humans to edit. In addition, we propose a comprehensive multi-level evaluation protocol to fairly compare the quality of the generated scene, foreground object, and background geometry. Our extensive experiments show that our BEVControl surpasses the state-of-the-art method, BEVGen, by a significant margin, from 5.89 to 26.80 on foreground segmentation mIoU. In addition, we show that using images generated by BEVControl to train the downstream perception model, it achieves on average 1.29 improvement in NDS score."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "page_footnote",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.08,
|
| 139 |
+
0.876,
|
| 140 |
+
0.173,
|
| 141 |
+
0.887
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "*Co-first authors."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "page_footnote",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.082,
|
| 150 |
+
0.888,
|
| 151 |
+
0.205,
|
| 152 |
+
0.9
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "†Corresponding author."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "list",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.08,
|
| 161 |
+
0.876,
|
| 162 |
+
0.205,
|
| 163 |
+
0.9
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": null
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "page_number",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.481,
|
| 172 |
+
0.925,
|
| 173 |
+
0.49,
|
| 174 |
+
0.936
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "1"
|
| 178 |
+
}
|
| 179 |
+
],
|
| 180 |
+
[
|
| 181 |
+
{
|
| 182 |
+
"type": "title",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.081,
|
| 185 |
+
0.091,
|
| 186 |
+
0.208,
|
| 187 |
+
0.106
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "1. Introduction"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.08,
|
| 196 |
+
0.116,
|
| 197 |
+
0.47,
|
| 198 |
+
0.251
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "BEV perception for autonomous driving has become popular. It requires understanding the objects in the streets captured from multiple cameras' views, where the things should correspond to the positions from the bird's-eye perspective. The street and bird's-eye views allow the autonomous driving car to broadly sense the objects, thus advancing the progress on an array of downstream applications (e.g., street-view object recognition [15, 37] and traffic flow prediction [1, 11, 35])."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.08,
|
| 207 |
+
0.252,
|
| 208 |
+
0.47,
|
| 209 |
+
0.462
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "In today's age of deep learning, reliable BEV perception heavily relies on deep networks trained on many street-view images and the corresponding BEV segmentation layouts, to enable the autonomous car's self-control. To achieve large-scale data for BEV perception, someone may employ autonomous vehicles to travel around the city while recording the street-view images by multiple cameras and mapping the objects to the BEV segmentation layout. Undoubtedly, this solution reduces the human effort for data collection. Yet, autonomous cars without perfect self-control may give rise to traffic congestion or even fatal accident. Moreover, someone must annotate objects across the street and bird's-eye views at an expensive cost. Extra effort is needed to double-check the consistency of annotation across various views."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.08,
|
| 218 |
+
0.464,
|
| 219 |
+
0.47,
|
| 220 |
+
0.659
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "Rather than laboriously collecting street-view images from the natural environment and annotating multi-view photos, there are many works [24] resort to the fast-growing family of generative networks [6, 27] for creating new street-view images with a realistic style, which augment the training data for BEV perception. As illustrated in Figure 1(a), these methods feed the BEV layout into the generative network. The BEV segmentation layout provides the semantic categories and spatial distribution of the objects in the street for the generative network, thus controlling the content of the generated images. Even with an identical BEV layout, the generative network can randomly associate diverse appearances to the objects already appearing in the street."
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.08,
|
| 229 |
+
0.66,
|
| 230 |
+
0.47,
|
| 231 |
+
0.901
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "In spite of the success of the generative networks, the current methods consider less about two critical issues when generating street-view images based on the BEV segmentation layouts. First, the BEV segmentation layout can be analogy to the panoptic segmentation map, where the background stuff and foreground objects unanimously have the pixel-wise annotations in details. It is inconvenient to edit the details of the BEV segmentation layout, thus disallowing many layouts with diversity to be produced for further enriching the street-view data. Second, the existing methods generally focus on improving the visual consistency between various street views and the geometric correspondence between the street and bird's-eye views. Nevertheless, only reasonable visual consistency and geometric correspondence are inadequate for the data augmentation, which also requires a diversity of visual elements (e.g., road layout, lane line,"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.503,
|
| 240 |
+
0.092,
|
| 241 |
+
0.893,
|
| 242 |
+
0.152
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "and vehicle position/heading) in the street-view images to enhance the generalization power of the BEV perception models. For this purpose, the generative network should accurately control the visual elements to achieve data diversity."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.504,
|
| 251 |
+
0.153,
|
| 252 |
+
0.893,
|
| 253 |
+
0.363
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "This paper proposes BEVControl, which has a strong power for controlling the visual elements of the generated street-view images based on the BEV sketch layout. We illustrate the architecture of BEVControl in Figure 1(b). BEVControl has the controller and coordinator. The controller relies on the sketches of the background (e.g., road layout and lane line) and foreground elements (e.g., vehicle and pedestrian), which are easier to be edited than the pixel-wise annotations on the segmentation layout, to control the appearances and geometric properties of these two kinds of elements in the generated street-view images separately. The coordinator attends to the underlying correlation between the background and foreground elements, whose visual consistency across different views is preserved."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.504,
|
| 262 |
+
0.364,
|
| 263 |
+
0.893,
|
| 264 |
+
0.62
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "The controller regards the background and foreground elements' sketches as hints. Here, the sketch and bounding boxes mainly represent the geometric shapes of the background and foreground elements. They are mapped from the identical BEV sketch layout, thus preserving the geometric correspondence between the elements across the street and bird's-eye views. With the hints attending to the background and foreground elements respectively, the controller employs the diffusion model to compute the latent feature maps of the street-view images, which represent various perspectives captured by multiple vehicle cameras. We feed these street-view feature maps to the coordinator. The coordinator uses a novel cross-view-cross-element attention mechanism to comprehensively model the context of visual elements in different views. It uses the context to enhance the visual consistency between the visual elements from multiple street perspectives, eventually producing street-view images."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.504,
|
| 273 |
+
0.621,
|
| 274 |
+
0.893,
|
| 275 |
+
0.755
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "We extract the BEV sketches from the public dataset, nuScenes [5], to drive BEVControl to generate the street-view images for the classical object detection task. In contrast to the current methods that primarily mind the usefulness of the generated data for improving the performances on down-stream tasks, we extensively evaluate the controlling power of BEVControl, which helps to yield richer training data and achieve state-of-the-art object detection performance on nuScenes. We brief our contributions below:"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.522,
|
| 284 |
+
0.763,
|
| 285 |
+
0.891,
|
| 286 |
+
0.793
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "- We use the cost-effective BEV sketch layouts to easier produce a large amount of street-view images."
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.522,
|
| 295 |
+
0.801,
|
| 296 |
+
0.892,
|
| 297 |
+
0.845
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "- We propose the sketch-based BEVControl, which has a strong control of the background and foreground elements in the generated street-view images."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.522,
|
| 306 |
+
0.856,
|
| 307 |
+
0.892,
|
| 308 |
+
0.899
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "- BEVControl remarkably augments the training dataset, which helps to achieve state-of-the-art object detection results on nuScenes."
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "list",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.522,
|
| 317 |
+
0.763,
|
| 318 |
+
0.892,
|
| 319 |
+
0.899
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": null
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "page_number",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.481,
|
| 328 |
+
0.926,
|
| 329 |
+
0.49,
|
| 330 |
+
0.936
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "2"
|
| 334 |
+
}
|
| 335 |
+
],
|
| 336 |
+
[
|
| 337 |
+
{
|
| 338 |
+
"type": "image",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.087,
|
| 341 |
+
0.087,
|
| 342 |
+
0.891,
|
| 343 |
+
0.272
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": null
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "image",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.084,
|
| 352 |
+
0.274,
|
| 353 |
+
0.885,
|
| 354 |
+
0.427
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": null
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "image_caption",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.076,
|
| 363 |
+
0.445,
|
| 364 |
+
0.896,
|
| 365 |
+
0.543
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "Figure 2. (a) Overview of BEVControl. It takes inputs as an edit-friendly BEV sketch \\( S \\), multi-view noisy images \\( \\mathcal{Z}_t \\) and text prompt, generating multi-view images \\( \\mathcal{Z}_0 \\). BEVControl is a UNet structure generative network composed of a sequence of modules. Each module has two elements, controller and coordinator. Each controller takes input from BEV sketch features extracted from the projection module. See Fig. 3 for more details. Text features are encoded cross-attention as in [22]. (b) Details of Controller. A controller module takes in the foreground and background location information of the camera views sketch in a self-attention manner and outputs the geometry-consistent street view features \\( \\mathcal{G}_t \\) concerning the BEV sketch \\( S \\). (c) Details of Coordinator. A coordinator module leverages a novel cross-view-cross-element attention mechanism that enables context interaction across views, outputting the appearance-consistent street view features \\( \\mathcal{A}_t \\)."
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "title",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.077,
|
| 374 |
+
0.586,
|
| 375 |
+
0.22,
|
| 376 |
+
0.602
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "2. Related Work"
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.076,
|
| 385 |
+
0.618,
|
| 386 |
+
0.471,
|
| 387 |
+
0.695
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "The literature on image generation is vast [22, 26]. We mainly survey the approaches to the conditional generation of images with visual consistency. These approaches are closely relevant to our work because they also leverage various types of image information to control the image contents."
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.076,
|
| 396 |
+
0.705,
|
| 397 |
+
0.473,
|
| 398 |
+
0.902
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "Image Generation via Multi-modal Information The recent progress in image generation is primarily attributed to the generative networks pre-trained on large-scale image data. Amidst a broad range of generative networks, the family of diffusion models [19, 21-23] lead a fashion of using multi-modal information for generating the image contents. The latent diffusion model [22] is a framework for producing images based on text. Note that the text-based information roughly specifies the image contents, disallowing a fine-grained control of the image contents. To address the above problem, the multi-modal image information like layout images [7, 14, 32, 36], semantic segmentation maps [2, 3, 9, 18, 29, 31], object sketches [4, 12, 18, 28, 33],"
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.499,
|
| 407 |
+
0.587,
|
| 408 |
+
0.892,
|
| 409 |
+
0.618
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "and depth images [12, 33] have been involved for hinting the image generation."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.498,
|
| 418 |
+
0.622,
|
| 419 |
+
0.895,
|
| 420 |
+
0.803
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "Generally, the above methods concentrate on generating a single image, where the image contents' semantic categories are aligned with the hints. This paper considers a more complex setting where the street-view image contents of multiple perspectives are generated. In addition to the semantic categories, we should accurately control the geometric properties of the generated street-view images. This goal is non-trivial, especially when the geometric patterns of the foreground and background contents are diverse. To achieve this goal, we resort to the appropriate modalities for controlling the foreground and background contents, thus enhancing the controlling power of the generative network."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.498,
|
| 429 |
+
0.81,
|
| 430 |
+
0.895,
|
| 431 |
+
0.901
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "Multi-view Image Generation with Visual Consistency The visual consistency is a natural property of the authentic images of multiple views. Similarly, we should preserve the visual consistency across multi-view images generated by the deep network. For this purpose, MVD-iffusion [26] uses the cross-view attention mechanism to"
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "page_number",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.48,
|
| 440 |
+
0.925,
|
| 441 |
+
0.491,
|
| 442 |
+
0.937
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "3"
|
| 446 |
+
}
|
| 447 |
+
],
|
| 448 |
+
[
|
| 449 |
+
{
|
| 450 |
+
"type": "text",
|
| 451 |
+
"bbox": [
|
| 452 |
+
0.076,
|
| 453 |
+
0.092,
|
| 454 |
+
0.473,
|
| 455 |
+
0.227
|
| 456 |
+
],
|
| 457 |
+
"angle": 0,
|
| 458 |
+
"content": "create panoramic images from text, maintaining the global correspondence of multi-view images. The video generation methods [8, 13, 30, 34] use the temporal cross-frame attention to preserve the visual consistency across distinct views of image contents at different moments. BEVGen [24] is a contemporary work that generates multi-view images of the street based on the BEV segmentation layout. It employs an auto-regressive transformer with cross-view attention to maintain visual consistency across multi-view images."
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"bbox": [
|
| 463 |
+
0.076,
|
| 464 |
+
0.228,
|
| 465 |
+
0.474,
|
| 466 |
+
0.381
|
| 467 |
+
],
|
| 468 |
+
"angle": 0,
|
| 469 |
+
"content": "The above methods usually work well when the global appearances of multi-view images coincide. But they are less effective for preserving the multi-view consistency when more accurate control of the individual contents (e.g., the orientations of different cars) is desired. This is because the independent operations of content control easily lead to inconsistency across other contents in the same venue. In contrast to the existing methods, we propose cross-view cross-object attention, which remarkably augments the visual consistency of the generated multi-view images."
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "title",
|
| 473 |
+
"bbox": [
|
| 474 |
+
0.077,
|
| 475 |
+
0.392,
|
| 476 |
+
0.21,
|
| 477 |
+
0.408
|
| 478 |
+
],
|
| 479 |
+
"angle": 0,
|
| 480 |
+
"content": "3. BEVControl"
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "text",
|
| 484 |
+
"bbox": [
|
| 485 |
+
0.076,
|
| 486 |
+
0.417,
|
| 487 |
+
0.473,
|
| 488 |
+
0.522
|
| 489 |
+
],
|
| 490 |
+
"angle": 0,
|
| 491 |
+
"content": "We illustrate the overall architecture of BEVControl in Figure 2. Following the LDM [22], BEVControl is a classic UNet structure consisting of an encoder and a decoder. They are composed of three modules stacked multiple times: controller, coordinator, and text cross-attention. We process all image features in the latent space, so the image features below specifically refer to those in the latent space."
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "text",
|
| 495 |
+
"bbox": [
|
| 496 |
+
0.076,
|
| 497 |
+
0.524,
|
| 498 |
+
0.474,
|
| 499 |
+
0.749
|
| 500 |
+
],
|
| 501 |
+
"angle": 0,
|
| 502 |
+
"content": "At first, BEVControl takes the edit-friendly BEV sketch \\(S \\in \\mathbb{R}^{K \\times K \\times 5}\\), text description, and street-view noisy images \\(\\mathcal{Z}_t = \\{\\mathbf{Z}_t^v \\in \\mathbb{R}^{H \\times W \\times C} \\mid v = 1, \\dots, V\\}\\) as input. Here, \\(V\\) denotes the number of perspective views. All sets denoted by \\(\\{\\cdot\\}\\) represent \\(V\\) viewpoints in the method below to foster better readability. \\(S\\) is an editable canvas, which supports editing the objects within a \\(160 \\times 160\\)-meter range around the ego car. The five channels of \\(S\\) represent the background sketch (road line), pixel coordinates of the box's center point, text label, and heading of foreground objects, respectively. In training, \\(\\mathcal{Z}_t\\) is a noisy version of street-view authentic images \\(\\mathcal{Z}_0\\) by forward diffusion process of [22]. In inference, \\(\\mathcal{Z}_t\\) is street-view noise sampled from \\(\\mathcal{N}(0, \\mathbf{I})\\). \\(H, W\\), and \\(C\\) expressly represent the spatial resolutions and channels of latent features."
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "text",
|
| 506 |
+
"bbox": [
|
| 507 |
+
0.076,
|
| 508 |
+
0.75,
|
| 509 |
+
0.474,
|
| 510 |
+
0.902
|
| 511 |
+
],
|
| 512 |
+
"angle": 0,
|
| 513 |
+
"content": "BEVControl first projects the BEV sketch \\(\\mathcal{S}\\) onto the 2D camera space as shown in Figure 3, computing a set of camera foreground conditions \\(\\mathcal{M} = \\{\\mathbf{b}^v,\\mathbf{l}^v,\\mathbf{h}^v\\}\\) and background conditions \\(\\mathcal{R} = \\{\\mathbf{R}^v\\}\\) of all view, which details see Sec. 3.1. Then we encode camera foreground and background conditions into a set of camera foreground and background embedding \\(\\mathcal{F} = \\{\\mathbf{F}^v\\in \\mathbb{R}^{N\\times C}\\}\\) and \\(\\mathcal{B} = \\{\\mathbf{B}^v\\in \\mathbb{R}^{(H\\times W)\\times C}\\}\\), where \\(N\\) denotes the number of bounding boxes in each view. Through the Controller, each perspective can obtain semantic control in"
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "image",
|
| 517 |
+
"bbox": [
|
| 518 |
+
0.503,
|
| 519 |
+
0.089,
|
| 520 |
+
0.891,
|
| 521 |
+
0.328
|
| 522 |
+
],
|
| 523 |
+
"angle": 0,
|
| 524 |
+
"content": null
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "image_caption",
|
| 528 |
+
"bbox": [
|
| 529 |
+
0.52,
|
| 530 |
+
0.345,
|
| 531 |
+
0.871,
|
| 532 |
+
0.361
|
| 533 |
+
],
|
| 534 |
+
"angle": 0,
|
| 535 |
+
"content": "Figure 3. The camera projection process from BEV sketch."
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "text",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.498,
|
| 541 |
+
0.375,
|
| 542 |
+
0.895,
|
| 543 |
+
0.512
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": "formation from the foreground and the background embedding of the corresponding camera view. This process results in the generation of geometry-consistent street-view latent features \\(\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}\\). Next, the geometry-consistent features \\(\\mathcal{G}_t\\) are fed into Coordinator. The Coordinator employs a novel cross-view-cross-element attention mechanism to enhance adjacent views' consistency, yielding the appearance-consistent street-view latent features \\(\\mathcal{A}_t = \\{\\mathbf{A}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}\\)."
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "text",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.498,
|
| 552 |
+
0.513,
|
| 553 |
+
0.896,
|
| 554 |
+
0.65
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "Then BEVControl employs the cross-attention mechanism of the diffusion model to handle the text prompt, allowing us to control the generated images' environmental factors (e.g., weather and lighting conditions). Then BEVControl repeats the execution of this UNet formed by stacking these three blocks \\( T \\) times. Eventually, the output is the generated street-view images \\( \\mathcal{Z}_0 = \\{\\mathbf{Z}_0^v \\in \\mathbb{R}^{H \\times W \\times C}\\} \\), which are geometry-consistent, appearance-consistent and caption-aligned."
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "title",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.5,
|
| 563 |
+
0.662,
|
| 564 |
+
0.618,
|
| 565 |
+
0.677
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "3.1. Controller"
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.498,
|
| 574 |
+
0.687,
|
| 575 |
+
0.893,
|
| 576 |
+
0.761
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "Based on the internal and external parameters of different cameras, we project the foreground and background classes of the BEV sketch \\(S\\) onto the corresponding pixel coordinate system to obtain the camera foreground conditions \\(\\mathcal{M}\\) and background conditions \\(\\mathcal{R}\\)."
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "text",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.498,
|
| 585 |
+
0.763,
|
| 586 |
+
0.895,
|
| 587 |
+
0.903
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "We define the camera foreground conditions as \\(\\mathcal{M} = \\{\\mathbf{b}^v,\\mathbf{l}^v,\\mathbf{h}^v\\}\\), where \\(\\mathbf{b}^v\\in [0,1]^{N\\times 4},\\mathbf{l}^v\\) and \\(\\mathbf{h}^v\\in [-180,180)^{N\\times 1}\\) denotes the normalized pixel coordinates of the upper left and lower right corners, text label and heading degree of \\(N\\) boxes in the current perspective. The camera background conditions \\(\\mathcal{R} = \\{\\mathbf{R}^v\\in \\mathbb{R}^{H\\times W\\times 3}\\}\\), which are spatially aligned with the authentic camera images, representing the trend of the road. Then we extract the camera foreground embedding \\(\\mathcal{F} = \\{\\mathbf{F}^v\\in \\mathbb{R}^{N\\times C}\\}\\) and background"
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "page_number",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.48,
|
| 596 |
+
0.925,
|
| 597 |
+
0.492,
|
| 598 |
+
0.937
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "4"
|
| 602 |
+
}
|
| 603 |
+
],
|
| 604 |
+
[
|
| 605 |
+
{
|
| 606 |
+
"type": "text",
|
| 607 |
+
"bbox": [
|
| 608 |
+
0.077,
|
| 609 |
+
0.09,
|
| 610 |
+
0.393,
|
| 611 |
+
0.108
|
| 612 |
+
],
|
| 613 |
+
"angle": 0,
|
| 614 |
+
"content": "embedding \\(\\mathcal{B} = \\{\\mathbf{B}^v\\in \\mathbb{R}^{(H\\times W)\\times C}\\}\\) as below:"
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"type": "equation",
|
| 618 |
+
"bbox": [
|
| 619 |
+
0.139,
|
| 620 |
+
0.119,
|
| 621 |
+
0.406,
|
| 622 |
+
0.136
|
| 623 |
+
],
|
| 624 |
+
"angle": 0,
|
| 625 |
+
"content": "\\[\n\\mathbf {F} ^ {v} = \\operatorname {l i n e a r} (\\operatorname {f e} (\\mathbf {b} ^ {v}) + \\operatorname {c t e} (\\mathbf {l} ^ {v}) + \\operatorname {f e} (\\mathbf {h} ^ {v})),\n\\]"
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "text",
|
| 629 |
+
"bbox": [
|
| 630 |
+
0.448,
|
| 631 |
+
0.133,
|
| 632 |
+
0.469,
|
| 633 |
+
0.147
|
| 634 |
+
],
|
| 635 |
+
"angle": 0,
|
| 636 |
+
"content": "(1)"
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "equation",
|
| 640 |
+
"bbox": [
|
| 641 |
+
0.221,
|
| 642 |
+
0.147,
|
| 643 |
+
0.326,
|
| 644 |
+
0.162
|
| 645 |
+
],
|
| 646 |
+
"angle": 0,
|
| 647 |
+
"content": "\\[\n\\mathbf {B} ^ {v} = \\operatorname {c n n} \\left(\\mathbf {R} ^ {v}\\right),\n\\]"
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "text",
|
| 651 |
+
"bbox": [
|
| 652 |
+
0.076,
|
| 653 |
+
0.173,
|
| 654 |
+
0.471,
|
| 655 |
+
0.279
|
| 656 |
+
],
|
| 657 |
+
"angle": 0,
|
| 658 |
+
"content": "where fe denotes Fourier Embedder [17], cte denotes CLIP Text Encoder [20], and cnn denotes a pre-trained CNN network [16]. Based on the existing extensive pre-trained diffusion model [14,22], we inject the foreground and background embedding \\(\\mathcal{F}\\) and \\(\\mathcal{B}\\) by adding two trainable self-attention layer to the UNet architecture. The calculation formula is shown below:"
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"type": "equation",
|
| 662 |
+
"bbox": [
|
| 663 |
+
0.1,
|
| 664 |
+
0.292,
|
| 665 |
+
0.471,
|
| 666 |
+
0.31
|
| 667 |
+
],
|
| 668 |
+
"angle": 0,
|
| 669 |
+
"content": "\\[\n\\mathbf {G} _ {t} ^ {v} = \\mathbf {Z} _ {t} ^ {v} + \\alpha \\cdot \\operatorname {s a} ([ \\mathbf {Z} _ {t} ^ {v}, \\mathbf {F} ^ {v} ]) + \\beta \\cdot \\operatorname {s a} ([ \\mathbf {Z} _ {t} ^ {v}, \\mathbf {B} ^ {v} ]), \\tag {2}\n\\]"
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"type": "text",
|
| 673 |
+
"bbox": [
|
| 674 |
+
0.076,
|
| 675 |
+
0.321,
|
| 676 |
+
0.471,
|
| 677 |
+
0.457
|
| 678 |
+
],
|
| 679 |
+
"angle": 0,
|
| 680 |
+
"content": "where \\([\\cdot]\\) denotes the concatenation operation. sa denotes the self-attention block. \\(\\alpha\\) and \\(\\beta\\) are trainable parameters initialized to 0. The introduced self-attention layer can effectively find the mapping relationship between visual latent features and various camera condition embedding. Therefore, the controller can utilize spatial hints to output a set of latent features \\(\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\in \\mathbb{R}^{H\\times W\\times C}\\}\\), which geometry is consistent with the corresponding camera foreground and background conditions."
|
| 681 |
+
},
|
| 682 |
+
{
|
| 683 |
+
"type": "title",
|
| 684 |
+
"bbox": [
|
| 685 |
+
0.077,
|
| 686 |
+
0.467,
|
| 687 |
+
0.21,
|
| 688 |
+
0.482
|
| 689 |
+
],
|
| 690 |
+
"angle": 0,
|
| 691 |
+
"content": "3.2. Coordinator"
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"type": "text",
|
| 695 |
+
"bbox": [
|
| 696 |
+
0.076,
|
| 697 |
+
0.491,
|
| 698 |
+
0.47,
|
| 699 |
+
0.536
|
| 700 |
+
],
|
| 701 |
+
"angle": 0,
|
| 702 |
+
"content": "Taking \\(\\mathcal{G}_t = \\{\\mathbf{G}_t^v\\}\\) as input, we employ the Coordinator to enhance the consistency of different views and make them look like somebody capture them from the same scene."
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "text",
|
| 706 |
+
"bbox": [
|
| 707 |
+
0.076,
|
| 708 |
+
0.537,
|
| 709 |
+
0.471,
|
| 710 |
+
0.779
|
| 711 |
+
],
|
| 712 |
+
"angle": 0,
|
| 713 |
+
"content": "Specifically, we propose a novel cross-view-cross-element attention mechanism that enables context interaction between the different views. Sufficient context interaction makes the semantics of visual elements in various perspectives uniform. According to the characteristics of ring-shaped cameras, each camera has the highest correlation with its adjacent cameras. Therefore, we carefully design each view to interact only with the contextual information of adjacent views, reducing the demand for computing resources. In particular, we let all camera views learn the context of their adjacent views in parallel. The context comprises two layers of information: global level and local level. The global level represents the entire latent feature of the previous perspective, while the local level refers to the specific element feature. Taking adjacent view \\( v \\) and \\( u \\) as an example, the learning context is \\( \\mathbf{k} \\), \\( \\mathbf{v} \\) as shown below for view \\( v \\):"
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "equation",
|
| 717 |
+
"bbox": [
|
| 718 |
+
0.217,
|
| 719 |
+
0.79,
|
| 720 |
+
0.329,
|
| 721 |
+
0.807
|
| 722 |
+
],
|
| 723 |
+
"angle": 0,
|
| 724 |
+
"content": "\\[\n\\mathbf {q} = \\operatorname {l i n e a r} \\left(\\mathbf {G} _ {t} ^ {v}\\right),\n\\]"
|
| 725 |
+
},
|
| 726 |
+
{
|
| 727 |
+
"type": "equation",
|
| 728 |
+
"bbox": [
|
| 729 |
+
0.186,
|
| 730 |
+
0.816,
|
| 731 |
+
0.47,
|
| 732 |
+
0.833
|
| 733 |
+
],
|
| 734 |
+
"angle": 0,
|
| 735 |
+
"content": "\\[\n\\mathbf {k} = \\operatorname {l i n e a r} \\left(\\left[ \\mathbf {G} _ {t} ^ {u}, \\mathbf {F} ^ {u}, \\mathbf {B} ^ {u} \\right]\\right), \\tag {3}\n\\]"
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "equation",
|
| 739 |
+
"bbox": [
|
| 740 |
+
0.186,
|
| 741 |
+
0.843,
|
| 742 |
+
0.36,
|
| 743 |
+
0.86
|
| 744 |
+
],
|
| 745 |
+
"angle": 0,
|
| 746 |
+
"content": "\\[\n\\mathbf {v} = \\operatorname {l i n e a r} \\left(\\left[ \\mathbf {G} _ {t} ^ {u}, \\mathbf {F} ^ {u}, \\mathbf {B} ^ {u} \\right]\\right),\n\\]"
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "text",
|
| 750 |
+
"bbox": [
|
| 751 |
+
0.076,
|
| 752 |
+
0.871,
|
| 753 |
+
0.471,
|
| 754 |
+
0.9
|
| 755 |
+
],
|
| 756 |
+
"angle": 0,
|
| 757 |
+
"content": "where linear modules above are independent of each other, and we set \\( u = 1 \\), \\( v = V \\) or \\( v = u + 1 \\) to enforce \\( u \\) and"
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"bbox": [
|
| 762 |
+
0.5,
|
| 763 |
+
0.092,
|
| 764 |
+
0.894,
|
| 765 |
+
0.122
|
| 766 |
+
],
|
| 767 |
+
"angle": 0,
|
| 768 |
+
"content": "\\(v\\) as the adjacent views. The context interaction process of adjacent views is formulated as:"
|
| 769 |
+
},
|
| 770 |
+
{
|
| 771 |
+
"type": "equation",
|
| 772 |
+
"bbox": [
|
| 773 |
+
0.58,
|
| 774 |
+
0.129,
|
| 775 |
+
0.894,
|
| 776 |
+
0.148
|
| 777 |
+
],
|
| 778 |
+
"angle": 0,
|
| 779 |
+
"content": "\\[\n\\mathbf {A} _ {t} ^ {v} = \\mathbf {G} _ {t} ^ {v} + \\mathbf {v} ^ {\\top} \\cdot \\operatorname {s o f t m a x} (\\mathbf {k} \\cdot \\mathbf {q} ^ {\\top}). \\tag {4}\n\\]"
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "text",
|
| 783 |
+
"bbox": [
|
| 784 |
+
0.498,
|
| 785 |
+
0.155,
|
| 786 |
+
0.895,
|
| 787 |
+
0.291
|
| 788 |
+
],
|
| 789 |
+
"angle": 0,
|
| 790 |
+
"content": "We perform the above operation on all views in parallel, resulting in a set of street-view latent feature \\(\\mathcal{A}_t = \\{\\mathbf{A}_t^v\\in\\) \\(\\mathbb{R}^{H\\times W\\times C}\\}\\). By interacting between the global and local levels, global information, such as environmental conditions and weather, and local information, such as object appearance and identity, can be transmitted from the previous to the following perspective. Thus the cross-view-cross-element attention effectively improves the appearance consistency of the street-view images."
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "title",
|
| 794 |
+
"bbox": [
|
| 795 |
+
0.5,
|
| 796 |
+
0.299,
|
| 797 |
+
0.681,
|
| 798 |
+
0.315
|
| 799 |
+
],
|
| 800 |
+
"angle": 0,
|
| 801 |
+
"content": "3.3. Training Objective"
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "text",
|
| 805 |
+
"bbox": [
|
| 806 |
+
0.498,
|
| 807 |
+
0.322,
|
| 808 |
+
0.894,
|
| 809 |
+
0.444
|
| 810 |
+
],
|
| 811 |
+
"angle": 0,
|
| 812 |
+
"content": "By repeatedly applying the above UNet \\(\\epsilon_{\\theta}\\) in the latent space, we can obtain street-view images with gradually reduced noise. By adding \\(t\\) step noise \\(\\epsilon\\) to the original clear images \\(\\mathcal{Z}_0\\), we obtain a noisy version \\(\\mathcal{Z}_t\\) of the images. We train \\(\\epsilon_{\\theta}\\) to predict the noise we added. Following the training objective of the original LDM [22], we finetune the pretrained diffusion model [14] to adapt to new conditions \\(c\\) (e.g. BEV sketch and text prompt):"
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "equation",
|
| 816 |
+
"bbox": [
|
| 817 |
+
0.539,
|
| 818 |
+
0.451,
|
| 819 |
+
0.893,
|
| 820 |
+
0.473
|
| 821 |
+
],
|
| 822 |
+
"angle": 0,
|
| 823 |
+
"content": "\\[\n\\min _ {\\theta} \\mathcal {L} = \\mathbb {E} _ {\\mathcal {Z} _ {0}, \\epsilon \\sim \\mathcal {N} (\\mathbf {0}, \\mathbf {I}), t, c} \\left[ \\| \\epsilon - \\epsilon_ {\\theta} \\left(\\mathcal {Z} _ {t}, t, c\\right) \\| _ {2} ^ {2} \\right], \\tag {5}\n\\]"
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "text",
|
| 827 |
+
"bbox": [
|
| 828 |
+
0.498,
|
| 829 |
+
0.481,
|
| 830 |
+
0.894,
|
| 831 |
+
0.572
|
| 832 |
+
],
|
| 833 |
+
"angle": 0,
|
| 834 |
+
"content": "where time step \\( t \\) is uniformly sampled from \\( [1, T] \\), and \\( \\theta \\) refers to the newly added layer in the UNet. We only train the newly introduced layer while freezing the layers of the original diffusion model. This approach reduces memory consumption and avoids knowledge forgetting and model collapsing."
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "title",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.499,
|
| 840 |
+
0.585,
|
| 841 |
+
0.889,
|
| 842 |
+
0.602
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "4. Evaluation Metrics for Content Controlling"
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "text",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.498,
|
| 851 |
+
0.61,
|
| 852 |
+
0.895,
|
| 853 |
+
0.791
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "Recent street view image generation works [24] only evaluate the generation quality based on scene-level metrics such as FID, vehicle mIoU, and road mIoU. However, we found that using only these metrics cannot evaluate the true generation ability of the generative network. As shown in the Figure 4, the reported qualitative and quantitative results simultaneously indicate that several sets of generated street view images with similar FID scores have vastly different fine-grained control abilities over foreground and background. Therefore in this section, we introduce the evaluation metrics in our experiment for measuring the controlling power of the generative network."
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "text",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.498,
|
| 862 |
+
0.795,
|
| 863 |
+
0.895,
|
| 864 |
+
0.901
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": "Evaluation Metrics for Realism, Diversity, and Consistency Given the street-view images \\(\\{\\mathbf{X}_v' \\mid v = 1, \\dots, V\\}\\) from \\(V\\) perspectives output by the image decoder of CLIP [20], we use the Frechet Inception Distance (FID) [10] to measure the realism and diversity of the generated street-view images. We compute the FID between the latent features of the generated and real images, which capture the"
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "page_number",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.481,
|
| 873 |
+
0.925,
|
| 874 |
+
0.49,
|
| 875 |
+
0.937
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "5"
|
| 879 |
+
}
|
| 880 |
+
],
|
| 881 |
+
[
|
| 882 |
+
{
|
| 883 |
+
"type": "image",
|
| 884 |
+
"bbox": [
|
| 885 |
+
0.084,
|
| 886 |
+
0.09,
|
| 887 |
+
0.468,
|
| 888 |
+
0.498
|
| 889 |
+
],
|
| 890 |
+
"angle": 0,
|
| 891 |
+
"content": null
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"type": "image_caption",
|
| 895 |
+
"bbox": [
|
| 896 |
+
0.123,
|
| 897 |
+
0.517,
|
| 898 |
+
0.424,
|
| 899 |
+
0.53
|
| 900 |
+
],
|
| 901 |
+
"angle": 0,
|
| 902 |
+
"content": "Figure 4. Comparison of detail evaluation metrics."
|
| 903 |
+
},
|
| 904 |
+
{
|
| 905 |
+
"type": "text",
|
| 906 |
+
"bbox": [
|
| 907 |
+
0.077,
|
| 908 |
+
0.542,
|
| 909 |
+
0.47,
|
| 910 |
+
0.603
|
| 911 |
+
],
|
| 912 |
+
"angle": 0,
|
| 913 |
+
"content": "same perspective's foreground and background contents. Here, we employ the Inception-V3 network [25] to extract the latent features of the generated and real images. We compute the average FID score \\( S_{\\mathrm{FID}} \\in \\mathbb{R} \\) as:"
|
| 914 |
+
},
|
| 915 |
+
{
|
| 916 |
+
"type": "equation",
|
| 917 |
+
"bbox": [
|
| 918 |
+
0.157,
|
| 919 |
+
0.611,
|
| 920 |
+
0.47,
|
| 921 |
+
0.652
|
| 922 |
+
],
|
| 923 |
+
"angle": 0,
|
| 924 |
+
"content": "\\[\nS _ {\\mathrm {F I D}} = \\frac {1}{V} \\sum_ {v = 1} ^ {V} \\operatorname {f i d} \\left(\\sigma \\left(\\mathbf {X} _ {v}\\right), \\sigma \\left(\\mathbf {X} _ {v} ^ {\\prime}\\right)\\right). \\tag {6}\n\\]"
|
| 925 |
+
},
|
| 926 |
+
{
|
| 927 |
+
"type": "text",
|
| 928 |
+
"bbox": [
|
| 929 |
+
0.077,
|
| 930 |
+
0.66,
|
| 931 |
+
0.469,
|
| 932 |
+
0.737
|
| 933 |
+
],
|
| 934 |
+
"angle": 0,
|
| 935 |
+
"content": "\\(\\{\\mathbf{X}_v \\in \\mathbb{R}^{H \\times W \\times 3} \\mid v = 1, \\dots, V\\}\\) are the real images. We denote \\(\\sigma\\) as the Inception-V3 network. \\(\\sigma(\\mathbf{X}_v), \\sigma(\\mathbf{X}_v') \\in \\mathbb{R}^C\\) are the latent features of the \\(v^{th}\\) perspective's generated and real images. A lower FID score \\(S_{\\mathrm{FID}}\\) means that the generated contents are more realistic and diverse."
|
| 936 |
+
},
|
| 937 |
+
{
|
| 938 |
+
"type": "text",
|
| 939 |
+
"bbox": [
|
| 940 |
+
0.076,
|
| 941 |
+
0.738,
|
| 942 |
+
0.47,
|
| 943 |
+
0.813
|
| 944 |
+
],
|
| 945 |
+
"angle": 0,
|
| 946 |
+
"content": "To evaluate the visual consistency between the generated street-view images, we compute the CLIP score [20], based on the latent features of the overlap between the adjacent perspectives of the generated street-view images. We calculate the CLIP score \\( S_{\\mathrm{CLIP}} \\in \\mathbb{R} \\) as:"
|
| 947 |
+
},
|
| 948 |
+
{
|
| 949 |
+
"type": "equation",
|
| 950 |
+
"bbox": [
|
| 951 |
+
0.148,
|
| 952 |
+
0.82,
|
| 953 |
+
0.47,
|
| 954 |
+
0.856
|
| 955 |
+
],
|
| 956 |
+
"angle": 0,
|
| 957 |
+
"content": "\\[\nS _ {\\mathrm {C L I P}} = \\frac {1}{V} \\sum_ {u, v} \\operatorname {c l i p} \\left(\\psi \\left(\\mathbf {X} _ {u} ^ {\\prime}\\right), \\psi \\left(\\mathbf {X} _ {v} ^ {\\prime}\\right)\\right), \\tag {7}\n\\]"
|
| 958 |
+
},
|
| 959 |
+
{
|
| 960 |
+
"type": "equation",
|
| 961 |
+
"bbox": [
|
| 962 |
+
0.169,
|
| 963 |
+
0.861,
|
| 964 |
+
0.395,
|
| 965 |
+
0.876
|
| 966 |
+
],
|
| 967 |
+
"angle": 0,
|
| 968 |
+
"content": "\\[\ns. t., u = 1, v = V \\text {o r} v = u + 1,\n\\]"
|
| 969 |
+
},
|
| 970 |
+
{
|
| 971 |
+
"type": "text",
|
| 972 |
+
"bbox": [
|
| 973 |
+
0.077,
|
| 974 |
+
0.886,
|
| 975 |
+
0.469,
|
| 976 |
+
0.902
|
| 977 |
+
],
|
| 978 |
+
"angle": 0,
|
| 979 |
+
"content": "where \\(\\mathbf{X}_u^{\\prime},\\mathbf{X}_v^{\\prime}\\) are the generated street-view images of the"
|
| 980 |
+
},
|
| 981 |
+
{
|
| 982 |
+
"type": "text",
|
| 983 |
+
"bbox": [
|
| 984 |
+
0.499,
|
| 985 |
+
0.091,
|
| 986 |
+
0.892,
|
| 987 |
+
0.138
|
| 988 |
+
],
|
| 989 |
+
"angle": 0,
|
| 990 |
+
"content": "adjacent perspectives. We denote \\(\\psi (\\mathbf{X}_{v - 1}^{\\prime}),\\psi (\\mathbf{X}_v^{\\prime})\\in \\mathbb{R}^{C}\\) as latent features of the overlap between \\(\\mathbf{X}_u^\\prime ,\\mathbf{X}_v^\\prime\\) . A higher CLIP score means satisfactory visual consistency."
|
| 991 |
+
},
|
| 992 |
+
{
|
| 993 |
+
"type": "text",
|
| 994 |
+
"bbox": [
|
| 995 |
+
0.499,
|
| 996 |
+
0.141,
|
| 997 |
+
0.893,
|
| 998 |
+
0.457
|
| 999 |
+
],
|
| 1000 |
+
"angle": 0,
|
| 1001 |
+
"content": "Evaluation Metrics for Foreground and Background Controlling We employ the official detection metrics of the nuScenes dataset, i.e., the mean average precision (mAP), the nuScenes detection score (NDS), and the mean average orientation error (mAOE), for measuring the foreground controlling score. We denote the scores of mAP, NDS, and mAOE as \\( S_{\\mathrm{AP}} \\), \\( S_{\\mathrm{NDS}} \\), and \\( S_{\\mathrm{AOE}} \\). Specifically, based on the generated street-view images \\( \\{\\mathbf{X}_v' \\mid v = 1,\\dots,V\\} \\), we use BEVFormer [15] trained on the nuScenes dataset to detect the foreground objects on the BEV layouts. We achieve the scores \\( S_{\\mathrm{AP}} \\), \\( S_{\\mathrm{NDS}} \\), and \\( S_{\\mathrm{AOE}} \\) of foreground object detection by comparing the detection results with the BEV layout used for generating the street-view images. We use CVT [37] trained on the nuScenes dataset to segment the foreground on the BEV layouts and report the foreground mean intersection-over-union (fIoU) performance denoted as \\( S_{\\mathrm{floU}} \\). To evaluate the background controlling power of the generative network, we employ CVT to segment the background contents on the BEV layouts and report the performance in terms of the background mean intersection-over-union (bIoU) denoted as \\( S_{\\mathrm{bloU}} \\)."
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "text",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
0.499,
|
| 1007 |
+
0.459,
|
| 1008 |
+
0.894,
|
| 1009 |
+
0.534
|
| 1010 |
+
],
|
| 1011 |
+
"angle": 0,
|
| 1012 |
+
"content": "We remark that higher scores of \\( S_{\\mathrm{AP}} \\), \\( S_{\\mathrm{NDS}} \\), \\( S_{\\mathrm{fIoU}} \\), \\( S_{\\mathrm{bIoU}} \\), and a lower score of \\( S_{\\mathrm{AOE}} \\) mean a good controlling power of the generative network, which produces the foreground and background contents corresponding to the ground-truth annotations in the BEV layouts."
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "text",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
0.499,
|
| 1018 |
+
0.539,
|
| 1019 |
+
0.892,
|
| 1020 |
+
0.628
|
| 1021 |
+
],
|
| 1022 |
+
"angle": 0,
|
| 1023 |
+
"content": "Overall Evaluation Metric We propose a combinatorial metric to summarize the above metrics that measure the controlling power of the generative network from separate aspects. We name this combinatorial metric as the overall controlling score (OCS) denoted \\( S_{\\mathrm{OCS}} \\). We compute \\( S_{\\mathrm{OCS}} \\) as:"
|
| 1024 |
+
},
|
| 1025 |
+
{
|
| 1026 |
+
"type": "equation",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
0.52,
|
| 1029 |
+
0.634,
|
| 1030 |
+
0.892,
|
| 1031 |
+
0.667
|
| 1032 |
+
],
|
| 1033 |
+
"angle": 0,
|
| 1034 |
+
"content": "\\[\nS _ {\\mathrm {O C S}} = \\frac {U _ {\\mathrm {F I D}}}{S _ {\\mathrm {F I D}}} + \\frac {S _ {\\mathrm {C L I P}}}{U _ {\\mathrm {C L I P}}} + \\frac {S _ {\\mathrm {N D S}}}{U _ {\\mathrm {N D S}}} + \\frac {S _ {\\mathrm {f l o U}}}{U _ {\\mathrm {f l o U}}} + \\frac {S _ {\\mathrm {b l o U}}}{U _ {\\mathrm {b l o U}}}. \\tag {8}\n\\]"
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "text",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
0.499,
|
| 1040 |
+
0.674,
|
| 1041 |
+
0.894,
|
| 1042 |
+
0.795
|
| 1043 |
+
],
|
| 1044 |
+
"angle": 0,
|
| 1045 |
+
"content": "The scores \\(\\{S_{\\mathrm{FID}}, S_{\\mathrm{CLIP}}, S_{\\mathrm{NDS}}, S_{\\mathrm{floU}}, S_{\\mathrm{bIoU}}\\}\\) are achieved by using BEVFormer to detect and CVT to segment street-view contents on the BEV layouts, according to the generated images. We define another set of reference scores \\(\\{U_{\\mathrm{FID}}, U_{\\mathrm{CLIP}}, U_{\\mathrm{NDS}}, U_{\\mathrm{floU}}, U_{\\mathrm{bIoU}}\\}\\), which are detection and segmentation performances based on the authentic images. A high score of \\(S_{\\mathrm{OCS}}\\) means the entire controlling power is strong."
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "title",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
0.5,
|
| 1051 |
+
0.808,
|
| 1052 |
+
0.706,
|
| 1053 |
+
0.824
|
| 1054 |
+
],
|
| 1055 |
+
"angle": 0,
|
| 1056 |
+
"content": "5. Experimental Results"
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "title",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
0.5,
|
| 1062 |
+
0.833,
|
| 1063 |
+
0.596,
|
| 1064 |
+
0.847
|
| 1065 |
+
],
|
| 1066 |
+
"angle": 0,
|
| 1067 |
+
"content": "5.1. Dataset"
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "text",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
0.499,
|
| 1073 |
+
0.856,
|
| 1074 |
+
0.894,
|
| 1075 |
+
0.901
|
| 1076 |
+
],
|
| 1077 |
+
"angle": 0,
|
| 1078 |
+
"content": "We use the public nuScenes dataset [5] to examine the effectiveness of our method. nuScenes contains 1,000 examples of street-view scenes. There are 700/150/150 train"
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "page_number",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
0.48,
|
| 1084 |
+
0.926,
|
| 1085 |
+
0.491,
|
| 1086 |
+
0.937
|
| 1087 |
+
],
|
| 1088 |
+
"angle": 0,
|
| 1089 |
+
"content": "6"
|
| 1090 |
+
}
|
| 1091 |
+
],
|
| 1092 |
+
[
|
| 1093 |
+
{
|
| 1094 |
+
"type": "table",
|
| 1095 |
+
"bbox": [
|
| 1096 |
+
0.08,
|
| 1097 |
+
0.089,
|
| 1098 |
+
0.894,
|
| 1099 |
+
0.244
|
| 1100 |
+
],
|
| 1101 |
+
"angle": 0,
|
| 1102 |
+
"content": "<table><tr><td rowspan=\"3\">Method</td><td>Real & Diverse</td><td>Consistency</td><td colspan=\"4\">Foreground Control</td><td>Background Control</td><td rowspan=\"3\">\\( S_{OCS} \\uparrow \\)</td></tr><tr><td rowspan=\"2\">\\( S_{FID} \\downarrow \\)</td><td rowspan=\"2\">\\( S_{CLIP} \\uparrow \\)</td><td colspan=\"3\">Detection</td><td>Segmentation</td><td>Segmentation</td></tr><tr><td>\\( S_{AP} \\uparrow \\)</td><td>\\( S_{NDS} \\uparrow \\)</td><td>\\( S_{AOE} \\downarrow \\)</td><td>\\( S_{floU} \\uparrow \\)</td><td>\\( S_{bIoU} \\uparrow \\)</td></tr><tr><td>Reference-score</td><td>0.01</td><td>87.96</td><td>36.04</td><td>44.10</td><td>0.42</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>BEVGen [24]</td><td>25.54</td><td>-</td><td>-</td><td>-</td><td>-</td><td>5.89</td><td>50.20</td><td>-</td></tr><tr><td>LayoutDiffusion [36]</td><td>29.64</td><td>79.80</td><td>3.68</td><td>14.68</td><td>1.31</td><td>15.51</td><td>35.31</td><td>2.16</td></tr><tr><td>GLIGEN [14]</td><td>31.34</td><td>78.80</td><td>15.42</td><td>22.35</td><td>1.22</td><td>22.02</td><td>38.12</td><td>2.55</td></tr><tr><td>BEVControl</td><td>24.85(↓ 6.49)</td><td>82.70(↑ 3.9)</td><td>19.64(↑ 4.22)</td><td>28.68(↑ 6.33)</td><td>0.78(↓ 0.44)</td><td>26.80(↑ 4.78)</td><td>60.80(↑ 22.68)</td><td>3.18(↑ 0.63)</td></tr></table>"
|
| 1103 |
+
},
|
| 1104 |
+
{
|
| 1105 |
+
"type": "table_caption",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
0.076,
|
| 1108 |
+
0.254,
|
| 1109 |
+
0.893,
|
| 1110 |
+
0.283
|
| 1111 |
+
],
|
| 1112 |
+
"angle": 0,
|
| 1113 |
+
"content": "Table 1. We compare BEVControl with state-of-the-art methods on the validation subset of nuScenes. The results measure the controlling power of different methods. \\( \\downarrow / \\uparrow \\) means a smaller/larger value of the metric represents a better performance."
|
| 1114 |
+
},
|
| 1115 |
+
{
|
| 1116 |
+
"type": "text",
|
| 1117 |
+
"bbox": [
|
| 1118 |
+
0.076,
|
| 1119 |
+
0.309,
|
| 1120 |
+
0.471,
|
| 1121 |
+
0.414
|
| 1122 |
+
],
|
| 1123 |
+
"angle": 0,
|
| 1124 |
+
"content": "ing/validation/testing examples. Each example records about 40 frames of BEV layouts. Each frame of the BEV layout is associated with six street-view RGB images, which are captured by an ego vehicle's side, front, and back cameras. We follow the convention [24] to sample 600 frames of BEV layouts from the validation set, forming a validation subset to evaluate our method."
|
| 1125 |
+
},
|
| 1126 |
+
{
|
| 1127 |
+
"type": "text",
|
| 1128 |
+
"bbox": [
|
| 1129 |
+
0.076,
|
| 1130 |
+
0.416,
|
| 1131 |
+
0.472,
|
| 1132 |
+
0.552
|
| 1133 |
+
],
|
| 1134 |
+
"angle": 0,
|
| 1135 |
+
"content": "The objects in each BEV layout are annotated as the foreground and background. For object detection, the foreground includes ten categories (i.e., car, bus, truck, trailer, motorcycle, bicycle, construction vehicle, pedestrian, barrier, and traffic cone), while the background is the road. For object segmentation, the categories of car, bus, truck, trailer, motorcycle, bicycle, and construction vehicle are merged into the vehicle category. Thus, each BEV layout contains the binary categories of vehicle and road."
|
| 1136 |
+
},
|
| 1137 |
+
{
|
| 1138 |
+
"type": "title",
|
| 1139 |
+
"bbox": [
|
| 1140 |
+
0.077,
|
| 1141 |
+
0.567,
|
| 1142 |
+
0.295,
|
| 1143 |
+
0.582
|
| 1144 |
+
],
|
| 1145 |
+
"angle": 0,
|
| 1146 |
+
"content": "5.2. Visual Element Control"
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"type": "text",
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
0.076,
|
| 1152 |
+
0.593,
|
| 1153 |
+
0.472,
|
| 1154 |
+
0.775
|
| 1155 |
+
],
|
| 1156 |
+
"angle": 0,
|
| 1157 |
+
"content": "In Table 1, we compare BEVControl with the recent methods [14,24,36], which can also generate the street-view images based on the BEV layout. Given a BEV layout, each method generates a set of street-view images from 6 perspectives. The results in Table 1 measure the quality of the generated images and the controlling power of the compared methods on the background foreground objects. We also report the performance improvement of BEVControl relative to the GLIGEN [14] in the last row. BEVControl achieves a higher OGS than other methods (see the right-most column of Table 1). Below, we evaluate the detailed performances of controlling various visual elements."
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "text",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
0.076,
|
| 1163 |
+
0.78,
|
| 1164 |
+
0.472,
|
| 1165 |
+
0.901
|
| 1166 |
+
],
|
| 1167 |
+
"angle": 0,
|
| 1168 |
+
"content": "Realism and Diversity In Table 1 \"Real & Diverse\", we measure the realism and diversity of the image data generated by different methods in terms of the Frechet Inception Distance (FID). BEVControl achieves 24.85 FID, outperforming other methods. We also compare the street-view images generated differently in Figure 5 and 6, where BEVControl produces a higher image quality than the compared methods."
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "text",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
0.498,
|
| 1174 |
+
0.309,
|
| 1175 |
+
0.895,
|
| 1176 |
+
0.506
|
| 1177 |
+
],
|
| 1178 |
+
"angle": 0,
|
| 1179 |
+
"content": "Foreground Control In Table 1 \"Foreground Control\", we examine the controlling power of different methods on the foreground objects. In this examination, we use BEVFormer [15] to detect the ten categories of foreground objects on the BEV layouts, reporting the performance in terms of mAP, NDS, and mAOE. We use CVT [37] to segment the foreground (i.e., vehicle) on the BEV layouts, whose performance is reported in terms of mIoU. In the first row of Table 1 \"Reference-score\", we report the detection performances of BEVFormer and segmentation performances of CVT on the original validation subset. These results can be regarded as the Reference-score performance of BEVFormer and CVT for measuring the controlling power of BEVControl."
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "text",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
0.498,
|
| 1185 |
+
0.527,
|
| 1186 |
+
0.897,
|
| 1187 |
+
0.679
|
| 1188 |
+
],
|
| 1189 |
+
"angle": 0,
|
| 1190 |
+
"content": "Based on the data generated by BEVControl, BEVFormer and CVT achieve better detection and segmentation performances than other generative models. It demonstrates a more substantial controlling power of BEVControl on the foreground objects. We compare the generated foreground objects by different methods in Figure 5, where BEVControl satisfactorily yields the foreground objects according to the ground-truth annotations. Furthermore, Figure 7 demonstrates the generation capability of BEVControl for user-drawn BEV sketches of different vehicle orientations."
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "text",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
0.498,
|
| 1196 |
+
0.705,
|
| 1197 |
+
0.897,
|
| 1198 |
+
0.901
|
| 1199 |
+
],
|
| 1200 |
+
"angle": 0,
|
| 1201 |
+
"content": "Background Control In Table 1 \"Background Control\", we study the controlling power of different methods on the background objects. Again, we use the trained CVT, which uses the generated street-view images to segment the background (i.e., road) on the BEV layouts. We report the segmentation accuracy mIoU on the road category. We also compare the generated roads by different methods in Figure 6. Compared to the street-view images generated by other methods, those generated by BEVControl lead to a better segmentation accuracy, which means a more substantial controlling power of BEVControl on the background objects. Additionally, Figure 8 displays the generation capability of BEVControl for user-edited BEV sketches of different road traffic situations."
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "page_number",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
0.48,
|
| 1207 |
+
0.925,
|
| 1208 |
+
0.492,
|
| 1209 |
+
0.936
|
| 1210 |
+
],
|
| 1211 |
+
"angle": 0,
|
| 1212 |
+
"content": "7"
|
| 1213 |
+
}
|
| 1214 |
+
],
|
| 1215 |
+
[
|
| 1216 |
+
{
|
| 1217 |
+
"type": "table",
|
| 1218 |
+
"bbox": [
|
| 1219 |
+
0.08,
|
| 1220 |
+
0.089,
|
| 1221 |
+
0.472,
|
| 1222 |
+
0.21
|
| 1223 |
+
],
|
| 1224 |
+
"angle": 0,
|
| 1225 |
+
"content": "<table><tr><td rowspan=\"2\">Controller</td><td colspan=\"2\">FC</td><td>BC</td><td rowspan=\"2\">Socs ↑</td></tr><tr><td>SNDS ↑</td><td>SfloU ↑</td><td>SbIoU ↑</td></tr><tr><td>Reference-score</td><td>44.10</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>foreground</td><td>25.23</td><td>22.50</td><td>41.70</td><td>2.69</td></tr><tr><td>background</td><td>3.70</td><td>3.53</td><td>49.71</td><td>1.74</td></tr><tr><td>both w/o separation</td><td>26.87</td><td>23.78</td><td>52.30</td><td>2.90</td></tr><tr><td>both w/ separation</td><td>28.68</td><td>26.80</td><td>60.80</td><td>3.18</td></tr></table>"
|
| 1226 |
+
},
|
| 1227 |
+
{
|
| 1228 |
+
"type": "table_footnote",
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
0.076,
|
| 1231 |
+
0.221,
|
| 1232 |
+
0.47,
|
| 1233 |
+
0.276
|
| 1234 |
+
],
|
| 1235 |
+
"angle": 0,
|
| 1236 |
+
"content": "Table 2. Different strategies of using the foreground and background hints for controlling the visual elements. The evaluation metrics (i.e., FID, scores of foreground and background control) reported in this table are the same to those in Table 1."
|
| 1237 |
+
},
|
| 1238 |
+
{
|
| 1239 |
+
"type": "title",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
0.077,
|
| 1242 |
+
0.302,
|
| 1243 |
+
0.339,
|
| 1244 |
+
0.318
|
| 1245 |
+
],
|
| 1246 |
+
"angle": 0,
|
| 1247 |
+
"content": "5.3. Ablation Study on Controller"
|
| 1248 |
+
},
|
| 1249 |
+
{
|
| 1250 |
+
"type": "text",
|
| 1251 |
+
"bbox": [
|
| 1252 |
+
0.076,
|
| 1253 |
+
0.325,
|
| 1254 |
+
0.47,
|
| 1255 |
+
0.446
|
| 1256 |
+
],
|
| 1257 |
+
"angle": 0,
|
| 1258 |
+
"content": "The controller of BEVControl regards the bounding boxes and the road sketches as hints. The diffusion model uses these hints to generate the foreground and background objects in the street-view images. Here, we experiment with different strategies of using the foreground and background hints to examine their effect on controlling the visual elements in the generated street-view images. We report the quantitative results in Table 2."
|
| 1259 |
+
},
|
| 1260 |
+
{
|
| 1261 |
+
"type": "text",
|
| 1262 |
+
"bbox": [
|
| 1263 |
+
0.076,
|
| 1264 |
+
0.446,
|
| 1265 |
+
0.47,
|
| 1266 |
+
0.567
|
| 1267 |
+
],
|
| 1268 |
+
"angle": 0,
|
| 1269 |
+
"content": "First, we use the foreground or background hint only, reporting the scores of FID, foreground control (FC), and background control (BC) in the second and third rows of Table 2. Without foreground or background hint, we degrade the realism and diversity of the generated images, while the score of foreground or background control also decreases consequently. These results demonstrate the importance of using the foreground and background hints together."
|
| 1270 |
+
},
|
| 1271 |
+
{
|
| 1272 |
+
"type": "text",
|
| 1273 |
+
"bbox": [
|
| 1274 |
+
0.076,
|
| 1275 |
+
0.567,
|
| 1276 |
+
0.471,
|
| 1277 |
+
0.809
|
| 1278 |
+
],
|
| 1279 |
+
"angle": 0,
|
| 1280 |
+
"content": "Next, we compare various strategies for using both foreground and background hints. We evaluate an alternative method that employs the foreground and background hints without separately controlling the visual elements. We use a single attention layer to jointly embed the foreground and background hints into latent space. The controller uses the latent embedding of these hints for outputting an information-controlled feature map. The coordinator relies on the information-controlled feature map to generate the street-view images. We report the foreground or background control scores in the fourth row of Table 2. Though the scores are higher than those achieved using the foreground or background hint alone, they still lag behind the results of the entire controller in the fifth row. The complete controller uses separate network streams to enable a more focused control of the foreground and background objects."
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "title",
|
| 1284 |
+
"bbox": [
|
| 1285 |
+
0.077,
|
| 1286 |
+
0.818,
|
| 1287 |
+
0.355,
|
| 1288 |
+
0.833
|
| 1289 |
+
],
|
| 1290 |
+
"angle": 0,
|
| 1291 |
+
"content": "5.4. Ablation Study on Coordinator"
|
| 1292 |
+
},
|
| 1293 |
+
{
|
| 1294 |
+
"type": "text",
|
| 1295 |
+
"bbox": [
|
| 1296 |
+
0.076,
|
| 1297 |
+
0.84,
|
| 1298 |
+
0.47,
|
| 1299 |
+
0.902
|
| 1300 |
+
],
|
| 1301 |
+
"angle": 0,
|
| 1302 |
+
"content": "The coordinator utilizes the CVCE attention to enhance the visual consistency between the generated street-view images. Here, we use the street-view images from various perspectives to compute the CLIP score for measuring visual"
|
| 1303 |
+
},
|
| 1304 |
+
{
|
| 1305 |
+
"type": "table",
|
| 1306 |
+
"bbox": [
|
| 1307 |
+
0.553,
|
| 1308 |
+
0.089,
|
| 1309 |
+
0.842,
|
| 1310 |
+
0.184
|
| 1311 |
+
],
|
| 1312 |
+
"angle": 0,
|
| 1313 |
+
"content": "<table><tr><td>Coordinator</td><td>\\( S_{CLIP} \\uparrow \\)</td></tr><tr><td>Reference-score</td><td>87.96</td></tr><tr><td>w/o coordinator</td><td>79.50</td></tr><tr><td>w/ CV, w/o CE</td><td>82.30</td></tr><tr><td>w/ CVCE</td><td>82.70</td></tr></table>"
|
| 1314 |
+
},
|
| 1315 |
+
{
|
| 1316 |
+
"type": "table_footnote",
|
| 1317 |
+
"bbox": [
|
| 1318 |
+
0.498,
|
| 1319 |
+
0.195,
|
| 1320 |
+
0.892,
|
| 1321 |
+
0.251
|
| 1322 |
+
],
|
| 1323 |
+
"angle": 0,
|
| 1324 |
+
"content": "Table 3. Different strategies of using the coordinator for yielding the street-view images from different perspectives. We report the results as CLIP scores, which measure the visual consistency of the street-view images."
|
| 1325 |
+
},
|
| 1326 |
+
{
|
| 1327 |
+
"type": "text",
|
| 1328 |
+
"bbox": [
|
| 1329 |
+
0.498,
|
| 1330 |
+
0.278,
|
| 1331 |
+
0.891,
|
| 1332 |
+
0.308
|
| 1333 |
+
],
|
| 1334 |
+
"angle": 0,
|
| 1335 |
+
"content": "consistency. In Table 3, we compare the CLIP scores for the street-view images generated by different alternatives."
|
| 1336 |
+
},
|
| 1337 |
+
{
|
| 1338 |
+
"type": "text",
|
| 1339 |
+
"bbox": [
|
| 1340 |
+
0.498,
|
| 1341 |
+
0.309,
|
| 1342 |
+
0.892,
|
| 1343 |
+
0.43
|
| 1344 |
+
],
|
| 1345 |
+
"angle": 0,
|
| 1346 |
+
"content": "We remove the coordinator from BEVControl, which has the controller alone. This alternative significantly degrades the visual consistency of the generated street-view images (see the CLIP score in the second row of Table 3). We improve the CLIP score by adding the coordinator with the cross-view attention but without the cross-element attention (see the third row). This result demonstrates the positive impact of cross-view attention on visual consistency."
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "title",
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
0.499,
|
| 1352 |
+
0.439,
|
| 1353 |
+
0.847,
|
| 1354 |
+
0.456
|
| 1355 |
+
],
|
| 1356 |
+
"angle": 0,
|
| 1357 |
+
"content": "5.5. Data Augmentation for Object Detection"
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "text",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
0.498,
|
| 1363 |
+
0.463,
|
| 1364 |
+
0.892,
|
| 1365 |
+
0.629
|
| 1366 |
+
],
|
| 1367 |
+
"angle": 0,
|
| 1368 |
+
"content": "Based on each BEV layout from the training set of the nuScenes dataset, we again employ BEVControl to generate a set of street-view images. Note that the BEV layout and the generated street-view images can be used together for augmenting the training set of nuScenes. We use the generated data for training BEVFormer for object detection on the BEV layout from the train subset. We report the performances of object detection in Table 4. Compared to the BEVFormer trained without data augmentation (see the first row), the counterpart with data augmentation yields better results (see the second row)."
|
| 1369 |
+
},
|
| 1370 |
+
{
|
| 1371 |
+
"type": "table",
|
| 1372 |
+
"bbox": [
|
| 1373 |
+
0.538,
|
| 1374 |
+
0.642,
|
| 1375 |
+
0.857,
|
| 1376 |
+
0.701
|
| 1377 |
+
],
|
| 1378 |
+
"angle": 0,
|
| 1379 |
+
"content": "<table><tr><td>Method</td><td>SAP↑</td><td>SNDS↑</td><td>SAOE↓</td></tr><tr><td>w/o augmentation</td><td>37.00</td><td>47.90</td><td>0.66</td></tr><tr><td>w/ augmentation</td><td>38.96</td><td>49.19</td><td>0.42</td></tr></table>"
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "table_footnote",
|
| 1383 |
+
"bbox": [
|
| 1384 |
+
0.498,
|
| 1385 |
+
0.711,
|
| 1386 |
+
0.892,
|
| 1387 |
+
0.752
|
| 1388 |
+
],
|
| 1389 |
+
"angle": 0,
|
| 1390 |
+
"content": "Table 4. Application of using the generated street-view images for augmenting the training data. We report the detection performances of BEVFormer on the validation set."
|
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "title",
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
0.5,
|
| 1396 |
+
0.785,
|
| 1397 |
+
0.619,
|
| 1398 |
+
0.801
|
| 1399 |
+
],
|
| 1400 |
+
"angle": 0,
|
| 1401 |
+
"content": "6. Conclusion"
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "text",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
0.498,
|
| 1407 |
+
0.81,
|
| 1408 |
+
0.892,
|
| 1409 |
+
0.902
|
| 1410 |
+
],
|
| 1411 |
+
"angle": 0,
|
| 1412 |
+
"content": "Given the BEV layout as the hint, the most advanced generative networks can synthesize the street-view images with realistic and diverse appearances, thus enriching the data for training the BEV perception model and profiting the autonomous driving. This paper advocates the significance of strengthening the controlling power of the generative"
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "page_number",
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
0.48,
|
| 1418 |
+
0.925,
|
| 1419 |
+
0.49,
|
| 1420 |
+
0.936
|
| 1421 |
+
],
|
| 1422 |
+
"angle": 0,
|
| 1423 |
+
"content": "8"
|
| 1424 |
+
}
|
| 1425 |
+
],
|
| 1426 |
+
[
|
| 1427 |
+
{
|
| 1428 |
+
"type": "image",
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
0.095,
|
| 1431 |
+
0.101,
|
| 1432 |
+
0.874,
|
| 1433 |
+
0.369
|
| 1434 |
+
],
|
| 1435 |
+
"angle": 0,
|
| 1436 |
+
"content": null
|
| 1437 |
+
},
|
| 1438 |
+
{
|
| 1439 |
+
"type": "image",
|
| 1440 |
+
"bbox": [
|
| 1441 |
+
0.107,
|
| 1442 |
+
0.379,
|
| 1443 |
+
0.849,
|
| 1444 |
+
0.655
|
| 1445 |
+
],
|
| 1446 |
+
"angle": 0,
|
| 1447 |
+
"content": null
|
| 1448 |
+
},
|
| 1449 |
+
{
|
| 1450 |
+
"type": "image_caption",
|
| 1451 |
+
"bbox": [
|
| 1452 |
+
0.076,
|
| 1453 |
+
0.684,
|
| 1454 |
+
0.893,
|
| 1455 |
+
0.713
|
| 1456 |
+
],
|
| 1457 |
+
"angle": 0,
|
| 1458 |
+
"content": "Figure 5. The visualization of foreground controlling generation. Compared to other methods, ours can generate objects that correspond more closely to the bounding box sketch conditions, especially the accurate orientation."
|
| 1459 |
+
},
|
| 1460 |
+
{
|
| 1461 |
+
"type": "text",
|
| 1462 |
+
"bbox": [
|
| 1463 |
+
0.076,
|
| 1464 |
+
0.757,
|
| 1465 |
+
0.473,
|
| 1466 |
+
0.894
|
| 1467 |
+
],
|
| 1468 |
+
"angle": 0,
|
| 1469 |
+
"content": "network for BEV perception. We propose a novel generative network, BEVControl, which relies on the sketches of the BEV layout to synthesize the background and foreground elements in the street-view images. By depending on more focused hints, BEVControl enables accurate control of the background and foreground elements, whose visual consistency across multiple perspectives is maintained by the cross-view-cross-element attention. Compared to the contemporary methods, a better controlling power allows"
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "text",
|
| 1473 |
+
"bbox": [
|
| 1474 |
+
0.5,
|
| 1475 |
+
0.757,
|
| 1476 |
+
0.851,
|
| 1477 |
+
0.773
|
| 1478 |
+
],
|
| 1479 |
+
"angle": 0,
|
| 1480 |
+
"content": "BEVControl to yield richer data for BEV perception."
|
| 1481 |
+
},
|
| 1482 |
+
{
|
| 1483 |
+
"type": "text",
|
| 1484 |
+
"bbox": [
|
| 1485 |
+
0.499,
|
| 1486 |
+
0.811,
|
| 1487 |
+
0.892,
|
| 1488 |
+
0.901
|
| 1489 |
+
],
|
| 1490 |
+
"angle": 0,
|
| 1491 |
+
"content": "In future work, we will investigate how to better control more kinds of visual elements like lighting and weather in the generated images rather than the background and foreground only. In addition to generating street-view images, we will also study how to transfer the idea of BEVControl to create more general scenes."
|
| 1492 |
+
},
|
| 1493 |
+
{
|
| 1494 |
+
"type": "page_number",
|
| 1495 |
+
"bbox": [
|
| 1496 |
+
0.48,
|
| 1497 |
+
0.925,
|
| 1498 |
+
0.492,
|
| 1499 |
+
0.937
|
| 1500 |
+
],
|
| 1501 |
+
"angle": 0,
|
| 1502 |
+
"content": "9"
|
| 1503 |
+
}
|
| 1504 |
+
],
|
| 1505 |
+
[
|
| 1506 |
+
{
|
| 1507 |
+
"type": "image",
|
| 1508 |
+
"bbox": [
|
| 1509 |
+
0.095,
|
| 1510 |
+
0.101,
|
| 1511 |
+
0.874,
|
| 1512 |
+
0.658
|
| 1513 |
+
],
|
| 1514 |
+
"angle": 0,
|
| 1515 |
+
"content": null
|
| 1516 |
+
},
|
| 1517 |
+
{
|
| 1518 |
+
"type": "image_caption",
|
| 1519 |
+
"bbox": [
|
| 1520 |
+
0.076,
|
| 1521 |
+
0.684,
|
| 1522 |
+
0.893,
|
| 1523 |
+
0.713
|
| 1524 |
+
],
|
| 1525 |
+
"angle": 0,
|
| 1526 |
+
"content": "Figure 6. The visualization of background controlling generation. Compared to other methods, ours can generate street views that correspond more closely to the road sketch conditions."
|
| 1527 |
+
},
|
| 1528 |
+
{
|
| 1529 |
+
"type": "title",
|
| 1530 |
+
"bbox": [
|
| 1531 |
+
0.079,
|
| 1532 |
+
0.756,
|
| 1533 |
+
0.174,
|
| 1534 |
+
0.771
|
| 1535 |
+
],
|
| 1536 |
+
"angle": 0,
|
| 1537 |
+
"content": "References"
|
| 1538 |
+
},
|
| 1539 |
+
{
|
| 1540 |
+
"type": "ref_text",
|
| 1541 |
+
"bbox": [
|
| 1542 |
+
0.085,
|
| 1543 |
+
0.784,
|
| 1544 |
+
0.471,
|
| 1545 |
+
0.839
|
| 1546 |
+
],
|
| 1547 |
+
"angle": 0,
|
| 1548 |
+
"content": "[1] Adil Kaan Akan and Fatma Güney. Stretchbev: Stretching future instance prediction spatially and temporally. In European Conference on Computer Vision, pages 444-460. Springer, 2022. 2"
|
| 1549 |
+
},
|
| 1550 |
+
{
|
| 1551 |
+
"type": "ref_text",
|
| 1552 |
+
"bbox": [
|
| 1553 |
+
0.085,
|
| 1554 |
+
0.845,
|
| 1555 |
+
0.472,
|
| 1556 |
+
0.901
|
| 1557 |
+
],
|
| 1558 |
+
"angle": 0,
|
| 1559 |
+
"content": "[2] Omri Avrahami, Thomas Hayes, Oran Gafni, Sonal Gupta, Yaniv Taigman, Devi Parikh, Dani Lischinski, Ohad Fried, and Xi Yin. Spatext: Spatio-textual representation for controllable image generation. In Proceedings of the IEEE/CVF"
|
| 1560 |
+
},
|
| 1561 |
+
{
|
| 1562 |
+
"type": "list",
|
| 1563 |
+
"bbox": [
|
| 1564 |
+
0.085,
|
| 1565 |
+
0.784,
|
| 1566 |
+
0.472,
|
| 1567 |
+
0.901
|
| 1568 |
+
],
|
| 1569 |
+
"angle": 0,
|
| 1570 |
+
"content": null
|
| 1571 |
+
},
|
| 1572 |
+
{
|
| 1573 |
+
"type": "ref_text",
|
| 1574 |
+
"bbox": [
|
| 1575 |
+
0.533,
|
| 1576 |
+
0.758,
|
| 1577 |
+
0.892,
|
| 1578 |
+
0.786
|
| 1579 |
+
],
|
| 1580 |
+
"angle": 0,
|
| 1581 |
+
"content": "Conference on Computer Vision and Pattern Recognition, pages 18370-18380, 2023. 3"
|
| 1582 |
+
},
|
| 1583 |
+
{
|
| 1584 |
+
"type": "ref_text",
|
| 1585 |
+
"bbox": [
|
| 1586 |
+
0.509,
|
| 1587 |
+
0.788,
|
| 1588 |
+
0.894,
|
| 1589 |
+
0.829
|
| 1590 |
+
],
|
| 1591 |
+
"angle": 0,
|
| 1592 |
+
"content": "[3] Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. Multidiffusion: Fusing diffusion paths for controlled image generation. 2023. 3"
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "ref_text",
|
| 1596 |
+
"bbox": [
|
| 1597 |
+
0.509,
|
| 1598 |
+
0.831,
|
| 1599 |
+
0.895,
|
| 1600 |
+
0.901
|
| 1601 |
+
],
|
| 1602 |
+
"angle": 0,
|
| 1603 |
+
"content": "[4] Dina Bashkirova, José Lezama, Kihyuk Sohn, Kate Saenko, and Irfan Essa. Masksketch: Unpaired structure-guided masked image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1879-1889, 2023. 3"
|
| 1604 |
+
},
|
| 1605 |
+
{
|
| 1606 |
+
"type": "list",
|
| 1607 |
+
"bbox": [
|
| 1608 |
+
0.509,
|
| 1609 |
+
0.758,
|
| 1610 |
+
0.895,
|
| 1611 |
+
0.901
|
| 1612 |
+
],
|
| 1613 |
+
"angle": 0,
|
| 1614 |
+
"content": null
|
| 1615 |
+
},
|
| 1616 |
+
{
|
| 1617 |
+
"type": "page_number",
|
| 1618 |
+
"bbox": [
|
| 1619 |
+
0.477,
|
| 1620 |
+
0.925,
|
| 1621 |
+
0.495,
|
| 1622 |
+
0.937
|
| 1623 |
+
],
|
| 1624 |
+
"angle": 0,
|
| 1625 |
+
"content": "10"
|
| 1626 |
+
}
|
| 1627 |
+
],
|
| 1628 |
+
[
|
| 1629 |
+
{
|
| 1630 |
+
"type": "image",
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
0.097,
|
| 1633 |
+
0.101,
|
| 1634 |
+
0.874,
|
| 1635 |
+
0.421
|
| 1636 |
+
],
|
| 1637 |
+
"angle": 0,
|
| 1638 |
+
"content": null
|
| 1639 |
+
},
|
| 1640 |
+
{
|
| 1641 |
+
"type": "image_caption",
|
| 1642 |
+
"bbox": [
|
| 1643 |
+
0.202,
|
| 1644 |
+
0.449,
|
| 1645 |
+
0.766,
|
| 1646 |
+
0.464
|
| 1647 |
+
],
|
| 1648 |
+
"angle": 0,
|
| 1649 |
+
"content": "Figure 7. The visualization of foreground controlling generation in various vehicle orientation."
|
| 1650 |
+
},
|
| 1651 |
+
{
|
| 1652 |
+
"type": "image",
|
| 1653 |
+
"bbox": [
|
| 1654 |
+
0.095,
|
| 1655 |
+
0.507,
|
| 1656 |
+
0.877,
|
| 1657 |
+
0.807
|
| 1658 |
+
],
|
| 1659 |
+
"angle": 0,
|
| 1660 |
+
"content": null
|
| 1661 |
+
},
|
| 1662 |
+
{
|
| 1663 |
+
"type": "image_caption",
|
| 1664 |
+
"bbox": [
|
| 1665 |
+
0.223,
|
| 1666 |
+
0.834,
|
| 1667 |
+
0.746,
|
| 1668 |
+
0.848
|
| 1669 |
+
],
|
| 1670 |
+
"angle": 0,
|
| 1671 |
+
"content": "Figure 8. The visualization of background controlling generation in various road sketch."
|
| 1672 |
+
},
|
| 1673 |
+
{
|
| 1674 |
+
"type": "page_number",
|
| 1675 |
+
"bbox": [
|
| 1676 |
+
0.477,
|
| 1677 |
+
0.925,
|
| 1678 |
+
0.493,
|
| 1679 |
+
0.936
|
| 1680 |
+
],
|
| 1681 |
+
"angle": 0,
|
| 1682 |
+
"content": "11"
|
| 1683 |
+
}
|
| 1684 |
+
],
|
| 1685 |
+
[
|
| 1686 |
+
{
|
| 1687 |
+
"type": "ref_text",
|
| 1688 |
+
"bbox": [
|
| 1689 |
+
0.087,
|
| 1690 |
+
0.092,
|
| 1691 |
+
0.472,
|
| 1692 |
+
0.176
|
| 1693 |
+
],
|
| 1694 |
+
"angle": 0,
|
| 1695 |
+
"content": "[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 6"
|
| 1696 |
+
},
|
| 1697 |
+
{
|
| 1698 |
+
"type": "ref_text",
|
| 1699 |
+
"bbox": [
|
| 1700 |
+
0.087,
|
| 1701 |
+
0.178,
|
| 1702 |
+
0.472,
|
| 1703 |
+
0.234
|
| 1704 |
+
],
|
| 1705 |
+
"angle": 0,
|
| 1706 |
+
"content": "[6] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2"
|
| 1707 |
+
},
|
| 1708 |
+
{
|
| 1709 |
+
"type": "ref_text",
|
| 1710 |
+
"bbox": [
|
| 1711 |
+
0.087,
|
| 1712 |
+
0.235,
|
| 1713 |
+
0.472,
|
| 1714 |
+
0.289
|
| 1715 |
+
],
|
| 1716 |
+
"angle": 0,
|
| 1717 |
+
"content": "[7] Jiaxin Cheng, Xiao Liang, Xingjian Shi, Tong He, Tianjun Xiao, and Mu Li. Layoutdiffuse: Adapting foundational diffusion models for layout-to-image generation. arXiv preprint arXiv:2302.08908, 2023. 3"
|
| 1718 |
+
},
|
| 1719 |
+
{
|
| 1720 |
+
"type": "ref_text",
|
| 1721 |
+
"bbox": [
|
| 1722 |
+
0.087,
|
| 1723 |
+
0.291,
|
| 1724 |
+
0.472,
|
| 1725 |
+
0.348
|
| 1726 |
+
],
|
| 1727 |
+
"angle": 0,
|
| 1728 |
+
"content": "[8] Ernie Chu, Shuo-Yen Lin, and Jun-Cheng Chen. Video controlnet: Towards temporally consistent synthetic-to-real video translation using conditional image diffusion models. arXiv preprint arXiv:2305.19193, 2023. 4"
|
| 1729 |
+
},
|
| 1730 |
+
{
|
| 1731 |
+
"type": "ref_text",
|
| 1732 |
+
"bbox": [
|
| 1733 |
+
0.087,
|
| 1734 |
+
0.349,
|
| 1735 |
+
0.472,
|
| 1736 |
+
0.404
|
| 1737 |
+
],
|
| 1738 |
+
"angle": 0,
|
| 1739 |
+
"content": "[9] Cusuh Ham, James Hays, Jingwan Lu, Krishna Kumar Singh, Zhifei Zhang, and Tobias Hinz. Modulating pretrained diffusion models for multimodal image synthesis. arXiv preprint arXiv:2302.12764, 2023. 3"
|
| 1740 |
+
},
|
| 1741 |
+
{
|
| 1742 |
+
"type": "ref_text",
|
| 1743 |
+
"bbox": [
|
| 1744 |
+
0.079,
|
| 1745 |
+
0.406,
|
| 1746 |
+
0.472,
|
| 1747 |
+
0.475
|
| 1748 |
+
],
|
| 1749 |
+
"angle": 0,
|
| 1750 |
+
"content": "[10] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5"
|
| 1751 |
+
},
|
| 1752 |
+
{
|
| 1753 |
+
"type": "ref_text",
|
| 1754 |
+
"bbox": [
|
| 1755 |
+
0.079,
|
| 1756 |
+
0.477,
|
| 1757 |
+
0.472,
|
| 1758 |
+
0.56
|
| 1759 |
+
],
|
| 1760 |
+
"angle": 0,
|
| 1761 |
+
"content": "[11] Anthony Hu, Zak Murez, Nikhil Mohan, Sofia Dudas, Jeffrey Hawke, Vijay Badrinarayanan, Roberto Cipolla, and Alex Kendall. Fiery: Future instance prediction in bird's-eye view from surround monocular cameras. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15273-15282, 2021. 2"
|
| 1762 |
+
},
|
| 1763 |
+
{
|
| 1764 |
+
"type": "ref_text",
|
| 1765 |
+
"bbox": [
|
| 1766 |
+
0.079,
|
| 1767 |
+
0.562,
|
| 1768 |
+
0.472,
|
| 1769 |
+
0.617
|
| 1770 |
+
],
|
| 1771 |
+
"angle": 0,
|
| 1772 |
+
"content": "[12] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou.Composer: Creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778, 2023. 3"
|
| 1773 |
+
},
|
| 1774 |
+
{
|
| 1775 |
+
"type": "ref_text",
|
| 1776 |
+
"bbox": [
|
| 1777 |
+
0.079,
|
| 1778 |
+
0.619,
|
| 1779 |
+
0.472,
|
| 1780 |
+
0.688
|
| 1781 |
+
],
|
| 1782 |
+
"angle": 0,
|
| 1783 |
+
"content": "[13] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 4"
|
| 1784 |
+
},
|
| 1785 |
+
{
|
| 1786 |
+
"type": "ref_text",
|
| 1787 |
+
"bbox": [
|
| 1788 |
+
0.079,
|
| 1789 |
+
0.689,
|
| 1790 |
+
0.472,
|
| 1791 |
+
0.759
|
| 1792 |
+
],
|
| 1793 |
+
"angle": 0,
|
| 1794 |
+
"content": "[14] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 3, 5, 7"
|
| 1795 |
+
},
|
| 1796 |
+
{
|
| 1797 |
+
"type": "ref_text",
|
| 1798 |
+
"bbox": [
|
| 1799 |
+
0.079,
|
| 1800 |
+
0.761,
|
| 1801 |
+
0.472,
|
| 1802 |
+
0.83
|
| 1803 |
+
],
|
| 1804 |
+
"angle": 0,
|
| 1805 |
+
"content": "[15] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In European conference on computer vision, pages 1-18. Springer, 2022. 2, 6, 7"
|
| 1806 |
+
},
|
| 1807 |
+
{
|
| 1808 |
+
"type": "ref_text",
|
| 1809 |
+
"bbox": [
|
| 1810 |
+
0.079,
|
| 1811 |
+
0.832,
|
| 1812 |
+
0.472,
|
| 1813 |
+
0.9
|
| 1814 |
+
],
|
| 1815 |
+
"angle": 0,
|
| 1816 |
+
"content": "[16] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11976-11986, 2022. 5"
|
| 1817 |
+
},
|
| 1818 |
+
{
|
| 1819 |
+
"type": "list",
|
| 1820 |
+
"bbox": [
|
| 1821 |
+
0.079,
|
| 1822 |
+
0.092,
|
| 1823 |
+
0.472,
|
| 1824 |
+
0.9
|
| 1825 |
+
],
|
| 1826 |
+
"angle": 0,
|
| 1827 |
+
"content": null
|
| 1828 |
+
},
|
| 1829 |
+
{
|
| 1830 |
+
"type": "ref_text",
|
| 1831 |
+
"bbox": [
|
| 1832 |
+
0.503,
|
| 1833 |
+
0.092,
|
| 1834 |
+
0.895,
|
| 1835 |
+
0.161
|
| 1836 |
+
],
|
| 1837 |
+
"angle": 0,
|
| 1838 |
+
"content": "[17] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 5"
|
| 1839 |
+
},
|
| 1840 |
+
{
|
| 1841 |
+
"type": "ref_text",
|
| 1842 |
+
"bbox": [
|
| 1843 |
+
0.503,
|
| 1844 |
+
0.164,
|
| 1845 |
+
0.895,
|
| 1846 |
+
0.219
|
| 1847 |
+
],
|
| 1848 |
+
"angle": 0,
|
| 1849 |
+
"content": "[18] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3"
|
| 1850 |
+
},
|
| 1851 |
+
{
|
| 1852 |
+
"type": "ref_text",
|
| 1853 |
+
"bbox": [
|
| 1854 |
+
0.503,
|
| 1855 |
+
0.221,
|
| 1856 |
+
0.895,
|
| 1857 |
+
0.289
|
| 1858 |
+
],
|
| 1859 |
+
"angle": 0,
|
| 1860 |
+
"content": "[19] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 3"
|
| 1861 |
+
},
|
| 1862 |
+
{
|
| 1863 |
+
"type": "ref_text",
|
| 1864 |
+
"bbox": [
|
| 1865 |
+
0.503,
|
| 1866 |
+
0.291,
|
| 1867 |
+
0.895,
|
| 1868 |
+
0.374
|
| 1869 |
+
],
|
| 1870 |
+
"angle": 0,
|
| 1871 |
+
"content": "[20] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 5, 6"
|
| 1872 |
+
},
|
| 1873 |
+
{
|
| 1874 |
+
"type": "ref_text",
|
| 1875 |
+
"bbox": [
|
| 1876 |
+
0.503,
|
| 1877 |
+
0.376,
|
| 1878 |
+
0.895,
|
| 1879 |
+
0.43
|
| 1880 |
+
],
|
| 1881 |
+
"angle": 0,
|
| 1882 |
+
"content": "[21] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3"
|
| 1883 |
+
},
|
| 1884 |
+
{
|
| 1885 |
+
"type": "ref_text",
|
| 1886 |
+
"bbox": [
|
| 1887 |
+
0.503,
|
| 1888 |
+
0.433,
|
| 1889 |
+
0.895,
|
| 1890 |
+
0.503
|
| 1891 |
+
],
|
| 1892 |
+
"angle": 0,
|
| 1893 |
+
"content": "[22] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 3, 4, 5"
|
| 1894 |
+
},
|
| 1895 |
+
{
|
| 1896 |
+
"type": "ref_text",
|
| 1897 |
+
"bbox": [
|
| 1898 |
+
0.503,
|
| 1899 |
+
0.504,
|
| 1900 |
+
0.895,
|
| 1901 |
+
0.587
|
| 1902 |
+
],
|
| 1903 |
+
"angle": 0,
|
| 1904 |
+
"content": "[23] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 3"
|
| 1905 |
+
},
|
| 1906 |
+
{
|
| 1907 |
+
"type": "ref_text",
|
| 1908 |
+
"bbox": [
|
| 1909 |
+
0.503,
|
| 1910 |
+
0.589,
|
| 1911 |
+
0.895,
|
| 1912 |
+
0.63
|
| 1913 |
+
],
|
| 1914 |
+
"angle": 0,
|
| 1915 |
+
"content": "[24] Alexander Swerdlow, Runsheng Xu, and Bolei Zhou. Street-view image generation from a bird's-eye view layout. arXiv preprint arXiv:2301.04634, 2023. 2, 4, 5, 7"
|
| 1916 |
+
},
|
| 1917 |
+
{
|
| 1918 |
+
"type": "ref_text",
|
| 1919 |
+
"bbox": [
|
| 1920 |
+
0.503,
|
| 1921 |
+
0.632,
|
| 1922 |
+
0.895,
|
| 1923 |
+
0.7
|
| 1924 |
+
],
|
| 1925 |
+
"angle": 0,
|
| 1926 |
+
"content": "[25] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2818-2826, 2016. 6"
|
| 1927 |
+
},
|
| 1928 |
+
{
|
| 1929 |
+
"type": "ref_text",
|
| 1930 |
+
"bbox": [
|
| 1931 |
+
0.503,
|
| 1932 |
+
0.702,
|
| 1933 |
+
0.895,
|
| 1934 |
+
0.758
|
| 1935 |
+
],
|
| 1936 |
+
"angle": 0,
|
| 1937 |
+
"content": "[26] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multi-view image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 3"
|
| 1938 |
+
},
|
| 1939 |
+
{
|
| 1940 |
+
"type": "ref_text",
|
| 1941 |
+
"bbox": [
|
| 1942 |
+
0.503,
|
| 1943 |
+
0.759,
|
| 1944 |
+
0.895,
|
| 1945 |
+
0.801
|
| 1946 |
+
],
|
| 1947 |
+
"angle": 0,
|
| 1948 |
+
"content": "[27] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2"
|
| 1949 |
+
},
|
| 1950 |
+
{
|
| 1951 |
+
"type": "ref_text",
|
| 1952 |
+
"bbox": [
|
| 1953 |
+
0.503,
|
| 1954 |
+
0.802,
|
| 1955 |
+
0.895,
|
| 1956 |
+
0.843
|
| 1957 |
+
],
|
| 1958 |
+
"angle": 0,
|
| 1959 |
+
"content": "[28] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. arXiv preprint arXiv:2211.13752, 2022. 3"
|
| 1960 |
+
},
|
| 1961 |
+
{
|
| 1962 |
+
"type": "ref_text",
|
| 1963 |
+
"bbox": [
|
| 1964 |
+
0.503,
|
| 1965 |
+
0.845,
|
| 1966 |
+
0.895,
|
| 1967 |
+
0.9
|
| 1968 |
+
],
|
| 1969 |
+
"angle": 0,
|
| 1970 |
+
"content": "[29] Weilun Wang, Jianmin Bao, Wengang Zhou, Dongdong Chen, Dong Chen, Lu Yuan, and Houqiang Li. Semantic image synthesis via diffusion models. arXiv preprint arXiv:2207.00050, 2022.3"
|
| 1971 |
+
},
|
| 1972 |
+
{
|
| 1973 |
+
"type": "list",
|
| 1974 |
+
"bbox": [
|
| 1975 |
+
0.503,
|
| 1976 |
+
0.092,
|
| 1977 |
+
0.895,
|
| 1978 |
+
0.9
|
| 1979 |
+
],
|
| 1980 |
+
"angle": 0,
|
| 1981 |
+
"content": null
|
| 1982 |
+
},
|
| 1983 |
+
{
|
| 1984 |
+
"type": "page_number",
|
| 1985 |
+
"bbox": [
|
| 1986 |
+
0.477,
|
| 1987 |
+
0.925,
|
| 1988 |
+
0.495,
|
| 1989 |
+
0.937
|
| 1990 |
+
],
|
| 1991 |
+
"angle": 0,
|
| 1992 |
+
"content": "12"
|
| 1993 |
+
}
|
| 1994 |
+
],
|
| 1995 |
+
[
|
| 1996 |
+
{
|
| 1997 |
+
"type": "ref_text",
|
| 1998 |
+
"bbox": [
|
| 1999 |
+
0.08,
|
| 2000 |
+
0.092,
|
| 2001 |
+
0.47,
|
| 2002 |
+
0.161
|
| 2003 |
+
],
|
| 2004 |
+
"angle": 0,
|
| 2005 |
+
"content": "[30] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Weixian Lei, Yuchao Gu, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. arXiv preprint arXiv:2212.11565, 2022.4"
|
| 2006 |
+
},
|
| 2007 |
+
{
|
| 2008 |
+
"type": "ref_text",
|
| 2009 |
+
"bbox": [
|
| 2010 |
+
0.08,
|
| 2011 |
+
0.163,
|
| 2012 |
+
0.47,
|
| 2013 |
+
0.219
|
| 2014 |
+
],
|
| 2015 |
+
"angle": 0,
|
| 2016 |
+
"content": "[31] Han Xue, Zhiwu Huang, Qianru Sun, Li Song, and Wenjun Zhang. Freestyle layout-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14256-14266, 2023. 3"
|
| 2017 |
+
},
|
| 2018 |
+
{
|
| 2019 |
+
"type": "ref_text",
|
| 2020 |
+
"bbox": [
|
| 2021 |
+
0.08,
|
| 2022 |
+
0.22,
|
| 2023 |
+
0.47,
|
| 2024 |
+
0.29
|
| 2025 |
+
],
|
| 2026 |
+
"angle": 0,
|
| 2027 |
+
"content": "[32] Zhengyuan Yang, Jianfeng Wang, Zhe Gan, Linjie Li, Kevin Lin, Chenfei Wu, Nan Duan, Zicheng Liu, Ce Liu, Michael Zeng, et al. Reco: Region-controlled text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14246-14255, 2023. 3"
|
| 2028 |
+
},
|
| 2029 |
+
{
|
| 2030 |
+
"type": "ref_text",
|
| 2031 |
+
"bbox": [
|
| 2032 |
+
0.08,
|
| 2033 |
+
0.291,
|
| 2034 |
+
0.47,
|
| 2035 |
+
0.331
|
| 2036 |
+
],
|
| 2037 |
+
"angle": 0,
|
| 2038 |
+
"content": "[33] Lvmin Zhang and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. arXiv preprint arXiv:2302.05543, 2023. 3"
|
| 2039 |
+
},
|
| 2040 |
+
{
|
| 2041 |
+
"type": "ref_text",
|
| 2042 |
+
"bbox": [
|
| 2043 |
+
0.08,
|
| 2044 |
+
0.333,
|
| 2045 |
+
0.47,
|
| 2046 |
+
0.387
|
| 2047 |
+
],
|
| 2048 |
+
"angle": 0,
|
| 2049 |
+
"content": "[34] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 4"
|
| 2050 |
+
},
|
| 2051 |
+
{
|
| 2052 |
+
"type": "ref_text",
|
| 2053 |
+
"bbox": [
|
| 2054 |
+
0.08,
|
| 2055 |
+
0.39,
|
| 2056 |
+
0.47,
|
| 2057 |
+
0.457
|
| 2058 |
+
],
|
| 2059 |
+
"angle": 0,
|
| 2060 |
+
"content": "[35] Yunpeng Zhang, Zheng Zhu, Wenzhao Zheng, Junjie Huang, Guan Huang, Jie Zhou, and Jiwen Lu. **Reverse: Unified perception and prediction in birds-eye-view for vision-centric autonomous driving. arXiv preprint arXiv:2205.09743, 2022.2**"
|
| 2061 |
+
},
|
| 2062 |
+
{
|
| 2063 |
+
"type": "ref_text",
|
| 2064 |
+
"bbox": [
|
| 2065 |
+
0.08,
|
| 2066 |
+
0.46,
|
| 2067 |
+
0.47,
|
| 2068 |
+
0.53
|
| 2069 |
+
],
|
| 2070 |
+
"angle": 0,
|
| 2071 |
+
"content": "[36] Guangcong Zheng, Xianpan Zhou, Xuewei Li, Zhongang Qi, Ying Shan, and Xi Li. Layoutdiffusion: Controllable diffusion model for layout-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22490-22499, 2023. 3, 7"
|
| 2072 |
+
},
|
| 2073 |
+
{
|
| 2074 |
+
"type": "ref_text",
|
| 2075 |
+
"bbox": [
|
| 2076 |
+
0.08,
|
| 2077 |
+
0.531,
|
| 2078 |
+
0.47,
|
| 2079 |
+
0.587
|
| 2080 |
+
],
|
| 2081 |
+
"angle": 0,
|
| 2082 |
+
"content": "[37] Brady Zhou and Philipp Krahenbuhl. Cross-view transformers for real-time map-view semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13760-13769, 2022. 2, 6, 7"
|
| 2083 |
+
},
|
| 2084 |
+
{
|
| 2085 |
+
"type": "list",
|
| 2086 |
+
"bbox": [
|
| 2087 |
+
0.08,
|
| 2088 |
+
0.092,
|
| 2089 |
+
0.47,
|
| 2090 |
+
0.587
|
| 2091 |
+
],
|
| 2092 |
+
"angle": 0,
|
| 2093 |
+
"content": null
|
| 2094 |
+
},
|
| 2095 |
+
{
|
| 2096 |
+
"type": "page_number",
|
| 2097 |
+
"bbox": [
|
| 2098 |
+
0.478,
|
| 2099 |
+
0.925,
|
| 2100 |
+
0.495,
|
| 2101 |
+
0.937
|
| 2102 |
+
],
|
| 2103 |
+
"angle": 0,
|
| 2104 |
+
"content": "13"
|
| 2105 |
+
}
|
| 2106 |
+
]
|
| 2107 |
+
]
|
data/2023/2308_01xxx/2308.01661/7263487d-f861-4e28-bbc9-bb783bebeb71_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:55ff6dfd1784148c54ddede5a35fbfcf507b99c39622ec019e52cfc1306c483c
|
| 3 |
+
size 30205973
|
data/2023/2308_01xxx/2308.01661/full.md
ADDED
|
@@ -0,0 +1,310 @@
| 1 |
+
# BEVControl: Accurately Controlling Street-view Elements with Multi-perspective Consistency via BEV Sketch Layout
|
| 2 |
+
|
| 3 |
+
Kairui Yang $^{1*}$ Enhui Ma $^{1*}$ Jibin Peng $^{1}$ Qing Guo $^{2}$ Jianping Wu $^{3}$ Di Lin $^{1\dagger}$ Kaicheng Yu $^{4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Tianjin University $^{2}$ IHPC and CFAR, Agency for Science, Technology and Research, Singapore $^{3}$ Tsinghua University $^{4}$ Westlake University
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
(a) Vanilla generative method
|
| 9 |
+
|
| 10 |
+

|
| 11 |
+
(b) Two-stage BEVControl
|
| 12 |
+
Figure 1. Comparison between different generative networks hinted by Bird's Eye View (BEV) segmentation layout vs. sketch layout. (a) Vanilla generative pipeline feeds a semantic segmentation style input into a generative network and outputs reasonable multi-view images. However, we discover that it fails to generate accurate object-level details. For example, we show a common failure of a state-of-the-art algorithm where the generated vehicle has a reversed heading compared to the target 3D bounding box. In addition, editing the semantic segmentation style input is a hard task and requires non-trivial human effort. (b) To this end, we propose a two-stage method that provides finer background and foreground geometry control, dubbed BEVControl. It supports sketch style input that enables fast and easy editing. In addition, our BEVControl decouples visual consistency into two sub-goals: achieving geometry consistency between street and bird's-eye views through the Controller; and achieving appearance consistency between street views through the Coordinator.
|
| 13 |
+
|
| 14 |
+
# Abstract
|
| 15 |
+
|
| 16 |
+
Using synthesized images to boost the performance of perception models is a long-standing research challenge in computer vision. It becomes more pronounced in visual-centric autonomous driving systems with multi-view cameras, as some long-tail scenarios can never be collected. Guided by the BEV segmentation layouts, the existing generative networks seem to synthesize photo-realistic street-view images when evaluated solely on scene-level metrics. However, once zoomed in, they usually fail to produce accurate foreground and background details such as heading.
|
| 17 |
+
|
| 18 |
+
To this end, we propose a two-stage generative method, dubbed BEVControl, that can generate accurate foreground and background contents. In contrast to segmentation-like input, it also supports sketch style input, which is more flexible for humans to edit. In addition, we propose a comprehensive multi-level evaluation protocol to fairly compare the quality of the generated scene, foreground object, and background geometry. Our extensive experiments show that our BEVControl surpasses the state-of-the-art method, BEVGen, by a significant margin, from 5.89 to 26.80 on foreground segmentation mIoU. In addition, we show that using images generated by BEVControl to train the downstream perception model yields an average improvement of 1.29 in NDS score.
|
| 19 |
+
|
| 20 |
+
# 1. Introduction
|
| 21 |
+
|
| 22 |
+
BEV perception for autonomous driving has become popular. It requires understanding the objects in the streets captured from multiple cameras' views, where the objects must correspond to their positions from the bird's-eye perspective. The street and bird's-eye views allow the autonomous driving car to sense its surroundings broadly, thus advancing progress on an array of downstream applications (e.g., street-view object recognition [15, 37] and traffic flow prediction [1, 11, 35]).
|
| 23 |
+
|
| 24 |
+
In today's age of deep learning, reliable BEV perception heavily relies on deep networks trained on many street-view images and the corresponding BEV segmentation layouts, to enable the autonomous car's self-control. To achieve large-scale data for BEV perception, one may employ autonomous vehicles to travel around the city while recording the street-view images with multiple cameras and mapping the objects to the BEV segmentation layout. Undoubtedly, this solution reduces the human effort for data collection. Yet, autonomous cars without perfect self-control may give rise to traffic congestion or even fatal accidents. Moreover, annotators must label objects across the street and bird's-eye views at considerable cost, and extra effort is needed to double-check the consistency of the annotations across views.
|
| 25 |
+
|
| 26 |
+
Rather than laboriously collecting street-view images from the natural environment and annotating multi-view photos, many works [24] resort to the fast-growing family of generative networks [6, 27] for creating new street-view images with a realistic style, which augment the training data for BEV perception. As illustrated in Figure 1(a), these methods feed the BEV layout into the generative network. The BEV segmentation layout provides the semantic categories and spatial distribution of the objects in the street for the generative network, thus controlling the content of the generated images. Even with an identical BEV layout, the generative network can randomly associate diverse appearances with the objects already appearing in the street.
|
| 27 |
+
|
| 28 |
+
In spite of the success of the generative networks, the current methods pay little attention to two critical issues when generating street-view images based on the BEV segmentation layouts. First, the BEV segmentation layout is analogous to a panoptic segmentation map, where both the background stuff and the foreground objects carry detailed pixel-wise annotations. It is inconvenient to edit the details of the BEV segmentation layout, which prevents producing many diverse layouts for further enriching the street-view data. Second, the existing methods generally focus on improving the visual consistency between various street views and the geometric correspondence between the street and bird's-eye views. Nevertheless, reasonable visual consistency and geometric correspondence alone are inadequate for data augmentation, which also requires a diversity of visual elements (e.g., road layout, lane line,
|
| 29 |
+
|
| 30 |
+
and vehicle position/heading) in the street-view images to enhance the generalization power of the BEV perception models. For this purpose, the generative network should accurately control the visual elements to achieve data diversity.
|
| 31 |
+
|
| 32 |
+
This paper proposes BEVControl, which has strong power to control the visual elements of the generated street-view images based on the BEV sketch layout. We illustrate the architecture of BEVControl in Figure 1(b). BEVControl consists of a controller and a coordinator. The controller relies on the sketches of the background (e.g., road layout and lane line) and foreground elements (e.g., vehicle and pedestrian), which are easier to edit than the pixel-wise annotations on the segmentation layout, to separately control the appearances and geometric properties of these two kinds of elements in the generated street-view images. The coordinator attends to the underlying correlation between the background and foreground elements, whose visual consistency across different views is preserved.
|
| 33 |
+
|
| 34 |
+
The controller regards the background and foreground elements' sketches as hints. Here, the sketch and bounding boxes mainly represent the geometric shapes of the background and foreground elements. They are mapped from the identical BEV sketch layout, thus preserving the geometric correspondence between the elements across the street and bird's-eye views. With the hints attending to the background and foreground elements respectively, the controller employs the diffusion model to compute the latent feature maps of the street-view images, which represent various perspectives captured by multiple vehicle cameras. We feed these street-view feature maps to the coordinator. The coordinator uses a novel cross-view-cross-element attention mechanism to comprehensively model the context of visual elements in different views. It uses the context to enhance the visual consistency between the visual elements from multiple street perspectives, eventually producing street-view images.
|
| 35 |
+
|
| 36 |
+
We extract the BEV sketches from the public dataset, nuScenes [5], to drive BEVControl to generate the street-view images for the classical object detection task. In contrast to the current methods that primarily focus on the usefulness of the generated data for improving the performance on downstream tasks, we extensively evaluate the controlling power of BEVControl, which helps to yield richer training data and achieve state-of-the-art object detection performance on nuScenes. We summarize our contributions below:
|
| 37 |
+
|
| 38 |
+
- We use the cost-effective BEV sketch layouts to more easily produce a large number of street-view images.
|
| 39 |
+
- We propose the sketch-based BEVControl, which has a strong control of the background and foreground elements in the generated street-view images.
|
| 40 |
+
- BEVControl remarkably augments the training dataset, which helps to achieve state-of-the-art object detection results on nuScenes.
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
Figure 2. (a) Overview of BEVControl. It takes inputs as an edit-friendly BEV sketch $S$ , multi-view noisy images $\mathcal{Z}_t$ and text prompt, generating multi-view images $\mathcal{Z}_0$ . BEVControl is a UNet structure generative network composed of a sequence of modules. Each module has two elements, controller and coordinator. Each controller takes input from BEV sketch features extracted from the projection module. See Fig. 3 for more details. Text features are encoded cross-attention as in [22]. (b) Details of Controller. A controller module takes in the foreground and background location information of the camera views sketch in a self-attention manner and outputs the geometry-consistent street view features $\mathcal{G}_t$ concerning the BEV sketch $S$ . (c) Details of Coordinator. A coordinator module leverages a novel cross-view-cross-element attention mechanism that enables context interaction across views, outputting the appearance-consistent street view features $\mathcal{A}_t$ .
|
| 46 |
+
|
| 47 |
+
# 2. Related Work
|
| 48 |
+
|
| 49 |
+
The literature on image generation is vast [22, 26]. We mainly survey the approaches to the conditional generation of images with visual consistency. These approaches are closely relevant to our work because they also leverage various types of image information to control the image contents.
|
| 50 |
+
|
| 51 |
+
Image Generation via Multi-modal Information The recent progress in image generation is primarily attributed to the generative networks pre-trained on large-scale image data. Amidst a broad range of generative networks, the family of diffusion models [19, 21-23] leads a trend of using multi-modal information for generating the image contents. The latent diffusion model [22] is a framework for producing images based on text. Note that the text-based information only roughly specifies the image contents, disallowing fine-grained control of the image contents. To address the above problem, multi-modal image information like layout images [7, 14, 32, 36], semantic segmentation maps [2, 3, 9, 18, 29, 31], object sketches [4, 12, 18, 28, 33],
|
| 52 |
+
|
| 53 |
+
and depth images [12, 33] have been used to hint the image generation.
|
| 54 |
+
|
| 55 |
+
Generally, the above methods concentrate on generating a single image, where the image contents' semantic categories are aligned with the hints. This paper considers a more complex setting where the street-view image contents of multiple perspectives are generated. In addition to the semantic categories, we should accurately control the geometric properties of the generated street-view images. This goal is non-trivial, especially when the geometric patterns of the foreground and background contents are diverse. To achieve this goal, we resort to the appropriate modalities for controlling the foreground and background contents, thus enhancing the controlling power of the generative network.
|
| 56 |
+
|
| 57 |
+
Multi-view Image Generation with Visual Consistency Visual consistency is a natural property of authentic images of multiple views. Similarly, we should preserve the visual consistency across multi-view images generated by the deep network. For this purpose, MVDiffusion [26] uses the cross-view attention mechanism to
|
| 58 |
+
|
| 59 |
+
create panoramic images from text, maintaining the global correspondence of multi-view images. The video generation methods [8, 13, 30, 34] use the temporal cross-frame attention to preserve the visual consistency across distinct views of image contents at different moments. BEVGen [24] is a contemporary work that generates multi-view images of the street based on the BEV segmentation layout. It employs an auto-regressive transformer with cross-view attention to maintain visual consistency across multi-view images.
|
| 60 |
+
|
| 61 |
+
The above methods usually work well when the global appearances of multi-view images coincide. But they are less effective for preserving the multi-view consistency when more accurate control of the individual contents (e.g., the orientations of different cars) is desired. This is because the independent operations of content control easily lead to inconsistency across other contents in the same venue. In contrast to the existing methods, we propose cross-view cross-object attention, which remarkably augments the visual consistency of the generated multi-view images.
|
| 62 |
+
|
| 63 |
+
# 3. BEVControl
|
| 64 |
+
|
| 65 |
+
We illustrate the overall architecture of BEVControl in Figure 2. Following the LDM [22], BEVControl is a classic UNet structure consisting of an encoder and a decoder. They are composed of three modules stacked multiple times: controller, coordinator, and text cross-attention. We process all image features in the latent space, so the image features below specifically refer to those in the latent space.
|
| 66 |
+
|
| 67 |
+
At first, BEVControl takes the edit-friendly BEV sketch $S \in \mathbb{R}^{K \times K \times 5}$ , text description, and street-view noisy images $\mathcal{Z}_t = \{\mathbf{Z}_t^v \in \mathbb{R}^{H \times W \times C} \mid v = 1, \dots, V\}$ as input. Here, $V$ denotes the number of perspective views. All sets denoted by $\{\cdot\}$ represent $V$ viewpoints in the method below to foster better readability. $S$ is an editable canvas, which supports editing the objects within a $160 \times 160$ -meter range around the ego car. The five channels of $S$ represent the background sketch (road line), pixel coordinates of the box's center point, text label, and heading of foreground objects, respectively. In training, $\mathcal{Z}_t$ is a noisy version of street-view authentic images $\mathcal{Z}_0$ by forward diffusion process of [22]. In inference, $\mathcal{Z}_t$ is street-view noise sampled from $\mathcal{N}(0, \mathbf{I})$ . $H, W$ , and $C$ expressly represent the spatial resolutions and channels of latent features.
|
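The text above fixes the canvas shape ($K \times K \times 5$) and names the five channels but does not specify how they are filled; the snippet below is a minimal sketch of one plausible assembly, assuming a rasterized road mask and a list of (center-x, center-y, label-id, heading) tuples per foreground box. The helper name and channel order are illustrative assumptions, not the authors' code.

```python
import numpy as np

def build_bev_sketch(road_mask, boxes, K=160):
    """Assemble an editable K x K x 5 BEV sketch canvas.
    Assumed channel layout: 0 = road/lane sketch, 1-2 = box-center pixel
    coordinates, 3 = class-label id, 4 = heading in degrees."""
    sketch = np.zeros((K, K, 5), dtype=np.float32)
    sketch[..., 0] = road_mask                    # rasterized road / lane lines
    for cx, cy, label_id, heading in boxes:       # one entry per foreground box
        u, v = int(round(cx)), int(round(cy))
        if 0 <= u < K and 0 <= v < K:
            sketch[v, u, 1] = cx                  # center coordinates
            sketch[v, u, 2] = cy
            sketch[v, u, 3] = label_id            # index into the text labels
            sketch[v, u, 4] = heading             # in [-180, 180)
    return sketch
```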
| 68 |
+
|
| 69 |
+
BEVControl first projects the BEV sketch $\mathcal{S}$ onto the 2D camera space as shown in Figure 3, computing a set of camera foreground conditions $\mathcal{M} = \{\mathbf{b}^v,\mathbf{l}^v,\mathbf{h}^v\}$ and background conditions $\mathcal{R} = \{\mathbf{R}^v\}$ of all views; see Sec. 3.1 for details. Then we encode the camera foreground and background conditions into a set of camera foreground and background embeddings $\mathcal{F} = \{\mathbf{F}^v\in \mathbb{R}^{N\times C}\}$ and $\mathcal{B} = \{\mathbf{B}^v\in \mathbb{R}^{(H\times W)\times C}\}$ , where $N$ denotes the number of bounding boxes in each view. Through the Controller, each perspective can obtain semantic control
|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
Figure 3. The camera projection process from BEV sketch.
|
| 73 |
+
|
| 74 |
+
information from the foreground and the background embedding of the corresponding camera view. This process results in the generation of geometry-consistent street-view latent features $\mathcal{G}_t = \{\mathbf{G}_t^v\in \mathbb{R}^{H\times W\times C}\}$ . Next, the geometry-consistent features $\mathcal{G}_t$ are fed into the Coordinator. The Coordinator employs a novel cross-view-cross-element attention mechanism to enhance adjacent views' consistency, yielding the appearance-consistent street-view latent features $\mathcal{A}_t = \{\mathbf{A}_t^v\in \mathbb{R}^{H\times W\times C}\}$ .
|
| 75 |
+
|
| 76 |
+
BEVControl then employs the cross-attention mechanism of the diffusion model to handle the text prompt, allowing us to control the generated images' environmental factors (e.g., weather and lighting conditions). BEVControl repeats the execution of this UNet, formed by stacking these three blocks, $T$ times. Eventually, the output is the generated street-view images $\mathcal{Z}_0 = \{\mathbf{Z}_0^v \in \mathbb{R}^{H \times W \times C}\}$ , which are geometry-consistent, appearance-consistent and caption-aligned.
|
| 77 |
+
|
| 78 |
+
# 3.1. Controller
|
| 79 |
+
|
| 80 |
+
Based on the internal and external parameters of different cameras, we project the foreground and background classes of the BEV sketch $S$ onto the corresponding pixel coordinate system to obtain the camera foreground conditions $\mathcal{M}$ and background conditions $\mathcal{R}$ .
|
| 81 |
+
|
| 82 |
+
We define the camera foreground conditions as $\mathcal{M} = \{\mathbf{b}^v,\mathbf{l}^v,\mathbf{h}^v\}$ , where $\mathbf{b}^v\in [0,1]^{N\times 4}$ , $\mathbf{l}^v$ and $\mathbf{h}^v\in [-180,180)^{N\times 1}$ denote the normalized pixel coordinates of the upper-left and lower-right corners, the text label, and the heading degree of the $N$ boxes in the current perspective. The camera background conditions are $\mathcal{R} = \{\mathbf{R}^v\in \mathbb{R}^{H\times W\times 3}\}$ , which are spatially aligned with the authentic camera images and represent the trend of the road. Then we extract the camera foreground embedding $\mathcal{F} = \{\mathbf{F}^v\in \mathbb{R}^{N\times C}\}$ and background
|
| 83 |
+
|
| 84 |
+
embedding $\mathcal{B} = \{\mathbf{B}^v\in \mathbb{R}^{(H\times W)\times C}\}$ as below:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\mathbf{F}^{v} = \operatorname{linear}\left(\operatorname{fe}(\mathbf{b}^{v}) + \operatorname{cte}(\mathbf{l}^{v}) + \operatorname{fe}(\mathbf{h}^{v})\right), \tag{1}
|
| 88 |
+
$$
|
| 89 |
+
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
\mathbf{B}^{v} = \operatorname{cnn}\left(\mathbf{R}^{v}\right),
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where fe denotes the Fourier Embedder [17], cte denotes the CLIP Text Encoder [20], and cnn denotes a pre-trained CNN network [16]. Building on an existing extensively pre-trained diffusion model [14, 22], we inject the foreground and background embeddings $\mathcal{F}$ and $\mathcal{B}$ by adding two trainable self-attention layers to the UNet architecture. The calculation formula is shown below:
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
\mathbf{G}_{t}^{v} = \mathbf{Z}_{t}^{v} + \alpha \cdot \operatorname{sa}([\mathbf{Z}_{t}^{v}, \mathbf{F}^{v}]) + \beta \cdot \operatorname{sa}([\mathbf{Z}_{t}^{v}, \mathbf{B}^{v}]), \tag{2}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
where $[\cdot]$ denotes the concatenation operation, sa denotes the self-attention block, and $\alpha$ and $\beta$ are trainable parameters initialized to 0. The introduced self-attention layers can effectively find the mapping relationship between the visual latent features and the various camera condition embeddings. Therefore, the controller can utilize spatial hints to output a set of latent features $\mathcal{G}_t = \{\mathbf{G}_t^v\in \mathbb{R}^{H\times W\times C}\}$ whose geometry is consistent with the corresponding camera foreground and background conditions.
|
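To make the gated injection of Eq. (2) concrete, here is a minimal PyTorch-style sketch. It is not the authors' released code: the use of `nn.MultiheadAttention`, the token shapes, and the cropping of the attention output back to the latent positions are illustrative assumptions; only the residual form and the zero-initialized gates $\alpha$ and $\beta$ follow the equation. Starting the gates at zero means the block initially reproduces the frozen pretrained UNet, consistent with the fine-tuning strategy in Sec. 3.3.

```python
import torch
import torch.nn as nn

class ControllerBlock(nn.Module):
    """Injects foreground (F) and background (B) condition tokens into the
    latent tokens Z via two gated self-attention branches (cf. Eq. 2)."""
    def __init__(self, dim, heads=8):
        super().__init__()
        self.sa_fg = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.sa_bg = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.alpha = nn.Parameter(torch.zeros(1))  # gates start at 0 so the
        self.beta = nn.Parameter(torch.zeros(1))   # pretrained UNet is untouched

    def forward(self, z, f_tokens, b_tokens):
        # z: (B, H*W, C) latents; f_tokens: (B, N, C); b_tokens: (B, H*W, C)
        fg_ctx = torch.cat([z, f_tokens], dim=1)   # [Z, F]
        bg_ctx = torch.cat([z, b_tokens], dim=1)   # [Z, B]
        fg_out, _ = self.sa_fg(fg_ctx, fg_ctx, fg_ctx)
        bg_out, _ = self.sa_bg(bg_ctx, bg_ctx, bg_ctx)
        n = z.shape[1]                             # keep only the latent positions
        return z + self.alpha * fg_out[:, :n] + self.beta * bg_out[:, :n]
```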
| 103 |
+
|
| 104 |
+
# 3.2. Coordinator
|
| 105 |
+
|
| 106 |
+
Taking $\mathcal{G}_t = \{\mathbf{G}_t^v\}$ as input, we employ the Coordinator to enhance the consistency of different views and make them look as if they were captured from the same scene.
|
| 107 |
+
|
| 108 |
+
Specifically, we propose a novel cross-view-cross-element attention mechanism that enables context interaction between the different views. Sufficient context interaction makes the semantics of visual elements in various perspectives uniform. According to the characteristics of the ring-shaped camera setup, each camera has the highest correlation with its adjacent cameras. Therefore, we carefully design each view to interact only with the contextual information of adjacent views, reducing the demand for computing resources. In particular, we let all camera views learn the context of their adjacent views in parallel. The context comprises two layers of information: global level and local level. The global level represents the entire latent feature of the previous perspective, while the local level refers to the specific element feature. Taking adjacent views $u$ and $v$ as an example, the learned context $\mathbf{k}$ , $\mathbf{v}$ for view $v$ is computed as shown below:
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
\mathbf{q} = \operatorname{linear}\left(\mathbf{G}_{t}^{v}\right),
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\mathbf{k} = \operatorname{linear}\left(\left[\mathbf{G}_{t}^{u}, \mathbf{F}^{u}, \mathbf{B}^{u}\right]\right), \tag{3}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathbf{v} = \operatorname{linear}\left(\left[\mathbf{G}_{t}^{u}, \mathbf{F}^{u}, \mathbf{B}^{u}\right]\right),
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where linear modules above are independent of each other, and we set $u = 1$ , $v = V$ or $v = u + 1$ to enforce $u$ and
|
| 123 |
+
|
| 124 |
+
$v$ as the adjacent views. The context interaction process of adjacent views is formulated as:
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\mathbf{A}_{t}^{v} = \mathbf{G}_{t}^{v} + \mathbf{v}^{\top} \cdot \operatorname{softmax}(\mathbf{k} \cdot \mathbf{q}^{\top}). \tag{4}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
We perform the above operation on all views in parallel, resulting in a set of street-view latent features $\mathcal{A}_t = \{\mathbf{A}_t^v\in \mathbb{R}^{H\times W\times C}\}$ . By interacting at both the global and local levels, global information, such as environmental conditions and weather, and local information, such as object appearance and identity, can be transmitted from the previous to the following perspective. Thus the cross-view-cross-element attention effectively improves the appearance consistency of the street-view images.
|
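The adjacent-view interaction of Eqs. (3)-(4) can be sketched as follows. This is a hedged illustration rather than the authors' implementation: the single-head formulation and the $1/\sqrt{d}$ scaling (not written in Eq. (4)) are standard-attention assumptions, and in practice the module would be run for every adjacent camera pair in parallel.

```python
import torch
import torch.nn as nn

class CrossViewCrossElement(nn.Module):
    """View v attends to the latent tokens (global level) and the condition
    tokens (element level) of its adjacent view u (cf. Eqs. 3-4)."""
    def __init__(self, dim):
        super().__init__()
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)

    def forward(self, g_v, g_u, f_u, b_u):
        # g_v, g_u: (B, H*W, C); f_u: (B, N, C); b_u: (B, H*W, C)
        q = self.to_q(g_v)                          # queries from view v
        ctx = torch.cat([g_u, f_u, b_u], dim=1)     # context [G_u, F_u, B_u]
        k, v = self.to_k(ctx), self.to_v(ctx)
        attn = torch.softmax(q @ k.transpose(1, 2) / k.shape[-1] ** 0.5, dim=-1)
        return g_v + attn @ v                       # appearance-consistent A_t^v
```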
| 131 |
+
|
| 132 |
+
# 3.3. Training Objective
|
| 133 |
+
|
| 134 |
+
By repeatedly applying the above UNet $\epsilon_{\theta}$ in the latent space, we can obtain street-view images with gradually reduced noise. By adding $t$ steps of noise $\epsilon$ to the original clean images $\mathcal{Z}_0$ , we obtain a noisy version $\mathcal{Z}_t$ of the images. We train $\epsilon_{\theta}$ to predict the noise we added. Following the training objective of the original LDM [22], we finetune the pretrained diffusion model [14] to adapt to the new conditions $c$ (e.g., BEV sketch and text prompt):
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\min_{\theta} \mathcal{L} = \mathbb{E}_{\mathcal{Z}_{0}, \epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I}), t, c}\left[\left\| \epsilon - \epsilon_{\theta}\left(\mathcal{Z}_{t}, t, c\right)\right\|_{2}^{2}\right], \tag{5}
|
| 138 |
+
$$
|
| 139 |
+
|
| 140 |
+
where the time step $t$ is uniformly sampled from $[1, T]$ , and $\theta$ refers to the newly added layers in the UNet. We only train the newly introduced layers while freezing the layers of the original diffusion model. This approach reduces memory consumption and avoids knowledge forgetting and model collapse.
|
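A minimal sketch of one optimization step for Eq. (5) is given below, assuming a diffusers-style scheduler interface (`add_noise`, `config.num_train_timesteps`) and a `unet` callable that takes noisy latents, time steps, and the conditions; these interfaces are assumptions, not the authors' code. Freezing the pretrained layers (setting `requires_grad` to `False` on the original UNet parameters) is assumed to have been done before training.

```python
import torch
import torch.nn.functional as F

def training_step(unet, scheduler, z0, cond):
    """One denoising step for Eq. (5): add t-step noise to the clean latents z0
    and train the newly added layers to predict that noise."""
    t = torch.randint(0, scheduler.config.num_train_timesteps,
                      (z0.shape[0],), device=z0.device)
    noise = torch.randn_like(z0)
    zt = scheduler.add_noise(z0, noise, t)   # forward diffusion q(z_t | z_0)
    pred = unet(zt, t, cond)                 # epsilon_theta(z_t, t, c)
    return F.mse_loss(pred, noise)           # || epsilon - epsilon_theta ||^2
```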
| 141 |
+
|
| 142 |
+
# 4. Evaluation Metrics for Content Controlling
|
| 143 |
+
|
| 144 |
+
Recent street-view image generation works [24] only evaluate the generation quality based on scene-level metrics such as FID, vehicle mIoU, and road mIoU. However, we found that these metrics alone cannot evaluate the true generation ability of the generative network. As shown in Figure 4, the qualitative and quantitative results jointly indicate that several sets of generated street-view images with similar FID scores have vastly different fine-grained control over the foreground and background. Therefore, in this section, we introduce the evaluation metrics used in our experiments for measuring the controlling power of the generative network.
|
| 145 |
+
|
| 146 |
+
Evaluation Metrics for Realism, Diversity, and Consistency Given the street-view images $\{\mathbf{X}_v' \mid v = 1, \dots, V\}$ from $V$ perspectives output by the image decoder of CLIP [20], we use the Frechet Inception Distance (FID) [10] to measure the realism and diversity of the generated street-view images. We compute the FID between the latent features of the generated and real images, which capture the
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
Figure 4. Comparison of detail evaluation metrics.
|
| 150 |
+
|
| 151 |
+
same perspective's foreground and background contents. Here, we employ the Inception-V3 network [25] to extract the latent features of the generated and real images. We compute the average FID score $S_{\mathrm{FID}} \in \mathbb{R}$ as:
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
S_{\mathrm{FID}} = \frac{1}{V} \sum_{v=1}^{V} \operatorname{fid}\left(\sigma(\mathbf{X}_{v}), \sigma(\mathbf{X}_{v}^{\prime})\right). \tag{6}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
$\{\mathbf{X}_v \in \mathbb{R}^{H \times W \times 3} \mid v = 1, \dots, V\}$ are the real images. We denote $\sigma$ as the Inception-V3 network. $\sigma(\mathbf{X}_v), \sigma(\mathbf{X}_v') \in \mathbb{R}^C$ are the latent features of the $v^{th}$ perspective's real and generated images, respectively. A lower FID score $S_{\mathrm{FID}}$ means that the generated contents are more realistic and diverse.
|
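For reference, the per-view averaging of Eq. (6) can be written as below, using the standard Gaussian form of the Fréchet distance over Inception-V3 features. The paper does not spell out the `fid(...)` implementation, so this formulation, and the assumption that features have been extracted beforehand, are illustrative.

```python
import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    """Frechet distance between two Gaussians fitted to Inception features."""
    covmean = linalg.sqrtm(sigma1 @ sigma2).real
    return float(((mu1 - mu2) ** 2).sum() + np.trace(sigma1 + sigma2 - 2 * covmean))

def s_fid(real_feats, fake_feats):
    """Average the per-camera FID over the V views (cf. Eq. 6).
    real_feats / fake_feats: lists of (num_images, C) feature arrays, one per view."""
    scores = []
    for xr, xf in zip(real_feats, fake_feats):
        m1, s1 = xr.mean(0), np.cov(xr, rowvar=False)
        m2, s2 = xf.mean(0), np.cov(xf, rowvar=False)
        scores.append(frechet_distance(m1, s1, m2, s2))
    return float(np.mean(scores))
```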
| 158 |
+
|
| 159 |
+
To evaluate the visual consistency between the generated street-view images, we compute the CLIP score [20], based on the latent features of the overlap between the adjacent perspectives of the generated street-view images. We calculate the CLIP score $S_{\mathrm{CLIP}} \in \mathbb{R}$ as:
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
S_{\mathrm{CLIP}} = \frac{1}{V} \sum_{u, v} \operatorname{clip}\left(\psi(\mathbf{X}_{u}^{\prime}), \psi(\mathbf{X}_{v}^{\prime})\right), \tag{7}
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
$$
|
| 166 |
+
\text{s.t.}\quad u = 1,\ v = V \ \text{or}\ v = u + 1,
|
| 167 |
+
$$
|
| 168 |
+
|
| 169 |
+
where $\mathbf{X}_u^{\prime},\mathbf{X}_v^{\prime}$ are the generated street-view images of the
|
| 170 |
+
|
| 171 |
+
adjacent perspectives. We denote $\psi (\mathbf{X}_{u}^{\prime}),\psi (\mathbf{X}_v^{\prime})\in \mathbb{R}^{C}$ as the latent features of the overlap between $\mathbf{X}_u^\prime$ and $\mathbf{X}_v^\prime$ . A higher CLIP score means better visual consistency.
|
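A sketch of how $S_{\mathrm{CLIP}}$ in Eq. (7) can be aggregated over the camera ring follows. The cosine-similarity form of clip(·,·), the 0-100 scaling, and the assumption that the overlapping crops have already been encoded by the CLIP image encoder are illustrative choices, not details specified in the paper.

```python
import torch
import torch.nn.functional as F

def s_clip(overlap_feats):
    """Average CLIP-feature similarity over adjacent-view pairs (cf. Eq. 7).
    overlap_feats: list of V tensors psi(X'_v) of shape (C,), ordered around
    the camera ring, so the last view wraps around to the first."""
    V = len(overlap_feats)
    sims = []
    for v in range(V):
        u = (v - 1) % V                      # adjacent pair (u, v), with wrap-around
        sims.append(F.cosine_similarity(overlap_feats[u], overlap_feats[v], dim=0))
    return float(torch.stack(sims).mean()) * 100.0   # scaled to a 0-100 score
```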
| 172 |
+
|
| 173 |
+
Evaluation Metrics for Foreground and Background Controlling We employ the official detection metrics of the nuScenes dataset, i.e., the mean average precision (mAP), the nuScenes detection score (NDS), and the mean average orientation error (mAOE), for measuring the foreground controlling score. We denote the scores of mAP, NDS, and mAOE as $S_{\mathrm{AP}}$ , $S_{\mathrm{NDS}}$ , and $S_{\mathrm{AOE}}$ . Specifically, based on the generated street-view images $\{\mathbf{X}_v' \mid v = 1,\dots,V\}$ , we use BEVFormer [15] trained on the nuScenes dataset to detect the foreground objects on the BEV layouts. We obtain the scores $S_{\mathrm{AP}}$ , $S_{\mathrm{NDS}}$ , and $S_{\mathrm{AOE}}$ of foreground object detection by comparing the detection results with the BEV layout used for generating the street-view images. We use CVT [37] trained on the nuScenes dataset to segment the foreground on the BEV layouts and report the foreground mean intersection-over-union (fIoU) performance denoted as $S_{\mathrm{fIoU}}$ . To evaluate the background controlling power of the generative network, we employ CVT to segment the background contents on the BEV layouts and report the performance in terms of the background mean intersection-over-union (bIoU) denoted as $S_{\mathrm{bIoU}}$ .
|
| 174 |
+
|
| 175 |
+
We remark that higher scores of $S_{\mathrm{AP}}$ , $S_{\mathrm{NDS}}$ , $S_{\mathrm{fIoU}}$ , $S_{\mathrm{bIoU}}$ , and a lower score of $S_{\mathrm{AOE}}$ mean a good controlling power of the generative network, which produces the foreground and background contents corresponding to the ground-truth annotations in the BEV layouts.
|
| 176 |
+
|
| 177 |
+
Overall Evaluation Metric We propose a combinatorial metric to summarize the above metrics that measure the controlling power of the generative network from separate aspects. We name this combinatorial metric as the overall controlling score (OCS) denoted $S_{\mathrm{OCS}}$ . We compute $S_{\mathrm{OCS}}$ as:
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
S_{\mathrm{OCS}} = \frac{U_{\mathrm{FID}}}{S_{\mathrm{FID}}} + \frac{S_{\mathrm{CLIP}}}{U_{\mathrm{CLIP}}} + \frac{S_{\mathrm{NDS}}}{U_{\mathrm{NDS}}} + \frac{S_{\mathrm{fIoU}}}{U_{\mathrm{fIoU}}} + \frac{S_{\mathrm{bIoU}}}{U_{\mathrm{bIoU}}}. \tag{8}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
The scores $\{S_{\mathrm{FID}}, S_{\mathrm{CLIP}}, S_{\mathrm{NDS}}, S_{\mathrm{fIoU}}, S_{\mathrm{bIoU}}\}$ are obtained by using BEVFormer to detect and CVT to segment street-view contents on the BEV layouts, according to the generated images. We define another set of reference scores $\{U_{\mathrm{FID}}, U_{\mathrm{CLIP}}, U_{\mathrm{NDS}}, U_{\mathrm{fIoU}}, U_{\mathrm{bIoU}}\}$ , which are the detection and segmentation performances based on the authentic images. A high $S_{\mathrm{OCS}}$ means the overall controlling power is strong.
|
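Eq. (8) is a plain ratio sum; the small helper below makes its behavior explicit (the FID term is inverted because lower FID is better). The dictionary interface is only for illustration. Plugging in the reference scores and BEVControl's numbers from Table 1 reproduces the reported OCS of about 3.18.

```python
def s_ocs(scores, refs):
    """Overall controlling score of Eq. (8).
    scores / refs: dicts with keys 'FID', 'CLIP', 'NDS', 'fIoU', 'bIoU'."""
    return (refs["FID"] / scores["FID"]      # lower FID is better, hence inverted
            + scores["CLIP"] / refs["CLIP"]
            + scores["NDS"] / refs["NDS"]
            + scores["fIoU"] / refs["fIoU"]
            + scores["bIoU"] / refs["bIoU"])

# Example with the values reported in Table 1 (BEVControl vs. the reference row):
# s_ocs({"FID": 24.85, "CLIP": 82.70, "NDS": 28.68, "fIoU": 26.80, "bIoU": 60.80},
#       {"FID": 0.01,  "CLIP": 87.96, "NDS": 44.10, "fIoU": 34.83, "bIoU": 74.33})
# -> approximately 3.18
```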
| 184 |
+
|
| 185 |
+
# 5. Experimental Results
|
| 186 |
+
|
| 187 |
+
# 5.1. Dataset
|
| 188 |
+
|
| 189 |
+
We use the public nuScenes dataset [5] to examine the effectiveness of our method. nuScenes contains 1,000 examples of street-view scenes.
|
| 190 |
+
|
| 191 |
+
<table><tr><td rowspan="3">Method</td><td>Real & Diverse</td><td>Consistency</td><td colspan="4">Foreground Control</td><td>Background Control</td><td rowspan="3">\( S_{OCS} \uparrow \)</td></tr><tr><td rowspan="2">\( S_{FID} \downarrow \)</td><td rowspan="2">\( S_{CLIP} \uparrow \)</td><td colspan="3">Detection</td><td>Segmentation</td><td>Segmentation</td></tr><tr><td>\( S_{AP} \uparrow \)</td><td>\( S_{NDS} \uparrow \)</td><td>\( S_{AOE} \downarrow \)</td><td>\( S_{floU} \uparrow \)</td><td>\( S_{bIoU} \uparrow \)</td></tr><tr><td>Reference-score</td><td>0.01</td><td>87.96</td><td>36.04</td><td>44.10</td><td>0.42</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>BEVGen [24]</td><td>25.54</td><td>-</td><td>-</td><td>-</td><td>-</td><td>5.89</td><td>50.20</td><td>-</td></tr><tr><td>LayoutDiffusion [36]</td><td>29.64</td><td>79.80</td><td>3.68</td><td>14.68</td><td>1.31</td><td>15.51</td><td>35.31</td><td>2.16</td></tr><tr><td>GLIGEN [14]</td><td>31.34</td><td>78.80</td><td>15.42</td><td>22.35</td><td>1.22</td><td>22.02</td><td>38.12</td><td>2.55</td></tr><tr><td>BEVControl</td><td>24.85(↓ 6.49)</td><td>82.70(↑ 3.9)</td><td>19.64(↑ 4.22)</td><td>28.68(↑ 6.33)</td><td>0.78(↓ 0.44)</td><td>26.80(↑ 4.78)</td><td>60.80(↑ 22.68)</td><td>3.18(↑ 0.63)</td></tr></table>
|
| 192 |
+
|
| 193 |
+
Table 1. We compare BEVControl with state-of-the-art methods on the validation subset of nuScenes. The results measure the controlling power of different methods. $\downarrow / \uparrow$ means a smaller/larger value of the metric represents a better performance.
|
| 194 |
+
|
| 195 |
+
There are 700/150/150 training/validation/testing examples. Each example records about 40 frames of BEV layouts. Each frame of the BEV layout is associated with six street-view RGB images, which are captured by an ego vehicle's side, front, and back cameras. We follow the convention [24] to sample 600 frames of BEV layouts from the validation set, forming a validation subset to evaluate our method.
|
| 196 |
+
|
| 197 |
+
The objects in each BEV layout are annotated as the foreground and background. For object detection, the foreground includes ten categories (i.e., car, bus, truck, trailer, motorcycle, bicycle, construction vehicle, pedestrian, barrier, and traffic cone), while the background is the road. For object segmentation, the categories of car, bus, truck, trailer, motorcycle, bicycle, and construction vehicle are merged into the vehicle category. Thus, each BEV layout contains the binary categories of vehicle and road.
|
| 198 |
+
|
| 199 |
+
# 5.2. Visual Element Control
|
| 200 |
+
|
| 201 |
+
In Table 1, we compare BEVControl with the recent methods [14, 24, 36], which can also generate street-view images based on the BEV layout. Given a BEV layout, each method generates a set of street-view images from 6 perspectives. The results in Table 1 measure the quality of the generated images and the controlling power of the compared methods on the background and foreground objects. We also report the performance improvement of BEVControl relative to GLIGEN [14] in the last row. BEVControl achieves a higher OCS than the other methods (see the right-most column of Table 1). Below, we evaluate the detailed performance of controlling various visual elements.
|
| 202 |
+
|
| 203 |
+
Realism and Diversity In Table 1 "Real & Diverse", we measure the realism and diversity of the image data generated by different methods in terms of the Frechet Inception Distance (FID). BEVControl achieves 24.85 FID, outperforming the other methods. We also compare the street-view images generated by different methods in Figures 5 and 6, where BEVControl produces higher image quality than the compared methods.
|
| 204 |
+
|
| 205 |
+
Foreground Control In Table 1 "Foreground Control", we examine the controlling power of different methods on the foreground objects. In this examination, we use BEVFormer [15] to detect the ten categories of foreground objects on the BEV layouts, reporting the performance in terms of mAP, NDS, and mAOE. We use CVT [37] to segment the foreground (i.e., vehicle) on the BEV layouts, whose performance is reported in terms of mIoU. In the first row of Table 1 "Reference-score", we report the detection performances of BEVFormer and segmentation performances of CVT on the original validation subset. These results can be regarded as the Reference-score performance of BEVFormer and CVT for measuring the controlling power of BEVControl.
|
| 206 |
+
|
| 207 |
+
Based on the data generated by BEVControl, BEVFormer and CVT achieve better detection and segmentation performances than other generative models. It demonstrates a more substantial controlling power of BEVControl on the foreground objects. We compare the generated foreground objects by different methods in Figure 5, where BEVControl satisfactorily yields the foreground objects according to the ground-truth annotations. Furthermore, Figure 7 demonstrates the generation capability of BEVControl for user-drawn BEV sketches of different vehicle orientations.
|
| 208 |
+
|
| 209 |
+
Background Control In Table 1 "Background Control", we study the controlling power of different methods on the background objects. Again, we use the trained CVT, which uses the generated street-view images to segment the background (i.e., road) on the BEV layouts. We report the segmentation accuracy mIoU on the road category. We also compare the generated roads by different methods in Figure 6. Compared to the street-view images generated by other methods, those generated by BEVControl lead to a better segmentation accuracy, which means a more substantial controlling power of BEVControl on the background objects. Additionally, Figure 8 displays the generation capability of BEVControl for user-edited BEV sketches of different road traffic situations.
|
| 210 |
+
|
| 211 |
+
<table><tr><td rowspan="2">Controller</td><td colspan="2">FC</td><td>BC</td><td rowspan="2">Socs ↑</td></tr><tr><td>SNDS ↑</td><td>SfIoU ↑</td><td>SbIoU ↑</td></tr><tr><td>Reference-score</td><td>44.10</td><td>34.83</td><td>74.33</td><td>5.00</td></tr><tr><td>foreground</td><td>25.23</td><td>22.50</td><td>41.70</td><td>2.69</td></tr><tr><td>background</td><td>3.70</td><td>3.53</td><td>49.71</td><td>1.74</td></tr><tr><td>both w/o separation</td><td>26.87</td><td>23.78</td><td>52.30</td><td>2.90</td></tr><tr><td>both w/ separation</td><td>28.68</td><td>26.80</td><td>60.80</td><td>3.18</td></tr></table>
|
| 212 |
+
|
| 213 |
+
Table 2. Different strategies of using the foreground and background hints for controlling the visual elements. The evaluation metrics (i.e., FID, scores of foreground and background control) reported in this table are the same to those in Table 1.
|
| 214 |
+
|
| 215 |
+
# 5.3. Ablation Study on Controller
|
| 216 |
+
|
| 217 |
+
The controller of BEVControl regards the bounding boxes and the road sketches as hints. The diffusion model uses these hints to generate the foreground and background objects in the street-view images. Here, we experiment with different strategies of using the foreground and background hints to examine their effect on controlling the visual elements in the generated street-view images. We report the quantitative results in Table 2.
|
| 218 |
+
|
| 219 |
+
First, we use the foreground or background hint only, reporting the scores of FID, foreground control (FC), and background control (BC) in the second and third rows of Table 2. Without foreground or background hint, we degrade the realism and diversity of the generated images, while the score of foreground or background control also decreases consequently. These results demonstrate the importance of using the foreground and background hints together.
|
| 220 |
+
|
| 221 |
+
Next, we compare various strategies for using both foreground and background hints. We evaluate an alternative method that employs the foreground and background hints without separately controlling the visual elements. We use a single attention layer to jointly embed the foreground and background hints into latent space. The controller uses the latent embedding of these hints for outputting an information-controlled feature map. The coordinator relies on the information-controlled feature map to generate the street-view images. We report the foreground or background control scores in the fourth row of Table 2. Though the scores are higher than those achieved using the foreground or background hint alone, they still lag behind the results of the entire controller in the fifth row. The complete controller uses separate network streams to enable a more focused control of the foreground and background objects.
|
| 222 |
+
|
| 223 |
+
# 5.4. Ablation Study on Coordinator
|
| 224 |
+
|
| 225 |
+
The coordinator utilizes the CVCE attention to enhance the visual consistency between the generated street-view images. Here, we use the street-view images from various perspectives to compute the CLIP score for measuring visual
|
| 226 |
+
|
| 227 |
+
<table><tr><td>Coordinator</td><td>\( S_{CLIP} \uparrow \)</td></tr><tr><td>Reference-score</td><td>87.96</td></tr><tr><td>w/o coordinator</td><td>79.50</td></tr><tr><td>w/ CV, w/o CE</td><td>82.30</td></tr><tr><td>w/ CVCE</td><td>82.70</td></tr></table>
|
| 228 |
+
|
| 229 |
+
Table 3. Different strategies of using the coordinator for yielding the street-view images from different perspectives. We report the results as CLIP scores, which measure the visual consistency of the street-view images.
|
| 230 |
+
|
| 231 |
+
consistency. In Table 3, we compare the CLIP scores for the street-view images generated by different alternatives.
|
| 232 |
+
|
| 233 |
+
We remove the coordinator from BEVControl, which has the controller alone. This alternative significantly degrades the visual consistency of the generated street-view images (see the CLIP score in the second row of Table 3). We improve the CLIP score by adding the coordinator with the cross-view attention but without the cross-element attention (see the third row). This result demonstrates the positive impact of cross-view attention on visual consistency.
|
| 234 |
+
|
| 235 |
+
# 5.5. Data Augmentation for Object Detection
|
| 236 |
+
|
| 237 |
+
For each BEV layout in the training set of the nuScenes dataset, we employ BEVControl to generate a set of street-view images. Each BEV layout and its generated street-view images can then be used together to augment the nuScenes training set. We train BEVFormer for object detection on this augmented training set and report the detection performance in Table 4. Compared to the BEVFormer trained without data augmentation (see the first row), the counterpart trained with data augmentation yields better results (see the second row).
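A minimal sketch of this augmentation recipe is given below. It assumes that each generated sample simply reuses the ground-truth annotations of its source BEV layout, and `generate_views` is a hypothetical wrapper around BEVControl inference rather than a released API.

```python
def build_augmented_train_set(nuscenes_train, bev_control):
    """Pair each generated multi-view sample with the annotations of its source layout."""
    augmented = list(nuscenes_train)                  # original (images, BEV layout) samples
    for sample in nuscenes_train:
        synth_views = bev_control.generate_views(sample["bev_layout"])  # hypothetical API
        augmented.append({
            "images": synth_views,                    # generated surround-view images
            "bev_layout": sample["bev_layout"],       # labels shared with the source layout
        })
    return augmented                                  # used to train BEVFormer (Table 4)
```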
|
| 238 |
+
|
| 239 |
+
<table><tr><td>Method</td><td>mAP↑</td><td>NDS↑</td><td>mAOE↓</td></tr><tr><td>w/o augmentation</td><td>37.00</td><td>47.90</td><td>0.66</td></tr><tr><td>w/ augmentation</td><td>38.96</td><td>49.19</td><td>0.42</td></tr></table>
|
| 240 |
+
|
| 241 |
+
Table 4. Application of using the generated street-view images for augmenting the training data. We report the detection performances of BEVFormer on the validation set.
|
| 242 |
+
|
| 243 |
+
# 6. Conclusion
|
| 244 |
+
|
| 245 |
+
Given the BEV layout as the hint, the most advanced generative networks can synthesize street-view images with realistic and diverse appearances, thus enriching the data for training BEV perception models and benefiting autonomous driving. This paper advocates the significance of strengthening the controlling power of the generative
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
Figure 5. The visualization of foreground controlling generation. Compared to other methods, ours can generate objects that correspond more closely to the bounding box sketch conditions, especially the accurate orientation.
|
| 251 |
+
|
| 252 |
+
network for BEV perception. We propose a novel generative network, BEVControl, which relies on the sketches of the BEV layout to synthesize the background and foreground elements in the street-view images. By depending on more focused hints, BEVControl enables accurate control of the background and foreground elements, whose visual consistency across multiple perspectives is maintained by the cross-view-cross-element attention. Compared to the contemporary methods, a better controlling power allows
|
| 253 |
+
|
| 254 |
+
BEVControl to yield richer data for BEV perception.
|
| 255 |
+
|
| 256 |
+
In future work, we will investigate how to better control more kinds of visual elements like lighting and weather in the generated images rather than the background and foreground only. In addition to generating street-view images, we will also study how to transfer the idea of BEVControl to create more general scenes.
|
| 257 |
+
|
| 258 |
+

|
| 259 |
+
Figure 6. The visualization of background controlling generation. Compared to other methods, ours can generate street views that correspond more closely to the road sketch conditions.
|
| 260 |
+
|
| 261 |
+
# References
|
| 262 |
+
|
| 263 |
+
[1] Adil Kaan Akan and Fatma Güney. Stretchbev: Stretching future instance prediction spatially and temporally. In European Conference on Computer Vision, pages 444-460. Springer, 2022. 2
|
| 264 |
+
[2] Omri Avrahami, Thomas Hayes, Oran Gafni, Sonal Gupta, Yaniv Taigman, Devi Parikh, Dani Lischinski, Ohad Fried, and Xi Yin. Spatext: Spatio-textual representation for controllable image generation. In Proceedings of the IEEE/CVF
|
| 265 |
+
|
| 266 |
+
Conference on Computer Vision and Pattern Recognition, pages 18370-18380, 2023. 3
|
| 267 |
+
[3] Omer Bar-Tal, Lior Yariv, Yaron Lipman, and Tali Dekel. Multidiffusion: Fusing diffusion paths for controlled image generation. 2023. 3
|
| 268 |
+
[4] Dina Bashkirova, José Lezama, Kihyuk Sohn, Kate Saenko, and Irfan Essa. Masksketch: Unpaired structure-guided masked image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1879-1889, 2023. 3
|
| 269 |
+
|
| 270 |
+

|
| 271 |
+
Figure 7. The visualization of foreground controlling generation in various vehicle orientations.
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
Figure 8. The visualization of background controlling generation with various road sketches.
|
| 275 |
+
|
| 276 |
+
[5] Holger Caesar, Varun Bankiti, Alex H Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11621-11631, 2020. 2, 6
|
| 277 |
+
[6] Huiwen Chang, Han Zhang, Lu Jiang, Ce Liu, and William T Freeman. Maskgit: Masked generative image transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11315-11325, 2022. 2
|
| 278 |
+
[7] Jiaxin Cheng, Xiao Liang, Xingjian Shi, Tong He, Tianjun Xiao, and Mu Li. Layoutdiffuse: Adapting foundational diffusion models for layout-to-image generation. arXiv preprint arXiv:2302.08908, 2023. 3
|
| 279 |
+
[8] Ernie Chu, Shuo-Yen Lin, and Jun-Cheng Chen. Video controlnet: Towards temporally consistent synthetic-to-real video translation using conditional image diffusion models. arXiv preprint arXiv:2305.19193, 2023. 4
|
| 280 |
+
[9] Cusuh Ham, James Hays, Jingwan Lu, Krishna Kumar Singh, Zhifei Zhang, and Tobias Hinz. Modulating pretrained diffusion models for multimodal image synthesis. arXiv preprint arXiv:2302.12764, 2023. 3
|
| 281 |
+
[10] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. 5
|
| 282 |
+
[11] Anthony Hu, Zak Murez, Nikhil Mohan, Sofia Dudas, Jeffrey Hawke, Vijay Badrinarayanan, Roberto Cipolla, and Alex Kendall. Fiery: Future instance prediction in bird's-eye view from surround monocular cameras. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15273-15282, 2021. 2
|
| 283 |
+
[12] Lianghua Huang, Di Chen, Yu Liu, Yujun Shen, Deli Zhao, and Jingren Zhou. Composer: Creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778, 2023. 3
|
| 284 |
+
[13] Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439, 2023. 4
|
| 285 |
+
[14] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 3, 5, 7
|
| 286 |
+
[15] Zhiqi Li, Wenhai Wang, Hongyang Li, Enze Xie, Chonghao Sima, Tong Lu, Yu Qiao, and Jifeng Dai. Bevformer: Learning bird's-eye-view representation from multi-camera images via spatiotemporal transformers. In European conference on computer vision, pages 1-18. Springer, 2022. 2, 6, 7
|
| 287 |
+
[16] Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11976-11986, 2022. 5
|
| 288 |
+
|
| 289 |
+
[17] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 5
|
| 290 |
+
[18] Chong Mou, Xintao Wang, Liangbin Xie, Jian Zhang, Zhonggang Qi, Ying Shan, and Xiaohu Qie. T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453, 2023. 3
|
| 291 |
+
[19] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021. 3
|
| 292 |
+
[20] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 5, 6
|
| 293 |
+
[21] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 2022. 3
|
| 294 |
+
[22] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 3, 4, 5
|
| 295 |
+
[23] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 3
|
| 296 |
+
[24] Alexander Swerdlow, Runsheng Xu, and Bolei Zhou. Street-view image generation from a bird's-eye view layout. arXiv preprint arXiv:2301.04634, 2023. 2, 4, 5, 7
|
| 297 |
+
[25] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2818-2826, 2016. 6
|
| 298 |
+
[26] Shitao Tang, Fuyang Zhang, Jiacheng Chen, Peng Wang, and Yasutaka Furukawa. Mvdiffusion: Enabling holistic multi-view image generation with correspondence-aware diffusion. arXiv preprint arXiv:2307.01097, 2023. 3
|
| 299 |
+
[27] Aaron Van Den Oord, Oriol Vinyals, et al. Neural discrete representation learning. Advances in neural information processing systems, 30, 2017. 2
|
| 300 |
+
[28] Andrey Voynov, Kfir Aberman, and Daniel Cohen-Or. Sketch-guided text-to-image diffusion models. arXiv preprint arXiv:2211.13752, 2022. 3
|
| 301 |
+
[29] Weilun Wang, Jianmin Bao, Wengang Zhou, Dongdong Chen, Dong Chen, Lu Yuan, and Houqiang Li. Semantic image synthesis via diffusion models. arXiv preprint arXiv:2207.00050, 2022. 3
|
| 302 |
+
|
| 303 |
+
[30] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Weixian Lei, Yuchao Gu, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation. arXiv preprint arXiv:2212.11565, 2022. 4
|
| 304 |
+
[31] Han Xue, Zhiwu Huang, Qianru Sun, Li Song, and Wenjun Zhang. Freestyle layout-to-image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14256-14266, 2023. 3
|
| 305 |
+
[32] Zhengyuan Yang, Jianfeng Wang, Zhe Gan, Linjie Li, Kevin Lin, Chenfei Wu, Nan Duan, Zicheng Liu, Ce Liu, Michael Zeng, et al. Reco: Region-controlled text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14246-14255, 2023. 3
|
| 306 |
+
[33] Lvmin Zhang and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. arXiv preprint arXiv:2302.05543, 2023. 3
|
| 307 |
+
[34] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 4
|
| 308 |
+
[35] Yunpeng Zhang, Zheng Zhu, Wenzhao Zheng, Junjie Huang, Guan Huang, Jie Zhou, and Jiwen Lu. Beverse: Unified perception and prediction in bird's-eye-view for vision-centric autonomous driving. arXiv preprint arXiv:2205.09743, 2022. 2
|
| 309 |
+
[36] Guangcong Zheng, Xianpan Zhou, Xuewei Li, Zhongang Qi, Ying Shan, and Xi Li. Layoutdiffusion: Controllable diffusion model for layout-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22490-22499, 2023. 3, 7
|
| 310 |
+
[37] Brady Zhou and Philipp Krahenbuhl. Cross-view transformers for real-time map-view semantic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13760-13769, 2022. 2, 6, 7
|
data/2023/2308_01xxx/2308.01661/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1824da809c37423c2dbeed2b36c7b38e1b50b7b6813304aa6fdc634c767df607
|
| 3 |
+
size 1142354
|
data/2023/2308_01xxx/2308.01661/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01681/71f011e8-922f-4d9b-bc52-116e72695568_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f423381a2d7174184b9214fad9ae253b45398a30ad32411ea9927bf6fe2cf006
|
| 3 |
+
size 12443993
|
data/2023/2308_01xxx/2308.01681/full.md
ADDED
|
@@ -0,0 +1,609 @@
|
| 1 |
+
# NBIAS: A Natural Language Processing Framework for BiAs Identification in Text
|
| 2 |
+
|
| 3 |
+
Shaina Raza $^{a,\ast}$ , Muskan Garg $^{b}$ , Deepak John Reji $^{c}$ , Syed Raza Bashir $^{d}$ , Chen Ding $^{d}$
|
| 4 |
+
|
| 5 |
+
$^{a}$ Vector Institute for Artificial Intelligence, Toronto, ON, Canada $^{b}$ Artificial Intelligence & Informatics, Mayo Clinic, Rochester, MN, USA $^{c}$ Environmental Resources Management, Bengaluru, Karnataka, India $^{d}$ Toronto Metropolitan University, Toronto, ON, Canada
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Bias in textual data can lead to skewed interpretations and outcomes when the data is used. These biases could perpetuate stereotypes, discrimination, or other forms of unfair treatment. An algorithm trained on biased data may end up making decisions that disproportionately impact a certain group of people. Therefore, it is crucial to detect and remove these biases to ensure the fair and ethical use of data. To this end, we develop a comprehensive and robust framework, NBIAS, that consists of four main layers: data, corpus construction, model development, and evaluation. The dataset is constructed by collecting diverse data from various domains, including social media, healthcare, and job hiring portals. We then apply a transformer-based token classification model that is able to identify biased words/phrases through a unique named entity $BIAS$. In the evaluation procedure, we incorporate a blend of quantitative and qualitative measures to gauge the effectiveness of our models. We achieve accuracy improvements ranging from $1\%$ to $8\%$ compared to baselines. We are also able to generate a robust understanding of the model functioning. The proposed approach is applicable to a variety of biases and contributes to the fair and ethical use of textual data.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
The recent surge in Natural Language Processing (NLP) applications, encompassing fields from recommendation systems to social justice and employment screening, has sparked a critical concern - the emergence of bias within these systems [1]. Instances of racial and gender bias have been increasingly reported [2], indicating an urgent need for scrutiny. These biases often originate from the training data used in NLP models, and a majority of these large datasets harbor inherent biases. Regrettably, many NLP practitioners lack the necessary awareness or knowledge to effectively identify and address these biases, highlighting a significant gap in the field.
|
| 14 |
+
|
| 15 |
+
Furthermore, there is a notable lack of discussion on data specifics - its origin, generation, and pre-processing - in many NLP publications. Given these circumstances, the importance of addressing biases in NLP applications cannot be overstated. These biases, if unchecked, not only compromise the validity of the models, but can also have unfavorable and detrimental consequences. The objective of this research is to provide insights into the detection of bias in NLP datasets, contributing to the development of more equitable and unbiased Artificial Intelligence (AI) systems.
|
| 16 |
+
|
| 17 |
+
Bias in text data is a pervasive and deeply-rooted issue. The bias in data often stems from cognitive predispositions that influences our dialogues, views, and understanding of information [3]. This bias can be explicit which are often seen in discriminatory language targeting certain racial or ethnic groups [4], as in social media. Implicit bias [5], on the other hand, subtly perpetuates prejudice through unintentional language use but is equally harmful.
|
| 18 |
+
|
| 19 |
+
The necessity for unbiased, trustworthy text data has grown across sectors like healthcare [6], social media [4, 7], and recruitment [8]. This data is essential for training NLP models for various downstream tasks, like formulating healthcare diagnoses and treatment plans, handling discriminatory language on social media, and promoting fair recruitment practices. Figure 1 illustrates the complexities of biases in text data in various domains, including job hiring, social media, and healthcare. These biases are primarily conveyed through lexical choices [9] and demand sophisticated detection methods, motivating this research. The primary aim of this study is to further foundational research on the fairness and reliability of the textual data.
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
Figure 1: Visual Representation of Implicit and Explicit Biases in Textual Data: Examples from Job Hiring, Social Media, and Healthcare.
|
| 23 |
+
|
| 24 |
+
Although NLP has advanced much, the state-of-the-art techniques [2, 10, 11] often concentrate on bias detection in specific domains and lack generalizability. To address this, our research offers a generalizable bias detection method proven effective across the various domains. We present NBIAS, a comprehensive framework for detecting bias in text data. This involves data preparation where bias-indicative terms are marked using a transformer-based token classification method like Named Entity Recognition (NER).
|
| 25 |
+
|
| 26 |
+
Current NER solutions can manage general [12], biomedical [13], and social media [14] entities, but often neglect $BIAS$ as a separate entity. To address this, we introduce a new entity type, $BIAS$ , to identify biased terms in text data. In this context, bias refers to unfair and often harmful favoritism or prejudice towards a particular group, person, or idea, which can manifest through profanity, unjustified criticism, or discriminatory language.
|
| 27 |
+
|
| 28 |
+
A key contribution of this study is the development of the first comprehensive framework for bias detection in text data. This framework is based on latest language model technology and incorporates four crucial layers: data gathering, corpus construction, model development, and rigorous evaluation. The specific contributions of the work are as follows:
|
| 29 |
+
|
| 30 |
+
1. Development of Annotated Datasets: Acknowledging the scarcity of bias annotations in text-based data, we designed a solution by generating
|
| 31 |
+
|
| 32 |
+
multiple annotated datasets. Our work fills a critical gap in the available resources, thereby providing a solid foundation for future research in the realm of bias detection.
|
| 33 |
+
|
| 34 |
+
2. Semi-Autonomous Labeling: To alleviate the time-intensive manual annotation process, we pioneered a novel methodology termed "semi-autonomous labeling". This strategy provides a faster and more efficient way of annotating bias-related terms within textual content. This innovative approach has significant implications for improving the speed and accuracy of bias detection.
|
| 35 |
+
3. Unique Entity Type - $BIAS$ : In an effort to enhance the precision of bias identification within text, we introduced a unique entity type, $BIAS$ . This new entity type is specifically designed for detecting biased words and phrases within the text data. This has the potential to dramatically improve the process of bias identification and quantification in text-based analysis.
|
| 36 |
+
4. Comprehensive Evaluation Process: We subjected our proposed framework to a thorough evaluation process, utilizing both quantitative and qualitative analysis methods. The results confirm the reliability and efficiency of our approach, making it compatible for its application in real-world scenarios. This rigorous evaluation sets a benchmark for assessing the efficacy of bias detection methodologies.
|
| 37 |
+
|
| 38 |
+
# 2. Related Work
|
| 39 |
+
|
| 40 |
+
# 2.1. Identifying Bias in NLP
|
| 41 |
+
|
| 42 |
+
One of the key challenges associated with NLP systems lies in the presence of bias, a manifestation of unfair and systematic discrimination observed in their outcomes [15]. Moreover, past studies [16, 10, 11, 2, 17] have shown that societal and cultural prejudices are deeply embedded within the training data. Such biases, whether explicit or implicit, can significantly impact the functionality of NLP systems, leading to skewed results and perpetuating existing societal biases. Thus, the detection and mitigation of these biases are crucial to promoting fairness and inclusiveness within NLP systems [7, 11].
|
| 43 |
+
|
| 44 |
+
Researchers have proposed and implemented various strategies to identify bias, including employing statistical methods to discover patterns of bias within the training data [2, 18]. Under this approach, specific words or
|
| 45 |
+
|
| 46 |
+
phrases that appear to be disproportionately associated with certain demographic groups, such as genders or races, are identified. For example, certain adjectives might be used more frequently in descriptions of women than men [2], or vice versa. The identification and debiasing of such patterns can highlight areas of potential bias, providing a starting point for efforts to eliminate these biases [19].
|
| 47 |
+
|
| 48 |
+
The field of bias detection in NLP has seen a surge of innovative methods in recent years, primarily leveraging advanced machine learning techniques. One such study has considered the combination of a speech detection system with an explanatory method to identify potential bias [20]. In this method, not only is the system trained to detect instances of hate speech, but it also provides a rationale or explanation for its classification decisions. Another area of research that has attracted considerable attention is the investigation of bias in event detection datasets and models [21]. Event detection tasks involve identifying and classifying real-world events within text data. These tasks can be susceptible to a range of bias-related issues, including data sparsity, labeling task, and annotations.
|
| 49 |
+
|
| 50 |
+
Additionally, NLP techniques have been employed to address various aspects of bias. For instance, in a related study [22] on gender bias and sentiment towards political leaders in the news were quantified using word embeddings and sentiment analysis. Another work focused on investigating ableist bias in NLP systems, particularly at the intersection of gender, race, and disability [23]. Similarly, a methodology was proposed to eliminate gender bias from word embeddings [24]. Furthermore, marked attribute bias in natural language inference was identified and analyzed, with an evaluation of existing methods for bias mitigation [9]. These studies provide a deep understanding of the social and cultural factors that contribute to bias identification.
|
| 51 |
+
|
| 52 |
+
Another work [25] presents bias analysis in NLP beyond demographic bias, focusing on predicting interpersonal group relationships using fine-grained emotions. A related study [26] evaluates gender bias in NLP research, highlighting the lack of explicit gender theorization. In another work, authors [27] introduce an effective bias-conflicting scoring method and gradient alignment strategy to identify and mitigate dataset biases. Overall, these studies underscore the importance of continuous efforts in identifying and mitigating biases in models to ensure fairness and equity.
|
| 53 |
+
|
| 54 |
+
# 2.2. Named Entity Recognition (NER)
|
| 55 |
+
|
| 56 |
+
Named Entity Recognition (NER) is a significant token classification task in NLP that aims to identify and classify named entities, such as individuals, organizations, and locations, within textual data. In the past, many traditional methods have been employed for NER, each with its unique characteristics and benefits.
|
| 57 |
+
|
| 58 |
+
- Rule-based methods rely on predefined sets of rules to identify named entities [28]. This method usually employs regular expressions or dictionary-based techniques to extract entities. Although rule-based methods can be effective for well-defined and specific contexts, their performance can decrease in the face of variability and ambiguity in language usage.
|
| 59 |
+
- Supervised learning methods leverage annotated data to train a model for NER [14, 29]. These methods use statistical models such as Support Vector Machines (SVM), Conditional Random Fields (CRF), and others to classify the named entities. The performance of supervised learning methods can be impressive, given sufficient high-quality annotated data.
|
| 60 |
+
- Deep learning methods, which are more contemporary approaches, utilize complex architectures like recurrent neural networks (RNNs) and transformer-based language models to extract named entities [30, 13]. These methods have shown promising results in NER tasks, owing to their capacity to capture intricate language patterns and contextual information.
|
| 61 |
+
|
| 62 |
+
A recent study introduced a contrastive learning-based approach for multimodal NER [31]. This approach leverages both textual and non-textual data to identify and classify named entities, harnessing the complementary information offered by different modalities to improve the model's performance. Another research work investigated event detection from social media posts, evaluating the effectiveness of a pre-trained NER model followed by graph-based spectral clustering [32]. The study also explored transformer-based methods to weight the edges of the graph for event detection, further refining the detection process. A span-based NER model eliminates the need for label
|
| 63 |
+
|
| 64 |
+
dependency [32]. This approach addresses the issue of cascade label misclassifications, a common challenge in traditional NER models that depend on label sequences.
|
| 65 |
+
|
| 66 |
+
While our work on token classification is inspired by these studies, we identify a notable gap in the literature: the existing seminal work does not recognize $BIAS$ as an entity. In this work, we detect biased expressions within unstructured texts, designating them under the 'BIAS' entity label.
|
| 67 |
+
|
| 68 |
+
# 2.3. Data Annotation
|
| 69 |
+
|
| 70 |
+
Data annotation is a crucial task in NLP as it involves labeling and categorizing information to extract valuable insights from text data [33]. By enriching text data with relevant metadata, such as part-of-speech tags, named entity tags, and sentiment tags, data annotation provides contextual information that is essential for subsequent analysis [34]. Quality annotated data enhances model learning, boosting prediction accuracy. In contrast, inadequate annotations impede learning, resulting in subpar performance. Various methods of data annotation cater to different requirements of speed, quality, and computational resources:
|
| 71 |
+
|
| 72 |
+
- Manual annotation is carried out by human annotators who carefully review and label the data. This method typically yields high-quality results, given the nuanced understanding that humans have of language. However, manual annotation is often time-consuming and labor-intensive, and its feasibility may be limited by the availability of qualified annotators and financial resources [28].
|
| 73 |
+
- Semi-automatic annotation combines manual efforts with automated tools to accelerate the annotation process and minimize human error. These tools can range from rule-based systems to pre-trained machine learning models [35]. While semi-automatic annotation can improve efficiency, its accuracy may still depend on the quality of the automated tools and the manual review process.
|
| 74 |
+
- Automatic annotation leverages machine learning models and algorithms to annotate text data without human intervention [36]. Although automatic annotation can process vast amounts of data in a relatively short time, its accuracy may be compromised, particularly for complex or ambiguous texts. Therefore, a common practice is to
|
| 75 |
+
|
| 76 |
+
combine automatic annotation with manual review to ensure data quality.
|
| 77 |
+
|
| 78 |
+
Various strategies have been developed to address these challenges and optimize the annotation process. One study presents a comprehensive comparison of different annotation tools, highlighting their strengths and limitations [37]. Another research work proposes a method for automatically generating high-quality labeled data for NER tasks by leveraging existing knowledge bases [38]. A similar study has developed an annotation framework that combines statistical machine translation and human annotation to create a parallel corpus [39]. Other researchers have investigated methods for improving the reliability and consistency of manual annotations, such as developing guidelines and protocols for annotation tasks [40] or implementing quality control mechanisms to ensure data quality.
|
| 79 |
+
|
| 80 |
+
Ultimately, the choice of annotation method and tools will depend on the specific requirements of a project, such as the desired level of accuracy, the available resources, and the nature of the data being annotated. To this end, we employ a semi-automatic annotation strategy, integrating human proficiency with semi-supervised learning methodologies.
|
| 81 |
+
|
| 82 |
+
# 3. Proposed Framework for Bias Identification in Texts
|
| 83 |
+
|
| 84 |
+
In this section, we present NBIAS, an innovative framework designed to detect biases within textual data, as illustrated in Figure 2. The NBIAS framework is structured into four distinct layers: (i) the data collection layer, (ii) the corpus construction layer, (iii) the model development layer, and (iv) the evaluation layer. Each layer is designed to collaborate seamlessly with the others, providing an effective and comprehensive approach for detecting biases in textual content.
|
| 85 |
+
|
| 86 |
+
# 3.1. Data Layer
|
| 87 |
+
|
| 88 |
+
The Data Layer serves as the framework's primary interface with the data for analysis. It handles data collection, pre-processing, and consolidation from a variety of sources, such as social media, online articles, and databases. This layer ensures adaptability and high performance for the entire framework.
|
| 89 |
+
|
| 90 |
+

|
| 91 |
+
Figure 2: NBIAS: A Natural Language Processing Framework for Bias Identification.
|
| 92 |
+
|
| 93 |
+
Data Gathering. Our study adopts a methodological data collection approach, incorporating diverse sources from various domains. To analyze biases in medical narratives and healthcare, we include data from two important clinical text databases: MIMIC-III [41] and MACCROBAT [37]. The MIMIC-III dataset is a publicly available database with de-identified health data from over 40,000 ICU patients. It offers rich clinical narratives, including nursing notes, radiology reports, and discharge summaries, enabling a deep understanding of biases in healthcare communication. The textual data were primarily obtained from the NOTEEVENTS table.
|
| 94 |
+
|
| 95 |
+
The MACCROBAT dataset provides valuable pediatric critical care data, including admission notes and medical summaries. It contains 200 original documents along with corresponding annotated versions centered around clinical case reports.
|
| 96 |
+
|
| 97 |
+
To detect bias in news articles and social media streams, we use the BABE (Bias Annotations By Experts) dataset [10]. This dataset includes 3700 articles and tweets, offering a comprehensive perspective on linguistic bias in media and public opinion. It features marked statements, enabling recognition of bias at both granular (word-level) and broader (sentence-level) scopes, covering diverse topics.
|
| 98 |
+
|
| 99 |
+
To examine biases in employment practices, we incorporate the Job Hiring/Recruitment dataset [42], comprising 20,000 job posts with titles, descriptions, and associated tags from various businesses. Each advertisement includes job details and manually assigned tags by recruiters, suggesting jobs to potential candidates with analogous skills.
|
| 100 |
+
|
| 101 |
+
Data Consolidation. After gathering and pre-processing data from various sources, all datasets are harmonized into a single consolidated dataframe. This dataframe includes the following columns:
|
| 102 |
+
|
| 103 |
+
- Dataset: Specifies the source dataset, such as MIMIC-III, MACCROBAT, Job Hiring, or BABE.
|
| 104 |
+
- Text: Contains the actual textual data extracted from the respective datasets, including clinical notes, case reports, job descriptions, or annotated statements.
|
| 105 |
+
- Biased Words: Includes the words or phrases identified as biased in the text, crucial for granular bias detection.
|
| 106 |
+
- Aspect of Bias: Denotes the specific type or aspect of bias present in the text, categorized by gender, racial, or age biases, to understand the nature of the biases detected.
|
| 107 |
+
- Label: Indicates whether the text is biased or non-biased, serving as the target variable for the token classifier and for evaluation purposes.
|
| 108 |
+
|
| 109 |
+
```txt
|
| 110 |
+
A sample record in JSON format is shown below:
|
| 111 |
+
{ "Record": { "Dataset": "MIMIC-III", "Text": "Clinical notes of patient XYZ indicate a history of superficial hypertension due to overly emotional personality.", "BiasedWords": "superficial, overly emotional personality", "AspectOfBias": "age", "Label": "biased" }
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
In the consolidated dataframe, each row represents a unique sample from the original dataset, supplying information for bias detection and assessment. Further pre-processing is conducted to prepare the data for subsequent layers of the NBIAS framework, particularly the NLP model performing token classification.
|
| 115 |
+
|
| 116 |
+
Data Pre-processing. The pre-processing of textual data involves a series of sequential operations to refine and structure the data for machine learning algorithms. This includes tokenization, which involves breaking raw text into meaningful tokens (words or subwords) for semantic understanding and subsequent NLP tasks; text cleaning, which involves removing punctuation, numbers, special characters, and converting text to lowercase to ensure uniformity and clarity; and handling missing values, which involves identifying and appropriately managing missing data to avoid bias and improve model performance. These pre-processing steps convert raw text into a clean, structured format, enabling the NLP token classification model in the subsequent layer.
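A minimal pre-processing sketch over the consolidated dataframe is shown below. The column names follow the schema described above, while the specific cleaning rules (regular expressions, whitespace tokenization) are illustrative assumptions rather than the exact NBIAS pipeline.

```python
import re
import pandas as pd

def preprocess(df: pd.DataFrame) -> pd.DataFrame:
    df = df.dropna(subset=["Text", "Label"]).copy()            # handle missing values
    df["Text"] = (
        df["Text"]
        .str.lower()                                           # uniform casing
        .apply(lambda t: re.sub(r"[^a-z\s]", " ", t))          # drop punctuation, digits, specials
        .apply(lambda t: re.sub(r"\s+", " ", t).strip())       # normalize whitespace
    )
    df["Tokens"] = df["Text"].str.split()                      # simple whitespace tokenization
    return df

clean_df = preprocess(pd.DataFrame({
    "Text": ["Clinical notes of patient XYZ indicate overly emotional personality."],
    "Label": ["biased"],
}))
print(clean_df["Tokens"].iloc[0])
```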
|
| 117 |
+
|
| 118 |
+
# 3.2. Corpus Construction
|
| 119 |
+
|
| 120 |
+
Our group, consisting of three seasoned professionals from the disciplines of healthcare, computer science, and journalism, was joined by two diligent students to perform the task of detecting and labeling bias in our dataset. Their collective role centered around the critical task of carefully annotating bias within our dataset. This endeavor is important to ensure the integrity and fairness of any subsequent analysis or research. The foundation for this task was based on comprehensive guidelines that clearly delineated the concept of bias in this context.
|
| 121 |
+
|
| 122 |
+
Bias, as per the instructions, was defined as any terminology or phraseology that could potentially provoke prejudiced comprehension or induce stereotyping, consistent with definitions used in the literature [11, 7, 10]. For this NLP work, the factors from which biases could stem were identified as gender, race, socioeconomic status, age, or disability. Such biases could inadvertently skew the dataset and, consequently, the results derived from it. Thus, the identification and annotation of such biases are of high importance to uphold the accuracy and reliability of our dataset. Highlighting both explicit and implicit biases was emphasized as a critical part of our work.
|
| 123 |
+
|
| 124 |
+
Annotation Scheme. Under the light of these provided guidelines, our team proceeded by using a carefully compiled list of terms and phrases, known as "bias-indicative" lexicons. These lexicons provided a comprehensive guide to potential areas where bias could lurk within our dataset. A portion of this list is exhibited in Table 1 for reference. This bias-indicative lexicon served as a navigational tool for our team to identify and mark "BIAS" entities scattered within our textual data. These entities can be individual words or phrases that express or imply bias. This systematic approach ensured that we could account for most biases that exist in the data.
|
| 125 |
+
|
| 126 |
+
Table 1: Bias Dimensions with Sample Biased Words/Phrases (only a few examples shown for brevity)
|
| 127 |
+
|
| 128 |
+
<table><tr><td>Bias Dimension</td><td>Biased Words/Phrases</td></tr><tr><td>Gender</td><td>‘hysterical’, ‘emotional’, ‘weak’, ‘bossy’, ‘fragile’, ‘nagging’, ‘man up’, ‘tomboy’</td></tr><tr><td>Race</td><td>‘inner city’, ‘illegal alien’, ‘thug’, ‘exotic’, ‘uncivilized’, ‘model minority’, ‘white trash’</td></tr><tr><td>Social Status</td><td>‘trailer park’, ‘lazy’, ‘freeloader’, ‘welfare queen’, ‘ghetto’, ‘lazy bum’, ‘filthy rich’</td></tr><tr><td>Age</td><td>‘senile’, ‘slow’, ‘old-fashioned’, ‘whippersnapper’, ‘elderly’, ‘young and naive’, ‘generation gap’</td></tr><tr><td>Disability</td><td>‘handicapped’, ‘crippled’, ‘invalid’, ‘sufferer’, ‘differently-abled’, ‘victim’</td></tr><tr><td>Religion</td><td>‘radical’, ‘terrorist’, ‘infidel’, ‘heathen’, ‘fanatic’, ‘holy roller’</td></tr><tr><td>Profession</td><td>‘greedy’, ‘dishonest’, ‘corrupt politician’, ‘crooked lawyer’, ‘greedy CEO’, ‘lazy government worker’</td></tr><tr><td>National</td><td>‘unpatriotic’, ‘alien’, ‘foreigner’, ‘outsider’, ‘immigrant’, ‘nationalist’</td></tr><tr><td>Education</td><td>‘uneducated’, ‘illiterate’, ‘dropout’, ‘underachiever’, ‘over-achiever’, ‘smarty-pants’</td></tr><tr><td>Body Size</td><td>‘fat’, ‘slob’, ‘skinny’, ‘lardass’, ‘beanpole’, ‘plus-sized’</td></tr></table>
|
| 129 |
+
|
| 130 |
+
We adopted the Inside-Outside-Beginning (IOB) annotation scheme [43] to classify and annotate 'BIAS' entities. This technique categorizes tokens in the text as the beginning (B), inside (I), or outside (O) of a bias entity. 'B' represents the first token of a bias entity, 'I' for tokens inside the entity, and 'O' for tokens not part of any bias entity. This approach ensured consistent and precise annotations, enhancing the reliability and accuracy of our study.
|
| 131 |
+
|
| 132 |
+
Annotation Approach. We leveraged semi-supervised learning methodologies [35, 13, 33] to enhance both efficiency and precision of the annotation process. The integration of BERT (Bidirectional Encoder Representations from Transformers), known for its superior text comprehension abilities, substantially improved our approach.
|
| 133 |
+
|
| 134 |
+
Our annotation process began with the manual tagging of $20\%$ of the complete dataset. This critical yet time-consuming task was strategically limited to a subset of data, ensuring a balance between accuracy and efficiency. The "BIAS" entities were carefully annotated in compliance with our predefined guidelines. This annotated subset was then fed into our BERT model, serving as training data for the token-classification task. Once sufficiently trained, the model was assigned the task of predicting "BIAS" entities within the remaining $80\%$ of the data. The extensive dataset was effectively managed by breaking it down into $20\%$ increments, a process we refer to as "semi-autonomous labelling". Expert reviews cross-verified the "BIAS" entities labelled by the model. This combination of semi-supervised learning with expert validation enabled us to create an annotation process that is both optimized and trustworthy.
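The control flow of this semi-autonomous labelling procedure can be sketched as follows. Only the 20% manual seed and the 20%-increment loop are taken from the description above; the helper callables (`manual_annotate`, `fine_tune`, `predict_tags`, `expert_review`) are hypothetical placeholders for the manual annotation step, the BERT token classifier, and the expert review.

```python
from typing import Callable, List, Tuple

Tags = List[List[str]]  # one list of 'BIAS'/'O' tags per record

def semi_autonomous_labelling(
    records: List[str],
    manual_annotate: Callable[[List[str]], Tags],
    fine_tune: Callable[[List[str], Tags], object],
    predict_tags: Callable[[object, List[str]], Tags],
    expert_review: Callable[[List[str], Tags], Tags],
    seed_fraction: float = 0.2,
    chunk_fraction: float = 0.2,
) -> Tuple[List[str], Tags]:
    n_seed = int(len(records) * seed_fraction)
    texts = records[:n_seed]
    tags = manual_annotate(texts)                         # 20% manually tagged seed set
    model = fine_tune(texts, tags)
    step = max(1, int(len(records) * chunk_fraction))
    for start in range(n_seed, len(records), step):       # label the rest in 20% increments
        chunk = records[start:start + step]
        reviewed = expert_review(chunk, predict_tags(model, chunk))  # model labels, expert verifies
        texts = texts + chunk
        tags = tags + reviewed
        model = fine_tune(texts, tags)                     # retrain on the growing corpus
    return texts, tags
```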
|
| 135 |
+
|
| 136 |
+
Working Instance. To demonstrate our annotation scheme, we consider the example sentence: "The overpriced product from the highly successful company was surprisingly popular". Table 2 presents the corresponding BIO format annotations for this sentence. Assuming the term "overpriced" holds potential bias, it would be tagged as "B" in the BIO scheme, indicating the start of a bias entity. All other tokens not part of this bias entity would be labeled "O". This example shows our extensive annotation process across our dataset. This approach allows us to quantify and comprehend biases in a consistent manner.
|
| 137 |
+
|
| 138 |
+
Resolving Discrepancies. An integral part of our process was addressing discrepancies between annotators, a common issue in multi-person annotation tasks. We implemented a consensus-driven approach to uphold consistency and reliability in our annotations. Any disagreement was discussed collectively, considering each annotator's viewpoint and reaching a unified decision based on predefined annotation guidelines. This process ensured collective agreement on all annotations, minimizing potential bias or error and boosting reliability. This consensus strategy was uniformly applied across all data sources, including the BABE, MIMIC, MACCROBAT, and Job Hiring datasets.
|
| 139 |
+
|
| 140 |
+
Table 2: Bias Annotation using BIO scheme
|
| 141 |
+
|
| 142 |
+
<table><tr><td>Word</td><td>Bias Annotation</td></tr><tr><td>The</td><td>O</td></tr><tr><td>overpriced</td><td>B-BIAS</td></tr><tr><td>product</td><td>O</td></tr><tr><td>from</td><td>O</td></tr><tr><td>the</td><td>O</td></tr><tr><td>highly</td><td>B-BIAS</td></tr><tr><td>successful</td><td>I-BIAS</td></tr><tr><td>company</td><td>O</td></tr><tr><td>was</td><td>O</td></tr><tr><td>surprisingly</td><td>B-BIAS</td></tr><tr><td>popular</td><td>I-BIAS</td></tr><tr><td>.</td><td>O</td></tr></table>
|
| 143 |
+
|
| 144 |
+
FAIR Principles. After reaching consensus on all annotations, we saved the final annotated data in the widely-accepted CoNLL-2003 format [44]. This format represents data in a tab-separated manner, associating each word with its part of speech tag, chunk tag, and named entity tag. Sentences are separated by an empty line, and each row corresponds to a token with its annotation.
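For illustration, a few annotated tokens in CoNLL-2003-style columns (word, part-of-speech tag, chunk tag, named entity tag) might look as follows; the part-of-speech and chunk tags here are illustrative, and only the BIAS tags follow the example in Table 2.

```txt
The        DT  B-NP  O
overpriced JJ  I-NP  B-BIAS
product    NN  I-NP  O
from       IN  B-PP  O
the        DT  B-NP  O
highly     RB  I-NP  B-BIAS
successful JJ  I-NP  I-BIAS
company    NN  I-NP  O
.          .   O     O
```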
|
| 145 |
+
|
| 146 |
+
The CoNLL-2003 format offers multiple benefits. It ensures compatibility with existing NLP tools and models, facilitating future analysis and model training. Additionally, it promotes collaboration and peer review by allowing easy data sharing and comprehension among researchers. Lastly, it enhances the reproducibility of our study, enabling others to use our data for model validation and findings replication. By adhering to the FAIR principles, our dataset is made Findable, Accessible, Interoperable, and Reusable, enhancing the transparency, accessibility, and reliability of our research.
|
| 147 |
+
|
| 148 |
+
Inter-Annotator Agreement. In our research, we placed considerable emphasis on establishing rigorous protocols to guarantee the reliability and consistency of the data annotations. Two independent reviewers were assigned to carefully assess the annotated data, promoting objective evaluation devoid of influence from initial annotators. Rather than relying on subjective judgment, we quantified their agreement through Cohen's Kappa coefficient—a statistical measure common in categorical data studies, accounting for potential chance agreement. Scores over 0.6 denote "substantial" agreement
|
| 149 |
+
|
| 150 |
+
and above 0.8 represent "almost perfect" agreement. Our reviewers attained a Cohen's Kappa score of $78\%$, demonstrating high concordance on the annotations. This high score substantiates the uniformity, consistency, and quality of our annotations. Moreover, it demonstrates the objectivity of the assessment process, highlighting the robustness of our annotated data. This, in turn, enhances the trustworthiness of prospective findings drawn from this dataset.
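A minimal sketch of this agreement check is shown below, assuming the two reviewers' decisions are aligned token-level 'BIAS'/'O' labels; the toy labels are illustrative only.

```python
from sklearn.metrics import cohen_kappa_score

reviewer_a = ["O", "BIAS", "O", "O", "BIAS", "BIAS", "O"]
reviewer_b = ["O", "BIAS", "O", "BIAS", "BIAS", "BIAS", "O"]

kappa = cohen_kappa_score(reviewer_a, reviewer_b)   # chance-corrected agreement
print(f"Cohen's Kappa: {kappa:.2f}")                # the paper reports 78% ("substantial" and above)
```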
|
| 151 |
+
|
| 152 |
+
# 3.3. Model Development Layer
|
| 153 |
+
|
| 154 |
+
In this layer, we leverage the BERT language model for token-classification and adapt it for the task of NER. The choice of BERT is motivated by its powerful capability of understanding both left and right context of a word, and its effectiveness in recognizing and classifying multi-word phrases. These features make it particularly well-suited for the complex task of bias detection and annotation in our text data.
|
| 155 |
+
|
| 156 |
+
The advantage of using BERT in NBIAS model development lies in its more effective token-level bias identification. NBIAS incorporates enhancements to the standard BERT architecture, such as modifications in the attention mechanism, loss function, and fine-tuning approaches, specifically tailored for better capturing biases in complex text data. The subsequent section provides a detailed explanation of the model development.
|
| 157 |
+
|
| 158 |
+
The token classifier architecture (shown as the middle component in Figure 2) consists of a multi-layer bidirectional transformer encoder that captures contextual information from both directions. Given an input sequence $X = \{x_{1}, x_{2}, \ldots, x_{n}\}$ , the words are tokenized and embedded as shown in Equation (1):
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
E(X) = \{ e(x_1), e(x_2), \dots, e(x_n) \} \tag{1}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
where $E(X)$ represents the set of embedded representations for an input sequence $X$ , $X$ consists of $n$ words $\{x_{1}, x_{2}, \ldots, x_{n}\}$ , $e(x_{i})$ is the embedding function that maps each word $x_{i}$ from the input sequence to a continuous vector representation. The embedded input sequence is then passed through the transformer layers.
|
| 165 |
+
|
| 166 |
+
BERT employs self-attention mechanisms to weigh the importance of different words in the input sequence, enabling it to better identify and understand complex relationships between words. The self-attention Att score
|
| 167 |
+
|
| 168 |
+
between word $i$ and word $j$ is computed as shown in Equation (2):
|
| 169 |
+
|
| 170 |
+
$$
|
| 171 |
+
\mathrm{Att}(i, j) = \operatorname{Softmax}\left(\frac{Q(e(x_i)) \cdot K(e(x_j))^{T}}{\sqrt{d_k}}\right) \tag{2}
|
| 172 |
+
$$
|
| 173 |
+
|
| 174 |
+
where $Q$ , $K$ are the query and key matrices, and $d_k$ is the key dimension.
|
| 175 |
+
|
| 176 |
+
Following the transformer encoder, the output after applying self-attention and passing through the bidirectional transformer encoder is represented, as shown in Equation (3):
|
| 177 |
+
|
| 178 |
+
$$
|
| 179 |
+
R(X) = \{ r(x_1), r(x_2), \dots, r(x_n) \} \tag{3}
|
| 180 |
+
$$
|
| 181 |
+
|
| 182 |
+
where $R(X)$ represents the set of contextualized representations for an input sequence $X$ . The function $r(x_{i})$ is the representation function that maps each word $x_{i}$ from the input sequence to a continuous vector representation after passing through the transformer encoder.
|
| 183 |
+
|
| 184 |
+
A linear layer with a softmax activation function is added for entity classification. This layer transforms the representations generated by the transformer encoder into a probability distribution over the possible output classes. To simplify our annotation and prediction task, we have merged the 'B' (Beginning) and 'I' (Inside) tags from the standard BIO tagging scheme into a single 'BIAS' tag. The 'BIAS' tag represents any part of a bias entity, while 'O' represents non-entity. The probability distribution is calculated as shown in Equation (4):
|
| 185 |
+
|
| 186 |
+
$$
|
| 187 |
+
P(y \mid x) = \operatorname{Softmax}(W \cdot c(x) + b) \tag{4}
|
| 188 |
+
$$
|
| 189 |
+
|
| 190 |
+
where $W$ is the weight matrix, $b$ is the bias vector in the softmax function, $c(x)$ is the contextualized representation of token $x$ from Equation (3), and $P(y|x)$ is the probability distribution over the output classes 'BIAS' and 'O'. The final output of the model indicates the presence of biased words or phrases within the input sequence by labeling them as 'BIAS'. This simplification enables our model to recognize biased phrases more effectively, without differentiating between their start and continuation.
|
| 191 |
+
|
| 192 |
+
We show an example of the model output on a sample from the test set in Figure 3.
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
Figure 3: BIAS Annotation on a Piece of Text
|
| 196 |
+
|
| 197 |
+
The pseudocode algorithm steps for the NBIAS model development are given in Algorithm 1. As seen in Algorithm 1, the NBIAS model, built on BERT, tokenizes and contextualizes input text using transformer encoders. Through self-attention mechanisms, it weighs relationships between words and classifies each token as biased or unbiased using a softmax-activated linear layer.
|
| 198 |
+
|
| 199 |
+
Algorithm 1 NBIAS Model Development
|
| 200 |
+
Require: Text sequence $X = \{x_{1},x_{2},\ldots ,x_{n}\}$
|
| 201 |
+
1: Initialize BERT with token-classification architecture
|
| 202 |
+
2: Tokenize input sequence $X$
|
| 203 |
+
3: Embed input sequence: $E(X) = \{e(x_1),e(x_2),\dots ,e(x_n)\}$
|
| 204 |
+
4: for each token in $E(X)$ do
|
| 205 |
+
5: Compute self-attention:
|
| 206 |
+
6: Att(i,j) = Softmax $\left(\frac{Q(e(x_i))\times K(e(x_j))^T}{\sqrt{d_k}}\right)$
|
| 207 |
+
7: end for
|
| 208 |
+
8: Pass $E(X)$ through bidirectional transformer encoder: $R(X) =$ $\{r(x_1),r(x_2),\ldots ,r(x_n)\}$
|
| 209 |
+
9: for each token representation in $R(X)$ do
|
| 210 |
+
10: Compute probability distribution: $P(y|x_{i}) = \mathrm{Softmax}(W\times c(x_{i}) + b)$
|
| 211 |
+
11: end for
|
| 212 |
+
12: for each token in $X$ do
|
| 213 |
+
13: if probability corresponds to BIAS then
|
| 214 |
+
14: label as 'BIAS'
|
| 215 |
+
15: else
|
| 216 |
+
16: label as 'O'
|
| 217 |
+
17: end if
|
| 218 |
+
18: end for
|
| 219 |
+
19: return the labeled sequence
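A minimal, runnable counterpart to Algorithm 1 using the Hugging Face transformers API is sketched below. The backbone checkpoint, label mapping, and example sentence are illustrative assumptions, and the classification head is randomly initialized here, so its predictions become meaningful only after fine-tuning on the annotated corpus.

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

label2id = {"O": 0, "BIAS": 1}
id2label = {v: k for k, v in label2id.items()}

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForTokenClassification.from_pretrained(
    "bert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
)

text = "The overpriced product from the highly successful company was surprisingly popular."
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits                         # (1, seq_len, 2)
pred_ids = logits.argmax(dim=-1)[0].tolist()

tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
for token, pred in zip(tokens, pred_ids):
    if token not in tokenizer.all_special_tokens:
        print(token, id2label[pred])                        # 'BIAS' or 'O' per sub-word token
```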
|
| 220 |
+
|
| 221 |
+
# 3.4. Evaluation Layer
|
| 222 |
+
|
| 223 |
+
The evaluation layer plays a critical role in assessing the performance of our model. This layer encompasses both quantitative and qualitative evaluation methods, providing a comprehensive perspective on the model's performance.
|
| 224 |
+
|
| 225 |
+
Quantitative Evaluation. The quantitative evaluation is typically statistical in nature and involves the use of various metrics to numerically measure the model's performance. Metrics such as F1-score, AUC-ROC and accuracy are commonly used in this context. F1 score balances precision (the ability of the model to correctly identify positive instances) and recall (the ability of the model to identify all relevant instances), providing a single measure of the model's overall performance.
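As a sketch, these metrics can be computed on flattened token-level labels as follows; the toy labels and scores below are illustrative, and in practice they come from the fine-tuned model's predictions on the test set.

```python
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

y_true = [0, 1, 0, 0, 1, 1, 0]                    # gold token labels (1 = BIAS, 0 = O)
y_pred = [0, 1, 0, 1, 1, 1, 0]                    # hard predictions
y_score = [0.1, 0.9, 0.2, 0.6, 0.8, 0.7, 0.3]     # predicted probability of BIAS

print("accuracy:", accuracy_score(y_true, y_pred))
print("F1:      ", f1_score(y_true, y_pred))
print("AUC-ROC: ", roc_auc_score(y_true, y_score))
```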
|
| 226 |
+
|
| 227 |
+
Qualitative Evaluations. In addition to these numerical measures, we also conduct a qualitative evaluation. This type of evaluation is more about the quality, relevance, and usefulness of the model's output. It involves an expert review of a subset of the model's predictions to measure how well the model is performing in practical terms. Factors such as the model's ability to correctly identify complex or subtle bias entities, and the interpretability of its output, are examined in the qualitative evaluation.
|
| 228 |
+
|
| 229 |
+
In our study, we focus on qualitative evaluations, specifically assessing model robustness and conducting perpetuation tests. Our robustness analysis [45] explores the model's stability under various conditions including adversarial inputs and data variations. Perpetuation tests [46] help us understand if the model inadvertently reinforces or introduces societal biases. We also consider a human evaluation, to assess the model's performance in real-world conditions.
|
| 230 |
+
|
| 231 |
+
# 4. Experimental Setup
|
| 232 |
+
|
| 233 |
+
In this section, we detail the settings, evaluation metrics, baselines and hyperparameters of our experimental design for replication and validation.
|
| 234 |
+
|
| 235 |
+
# 4.1. Dataset
|
| 236 |
+
|
| 237 |
+
Our study uses diverse datasets: MIMIC-III [41], MACCROBAT [37], BABE [10], and Job Hiring [42]. After annotation (detailed in Section 3.1 and 3.2), each dataset is split into training, validation, and test sets using an 80-10-10 ratio. The division allows for efficient model training, validation, and testing. Modifications are made for the MACCROBAT dataset to maintain balance despite its limited entries. Table 3 presents the detailed dataset information.
|
| 238 |
+
|
| 239 |
+
Table 3: Dataset Details with Training (train), Development (dev), Test (test) sets and Total Samples
|
| 240 |
+
|
| 241 |
+
<table><tr><td>Data Source</td><td>Domain</td><td>train</td><td>dev</td><td>test</td><td>Total</td></tr><tr><td>BABE</td><td>News/Social Media</td><td>15,300</td><td>1,700</td><td>1,700</td><td>18,700</td></tr><tr><td>MIMIC (Clinical)</td><td>Healthcare</td><td>1,800</td><td>200</td><td>200</td><td>2,200</td></tr><tr><td>MACCROBAT</td><td>Healthcare</td><td>160</td><td>-</td><td>40</td><td>200</td></tr><tr><td>Job Hiring</td><td>Occupational</td><td>16,000</td><td>2,000</td><td>2,000</td><td>20,000</td></tr><tr><td>Total</td><td></td><td>33,260</td><td>3,900</td><td>3,940</td><td>41,100</td></tr></table>
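A minimal sketch of the 80-10-10 split described above, assuming each annotated dataset is available as a list of labelled sentences; the function and variable names are illustrative, and the adjusted MACCROBAT split (160 train / 40 test, no dev set) is not reproduced here.

```python
from sklearn.model_selection import train_test_split

def split_80_10_10(samples, seed=42):
    """Split a list of annotated samples into 80% train, 10% dev, 10% test."""
    train, rest = train_test_split(samples, test_size=0.2, random_state=seed)
    dev, test = train_test_split(rest, test_size=0.5, random_state=seed)
    return train, dev, test

# Example with a hypothetical list of (sentence, token_labels) pairs:
# train, dev, test = split_80_10_10(annotated_samples)
```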
|
| 242 |
+
|
| 243 |
+
# 4.2. Hardware Settings
|
| 244 |
+
|
| 245 |
+
The experiments conducted in this study were performed on a dedicated research server with specific hardware configurations. The server was equipped with an Intel Xeon CPU E5-2690 v4 running at 2.60GHz, 128GB of RAM, and a powerful NVIDIA GeForce RTX 3090 GPU. The operating system installed on the server was Ubuntu 18.04 LTS. These hardware settings provided substantial computational power, enabling us to efficiently execute resource-intensive tasks, such as training complex machine learning algorithms and deep learning models.
|
| 246 |
+
|
| 247 |
+
# 4.3. Time Measurements
|
| 248 |
+
|
| 249 |
+
Time measurements during the training, validation, and testing phases were recorded for our models across the diverse datasets. Utilizing our hardware setup, we ensured peak performance with minimized hardware-induced delays. Specifically, the BABE dataset took 4.5 hours for training with 30 minutes each for validation and testing. The MIMIC dataset required 2 hours of training, and 10 minutes for both validation and testing. For the smaller MACCROBAT dataset, training was completed in 0.5 hours, with validation and testing taking 5 minutes each. Lastly, the Job Hiring dataset took the longest at 5 hours for training and 40 minutes each for validation and testing.
|
| 250 |
+
|
| 251 |
+
# 4.4. Baselines
|
| 252 |
+
|
| 253 |
+
To compare token classification performance, we consider a diverse range of baseline approaches. These include BiLSTM-CRF, which combines a BiLSTM with a CRF [29]; BERT-CRF, a blend of BERT and CRF [47]; RoBERTa, an optimized variant of BERT [48]; BART-NER, an application of the BART model to NER [49]; CNN-NER, a CNN-based method
|
| 254 |
+
|
| 255 |
+
for capturing named entities [50]; and TENER, an NER model that utilizes an adapted Transformer Encoder for character- and word-level features [51]. We also consider few-shot NER models [52], model-agnostic meta-learning (MAML) [53], and a zero-shot named entity typing (NET) model [54].
|
| 256 |
+
|
| 257 |
+
The selected baselines cover a range of architectures, including BiLSTM, BERT, RoBERTa, BART, CNN, and the Transformer Encoder, applied to the NER task either directly or in combination with a CRF layer. These models were chosen because they represent the state of the art and constitute a robust set of baselines for comparing token classification performance.
|
| 258 |
+
|
| 259 |
+
# 4.5. Hyperparameter Settings
|
| 260 |
+
|
| 261 |
+
The chosen hyperparameters for our token classifier are provided in Table 4.
|
| 262 |
+
|
| 263 |
+
Table 4: Hyperparameter Settings and Training Details
|
| 264 |
+
|
| 265 |
+
<table><tr><td>Parameter/Method</td><td>Details/Value</td></tr><tr><td>Model</td><td>bert-base-uncased</td></tr><tr><td>Optimizer</td><td>Adam</td></tr><tr><td>Learning Rate</td><td>1 × 10⁻²</td></tr><tr><td>Momentum</td><td>0.5</td></tr><tr><td>Weight Decay</td><td>0.01</td></tr><tr><td>Epochs</td><td>5</td></tr><tr><td>Batch Sizes</td><td>4, 8, 16, 32, 64</td></tr><tr><td>Batch Size (training)</td><td>16</td></tr><tr><td>Input Sequence Length</td><td>128 subword tokens</td></tr><tr><td>Dropout</td><td>Applied on input and hidden layers</td></tr><tr><td>Convergence Criteria</td><td>Negligible decrease in validation loss</td></tr><tr><td>Validation Strategy</td><td>Hold-out</td></tr><tr><td>Early Stopping</td><td>Implemented</td></tr><tr><td>Training Environment</td><td>Google Colab Pro</td></tr><tr><td>Hardware</td><td>NVIDIA Tesla T4 GPU</td></tr><tr><td>β1</td><td>0.9</td></tr><tr><td>β2</td><td>0.999</td></tr><tr><td>Epsilon</td><td>1 × 10⁻⁸</td></tr><tr><td>Hidden Units</td><td>(Leaky) Rectified Linear Units (ReLUs)</td></tr></table>
|
| 266 |
+
|
| 267 |
+
In the comparative experiments with the baselines, the models were optimized using a learning rate between 1e-5 and 5e-5 over several training
|
| 268 |
+
|
| 269 |
+
epochs, typically 3 to 10. The batch size varied between 16 and 64, based on memory constraints, and the input sequence length was limited to 512 tokens. To prevent overfitting, we used regularization techniques such as dropout and weight decay. We generally employed the Adam or AdamW optimizer. All hyperparameters were fine-tuned according to the specific task requirements and dataset characteristics.
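As a concrete illustration of the optimizer settings above (and the Adam parameters in Table 4), a minimal PyTorch configuration might look as follows; the specific learning rate is just one value from the stated 1e-5 to 5e-5 range, and the training loop itself is omitted.

```python
import torch
from transformers import AutoModelForTokenClassification

# Two labels: 'O' and 'BIAS' (see Algorithm 1); weights here are the generic pre-trained ones.
model = AutoModelForTokenClassification.from_pretrained("bert-base-uncased", num_labels=2)

# Illustrative values within the ranges described above.
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=2e-5,               # within the 1e-5 to 5e-5 range used for the baselines
    betas=(0.9, 0.999),    # β1, β2 as in Table 4
    eps=1e-8,              # epsilon as in Table 4
    weight_decay=0.01,     # weight decay for regularization
)
```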
|
| 270 |
+
|
| 271 |
+
# 5. Results
|
| 272 |
+
|
| 273 |
+
# 5.1. Overall Performance
|
| 274 |
+
|
| 275 |
+
Table 5 presents a comprehensive comparison of our proposed method, NBIAS, with various baseline models on the token-classification task across three distinct categories: Social Media Bias, Health-related, and Job Hiring. Due to space constraints, we report only the F1-scores in this overall comparison; the F1-score, the harmonic mean of precision and recall, is a commonly used single metric that balances both. The F1-scores are expressed as percentages, accompanied by the standard deviation $(\pm)$ to indicate the variability in scores across five separate runs. The highest F1-score in each category is highlighted in bold to easily identify the best-performing model.
|
| 276 |
+
|
| 277 |
+
Table 5: Comparison of Token Classification Models on Three Different Categories: Social Media Bias, Health-related, and Occupational. The performance metric used is F1-score (harmonic mean of precision and recall), expressed as a percentage, accompanied by the standard deviation $(\pm)$ indicating the variability in scores across 5 runs. The best score is highlighted in bold.
|
| 278 |
+
|
| 279 |
+
<table><tr><td>Model</td><td>Social Media</td><td>Health-related</td><td>Job Hiring</td></tr><tr><td>Rule-based [55]</td><td>65.4 ± 1.4</td><td>70.2 ± 0.7</td><td>72.3 ± 0.9</td></tr><tr><td>BiLSTM-CRF [29]</td><td>72.6 ± 1.0</td><td>75.8 ± 0.9</td><td>78.1 ± 0.8</td></tr><tr><td>BERT-CRF [47]</td><td>80.7 ± 1.3</td><td>82.3 ± 0.7</td><td>83.5 ± 0.6</td></tr><tr><td>RoBERTa [48]</td><td>82.8 ± 0.7</td><td>83.6 ± 0.9</td><td>80.5 ± 0.5</td></tr><tr><td>CNN-NER [50]</td><td>76.2 ± 1.1</td><td>78.1 ± 0.0</td><td>73.4 ± 0.9</td></tr><tr><td>BART-NER [49]</td><td>84.7 ± 0.9</td><td>84.2 ± 0.7</td><td>82.0 ± 0.8</td></tr><tr><td>TENER [51]</td><td>85.7 ± 0.5</td><td>86.4 ± 0.6</td><td>85.1 ± 0.5</td></tr><tr><td>Few-shot NER [52]</td><td>70.2 ± 3.4</td><td>73.1 ± 2.9</td><td>69.2 ± 1.7</td></tr><tr><td>NET [54]</td><td>70.1 ± 1.4</td><td>72.2 ± 1.2</td><td>67.1 ± 1.2</td></tr><tr><td>MAML [53]</td><td>62.1 ± 1.8</td><td>65.3 ± 1.2</td><td>60.5 ± 2.5</td></tr><tr><td>NBIAS</td><td><b>86.9 ± 0.2</b></td><td><b>89.1 ± 0.8</b></td><td><b>90.3 ± 0.4</b></td></tr></table>
|
| 280 |
+
|
| 281 |
+
The results presented in Table 5 demonstrate the superior performance of the NBIAS model across all tested scenarios. In the Social Media category, NBIAS achieved an F1-score of $86.9\%$ with a small deviation of $\pm 0.2$. In the Health category, it performs even better, with an F1-score of $89.1\%$ and a deviation of $\pm 0.8$, meaning the scores ranged between $88.3\%$ and $89.9\%$. In the Job Hiring category, the model achieved an F1-score of $90.3\%$, with scores ranging between $89.9\%$ and $90.7\%$. These small deviations show that the model's performance is consistent across runs.
|
| 282 |
+
|
| 283 |
+
Among the baseline models, the TENER model performs best. The BERT-CRF and RoBERTa models also exhibit good performance. Both the CNN-NER and BART-NER models display satisfactory performance, although they come behind the NBIAS and TENER models. In contrast, the Rule-based model underperforms compared to the transformer- and BiLSTM-based baselines. The Few-shot NER, NET, and MAML models show only average performance. Even though few-shot models can work well with just a few examples, the results show there is room for improvement, which could be achieved by creating custom training methods or tasks specific to a given domain.
|
| 284 |
+
|
| 285 |
+
Overall, the NBIAS model emerges as the most effective across all categories. While other BERT-based baselines may also attempt bias identification, NBIAS outperforms them due to its custom-designed model features optimized for this specific purpose. The performance gain could be in terms of better debiasing results, increased fairness in predictions, or improved overall model accuracy in scenarios where bias reduction is critical. These findings provide valuable insights for the future development and selection of token classification models across different domains.
|
| 286 |
+
|
| 287 |
+
Accuracy Analysis of Token Classification Models. Figure 4 shows how different models perform in classifying tokens over different test sets.
|
| 288 |
+
|
| 289 |
+
As depicted in Figure 4, the NBIAS model exhibits superior performance, achieving accuracy scores of $88.4\%$ in Social Media Bias, $90.6\%$ in Health-related texts, and $91.8\%$ in Job Hiring texts. Following closely are the TENER and BART-NER models in terms of accuracy. While other models such as RoBERTa, BERT-CRF, BiLSTM-CRF, and CNN-NER also demonstrate commendable performance, they fall short of the scores attained by NBIAS, TENER, and BART-NER in this experiment. Models like Few-shot NER, NET, and MAML, although not scoring the best, exhibit promising potential. Lastly, the Rule-based model, which relies on predefined rules
|
| 290 |
+
|
| 291 |
+

|
| 292 |
+
Figure 4: Comparative Accuracy Scores of Token Classification Models across Three Different Categories: Social Media, Health-related, and Job Hiring for Bias Text Identification
|
| 293 |
+
|
| 294 |
+
rather than learning from the data, still manages to perform above $60\%$ .
|
| 295 |
+
|
| 296 |
+
Overall, these results underscore the enhanced capability of the latest transformer-based models like BART and TENER to extract contextual information from text data, as evidenced by this experiment. Moreover, it affirms that a model carefully designed for bias detection, such as ours, can indeed yield highly effective results.
|
| 297 |
+
|
| 298 |
+
# 5.2. Performance Analysis using ROC Curves and AUC Scores
|
| 299 |
+
|
| 300 |
+
In this study, we compare the performance of different models on the token classification task using Receiver Operating Characteristic (ROC) curves and the corresponding Area Under the Curve (AUC) scores on the Social Media, Health-related, and Job Hiring data. Figures 5a, 5b, and 5c display the AUC-ROC curves for all the baseline models and our NBIAS token classification model.
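A minimal sketch of how such ROC curves and AUC scores can be computed with scikit-learn, assuming per-token gold labels (1 for BIAS, 0 for O) and the model's predicted probabilities for the BIAS class; the toy arrays below are placeholders.

```python
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

# Placeholder data: gold labels and predicted BIAS probabilities per token.
y_true = [0, 0, 1, 1, 0, 1]
y_score = [0.10, 0.40, 0.80, 0.65, 0.20, 0.90]

fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)

plt.plot(fpr, tpr, label=f"AUC = {roc_auc:.2f}")
plt.plot([0, 1], [0, 1], linestyle="--")  # chance diagonal
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.show()
```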
|
| 301 |
+
|
| 302 |
+
The results presented in Figure 5 show the superior capability of the NBIAS model, as evidenced by its higher True Positive Rates at minimal False Positive Rates. While models like the Rule-based approach, RoBERTa, and the zero-shot and few-shot NER models demonstrated low-to-moderate performance, others such as TENER, BiLSTM-CRF, CNN-NER, and BART-NER yielded commendable results, particularly in the early segments of their respective curves. All of these models also exhibited better performance specifically in the health
|
| 303 |
+
|
| 304 |
+

|
| 305 |
+
(a) Models applied to Social Media Data.
|
| 306 |
+
|
| 307 |
+

|
| 308 |
+
(b) Models applied to Health-related Data.
|
| 309 |
+
Figure 5: ROC curves and AUC Scores for Various datasets (continued on next page)
|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
(c) Models applied to Job Hiring Data.
|
| 313 |
+
Figure 5: ROC Curves and AUC Scores for Various Datasets.
|
| 314 |
+
|
| 315 |
+
and job hiring datasets. Overall, these findings suggest that some models excel in specific domains. This could be attributed to several factors, including but not limited to:
|
| 316 |
+
|
| 317 |
+
1. Training on analogous data points that make the model more aware of the specific features of a domain.
|
| 318 |
+
2. The architecture of the model being inherently better suited for certain types of data.
|
| 319 |
+
3. Hyperparameter choices that resonate better with specific data characteristics.
|
| 320 |
+
4. Preprocessing and feature engineering steps that align closely with the requirements of a domain.
|
| 321 |
+
|
| 322 |
+
Thus, choosing the optimal model for a specific domain is important for achieving the best performance.
|
| 323 |
+
|
| 324 |
+
# 5.3. Confusion Matrix and Error Analysis
|
| 325 |
+
|
| 326 |
+
We present the results of the BIAS entity identification task for "Health-related Bias", "Political Bias", and "Occupational Bias" using NBIAS. The
|
| 327 |
+
|
| 328 |
+
model's performance is evaluated based on confusion matrices and error analysis (Table 6), providing insights into the model's strengths and limitations.
|
| 329 |
+
|
| 330 |
+
Table 6: Confusion Matrix and Error Analysis for BIAS Entity Identification using NBIAS: The table presents the True Positives (TP), False Positives (FP), True Negatives (TN), and False Negatives (FN) for various bias types identified in the dataset, along with the Precision in percentage. The categorization of biases is based on a predefined analysis of the content and context in which they appear.
|
| 331 |
+
|
| 332 |
+
<table><tr><td>Dataset</td><td>Bias types</td><td>TP</td><td>FP</td><td>TN</td><td>FN</td><td>Precision</td></tr><tr><td rowspan="3">Health</td><td>healthy lifestyle</td><td>98</td><td>12</td><td>145</td><td>5</td><td>89.1%</td></tr><tr><td>medical advancements</td><td>85</td><td>56</td><td>142</td><td>15</td><td>60.2%</td></tr><tr><td>research findings</td><td>98</td><td>19</td><td>138</td><td>2</td><td>83.7%</td></tr><tr><td rowspan="3">Social Media</td><td>biased news source</td><td>112</td><td>10</td><td>157</td><td>8</td><td>91.8%</td></tr><tr><td>political affiliation</td><td>95</td><td>7</td><td>162</td><td>16</td><td>93.1%</td></tr><tr><td>political agenda</td><td>86</td><td>14</td><td>154</td><td>16</td><td>86.0%</td></tr><tr><td rowspan="3">Occupational</td><td>gender bias in hiring</td><td>63</td><td>5</td><td>172</td><td>8</td><td>92.7%</td></tr><tr><td>ethnicity bias in hiring</td><td>49</td><td>4</td><td>173</td><td>11</td><td>92.5%</td></tr><tr><td>age bias in hiring</td><td>45</td><td>8</td><td>170</td><td>13</td><td>84.2%</td></tr></table>
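The precision values in Table 6 follow directly from the confusion-matrix counts; for example, for the "healthy lifestyle" entity:

$$
\mathrm{Precision} = \frac{TP}{TP + FP} = \frac{98}{98 + 12} \approx 89.1\%
$$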
|
| 333 |
+
|
| 334 |
+
Health-related Bias: The NBIAS model exhibits strong performance in identifying "healthy lifestyle" entities, achieving a precision of $89.1\%$. However, it missed 5 instances of this entity, leading to false negatives. For "medical advancements", the precision is lower at $60.2\%$, and the model identified 56 false positives, misclassifying non-biased terms as biased. On the other hand, the model achieved a relatively high precision of $83.7\%$ for "research findings", yet it missed 2 instances, resulting in false negatives. These findings suggest that the model performs well for more explicit health-related biases, but subtle biases and rare terms might pose challenges.
|
| 335 |
+
|
| 336 |
+
Social Media: Our NBIAS demonstrates high precision in identifying "biased news source" entities (91.8%), correctly capturing biased sources. However, it produced a few false positives, misclassifying some non-biased sources as biased. For "political affiliation" entities, the precision is 93.1%, indicating a reliable performance. However, some false positives occurred, classifying neutral statements as biased based on political association. For "political agenda" entities, the model achieved a precision of 86.0%, although it misclassified a few non-biased mentions as biased. These results highlight the model's ability to detect explicit political biases but also suggest room for improvement in handling ambiguous language.
|
| 337 |
+
|
| 338 |
+
Occupational Bias: In the "Occupational Bias" category, the NBIAS model exhibits strong precision for identifying "gender bias in hiring" entities (92.7%), effectively capturing biased terms. However, it produced a few false positives, misclassifying neutral statements as biased based on gender. For "ethnicity bias in hiring" entities, the precision is 92.5%, indicating accurate identification. Yet, a few false positives occurred, misclassifying non-biased mentions as biased. The model achieved a precision of 84.2% for "age bias in hiring" entities. However, some neutral statements were misclassified as biased, revealing areas for enhancement. These findings suggest that the model can effectively identify biased occupational entities, but improvements are needed to reduce false positives.
|
| 339 |
+
|
| 340 |
+
# Actionable Insights:
|
| 341 |
+
|
| 342 |
+
- The proposed NER model demonstrates robust precision in identifying biased entities for all three categories with clear biases.
|
| 343 |
+
- Addressing false positives can enhance the model's discrimination between biased and non-biased entities. Fine-tuning the model to better understand nuanced language can be beneficial.
|
| 344 |
+
- Augmenting the training data with diverse instances of subtle biased entities can improve recall and help detect rare biased terms.
|
| 345 |
+
- Considering context-aware models, such as transformer-based models, might help tackle challenges arising from sarcasm and subtle biases more effectively.
|
| 346 |
+
|
| 347 |
+
Overall, these results provide valuable insights into the strengths of NBIAS and areas for improvement in identifying biased entities across different categories.
|
| 348 |
+
|
| 349 |
+
# 5.4. Ablation Study on the NBIAS Model
|
| 350 |
+
|
| 351 |
+
To understand the importance of different components of the NBIAS model, we conducted an ablation study on the combined dataframe from all data sources. We systematically removed or replaced elements/components of the model to observe their influence on bias detection performance. The study assessed the following model variants:
|
| 352 |
+
|
| 353 |
+
- NBIAS-Full: Original model with all features intact.
|
| 354 |
+
|
| 355 |
+
- NBIAS-NoAttn: Exclusion of the self-attention mechanism.
|
| 356 |
+
- NBIAS-GloVe: GloVe embeddings replace the BERT defaults.
|
| 357 |
+
- NBIAS-HalfBERT: A version with half the transformer layers.
|
| 358 |
+
- NBIAS-RandInit: Trained without leveraging the pre-trained BERT weights.
|
| 359 |
+
|
| 360 |
+
Table 7 illustrates the outcomes of the ablation study for the F1-score, precision, and recall metrics on the combined dataframe.
|
| 361 |
+
Table 7: Ablation Study Results for NBIAS. Bold indicates the best score.
|
| 362 |
+
|
| 363 |
+
<table><tr><td>Model Variant</td><td>Precision (%)</td><td>Recall (%)</td><td>F1-Score (%)</td></tr><tr><td>NBIAS-Full</td><td><b>94.8</b></td><td><b>95.6</b></td><td><b>95.2</b></td></tr><tr><td>NBIAS-NoAttn</td><td>89.5</td><td>91.0</td><td>90.1</td></tr><tr><td>NBIAS-GloVe</td><td>93.0</td><td>92.6</td><td>92.8</td></tr><tr><td>NBIAS-HalfBERT</td><td>93.7</td><td>93.3</td><td>93.5</td></tr><tr><td>NBIAS-RandInit</td><td>87.8</td><td>89.2</td><td>88.4</td></tr></table>
|
| 364 |
+
|
| 365 |
+
The analysis of the ablation study reveals some insightful observations. From Table 7, it is evident that the fully featured NBIAS-Full model outperforms all other variants, with the highest F1-score of $95.2\%$, highlighting the combined effect of all its components working together. A significant performance drop is observed in the NBIAS-NoAttn model, which does not incorporate the self-attention mechanism; this shows the role that self-attention plays in capturing contextual relationships in the text for effective bias detection.
|
| 366 |
+
|
| 367 |
+
Additionally, the slight performance reduction in the NBIAS-GloVe model, which uses GloVe embeddings instead of the default BERT embeddings, suggests that BERT embeddings are better suited for this specific task, possibly because they are trained on a more diverse and comprehensive corpus. Similarly, the negligible performance variation in the NBIAS-HalfBERT model indicates that the model can achieve almost equivalent performance with half the transformer layers, which may be a crucial consideration in resource-constrained environments. However, it is also worth noting that this minimal reduction might lead to missing out on some complexities in the data that can only be captured with a deeper network. Lastly, the reduced performance of the NBIAS-RandInit model, which does not leverage pre-trained BERT
|
| 368 |
+
|
| 369 |
+
weights, highlights the significant benefits of transfer learning and the importance of initializing the model with pre-trained weights to achieve optimal performance. This is particularly important as it reduces the requirement of a large amount of labeled data and leverages the knowledge gained from pre-training on a large corpus.
|
| 370 |
+
|
| 371 |
+
In conclusion, the NBIAS model, with its full set of features, proves to be the most effective model for bias detection.
|
| 372 |
+
|
| 373 |
+
# 5.5. Robustness Testing
|
| 374 |
+
|
| 375 |
+
Robustness testing is a type of evaluation used to assess the performance and resilience of a system or model against various inputs or scenarios [56]. In the context of our testing, we programmatically measure the robustness of NBIAS using four key factors: spelling, semantics, case sensitivity, and context. In Table 8, we showcase the robustness testing on a sample of 5 examples (for brevity) from the test set.
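A minimal sketch of how such checks can be run programmatically, assuming a `tag_bias` tagger like the one sketched after Algorithm 1 that returns (token, label) pairs; the pass criterion used here (the perturbation does not change whether any bias is flagged) is an illustrative simplification of the tests in Table 8.

```python
def is_flagged(text, tagger):
    """True if the tagger marks at least one token in the text as BIAS."""
    return any(label == "BIAS" for _, label in tagger(text))

def robustness_check(original, perturbed, tagger):
    """Illustrative criterion: the perturbation should not change whether bias is detected."""
    return is_flagged(original, tagger) == is_flagged(perturbed, tagger)

# Example case-sensitivity probe (sentences taken from Table 8):
# robustness_check("Asians are more prone to diabetes due to lifestyles.",
#                  "ASIANS have a higher susceptibility to diabetes, due to lifestyle.",
#                  tag_bias)
```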
|
| 376 |
+
|
| 377 |
+
The results of the robustness testing of the model in Table 8 are summarized as:
|
| 378 |
+
|
| 379 |
+
- Spelling: The model partially passed the test. It was able to map "superior" to "superior" but failed with "un like".
|
| 380 |
+
- Semantics: The model successfully mapped “females” to “women” and “exaggerate” to “amplify”, thus passing the test.
|
| 381 |
+
- Case Sensitivity and Semantics: The model was case insensitive and identified synonyms, mapping “ASIANS” to “Asians” and “more prone” to “higher susceptibility”, passing the test.
|
| 382 |
+
- Semantics: The model passed another semantics test by mapping "females" to "women" and "not fit" to "not suitable".
|
| 383 |
+
- Context: The model partially passed the test by mapping "senior" to "older" and "resist" to "resistant", but it failed to capture the change in severity from "severely lack" to "lack".
|
| 384 |
+
|
| 385 |
+
Overall, the model showed strengths in identifying synonyms, being case insensitive, and recognizing some contextual changes, but had limitations in identifying misspellings and capturing changes in severity.
|
| 386 |
+
|
| 387 |
+
Table 8: Robustness Test on NBIAS Predictions: evaluating the model performance in identifying misspellings, synonyms, case sensitivity, semantics, and context. Assessment based on Pass: $\checkmark$ , Partial: $\bullet$ , Fail: $\times$
|
| 388 |
+
|
| 389 |
+
<table><tr><td>Original: Conservative views are superior, unlike liberal ideas.<br>Test Case: Conservative views are superior, un like liberal ideas.<br>Expected: Identifies misspellings.<br>Actual: Maps "superior" to "superior", fails with "un like".<br>Test Type: Spelling.<br>Pass: •</td></tr><tr><td>Original: Women tend to exaggerate pain due to their emotional nature.<br>Test Case: Females tend to amplify pain due to their emotional nature.<br>Expected: Identifies synonyms.<br>Actual: Correctly maps "females" to "women" and "exaggerate" to "amplify".<br>Test Type: Semantics.<br>Pass: ✓</td></tr><tr><td>Original: Asians are more prone to diabetes due to lifestyles.<br>Test Case: ASIANS have a higher susceptibility to diabetes, due to lifestyle.<br>Expected: Case insensitive and identifies synonyms.<br>Actual: Maps "ASIANS" to "Asians" and "more prone" to "higher susceptibility".<br>Test Type: Case Sensitivity, Semantics.<br>Pass: ✓</td></tr><tr><td>Original: Women are not fit for leadership roles.<br>Test Case: Females are not suitable for leadership roles.<br>Expected: Identifies synonyms.<br>Actual: Maps "females" to "women" and "not fit" to "not suitable".<br>Test Type: Semantics.<br>Pass: ✓</td></tr><tr><td>Original: Senior employees resist change and severely lack technological skills.<br>Test Case: Older employees are resistant to change and lack technological skills.<br>Expected: Recognizes context and severity change.<br>Actual: Maps "senior" to "older", "resist" to "resistant", fails to capture change in severity.<br>Test Type: Context.<br>Pass: •</td></tr></table>
|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
(a) Case Sensitivity Robustness Test.
|
| 393 |
+
|
| 394 |
+

|
| 395 |
+
(b) Contexts Robustness Test.
|
| 396 |
+
Figure 6: Robustness test (continued on next page)
|
| 397 |
+
|
| 398 |
+
We also conducted robustness testing on a set of 300 test samples manually prepared by our team of 5 members to evaluate the robustness of our system; the results are reported in Figure 6.
|
| 399 |
+
|
| 400 |
+
As observed in Figure 6, the NBIAS model appears to be improving over time in all four robustness test categories: spelling, semantics, case sensitivity, and context. This is evident as the scores increase with each successive run, demonstrating the model's adaptability and improvement in its learning approach.
|
| 401 |
+
|
| 402 |
+

|
| 403 |
+
(c) Semantics Robustness Test.
|
| 404 |
+
|
| 405 |
+

|
| 406 |
+
(d) Spellings Robustness Test.
|
| 407 |
+
Figure 6: Robustness Test: each plot illustrates the performance of the NBIAS model across 5 development runs in robustness tests: spelling, semantics, case sensitivity, and context. The x-axis represents the different test instances used in each run, while the y-axis displays the corresponding scores, referred to as the 'Pass Count' achieved by the model on these tests.
|
| 408 |
+
|
| 409 |
+
In spelling, the model begins with a score of 70 and ends at 90 in the fifth run. A similar upward trend is seen in semantics, starting from a score of 72 and concluding at 93 in the final run. The model also consistently improves in the case sensitivity test, beginning at 80 and finishing at 95. The context scores also progress positively, from an initial score of 70 to a final score of 90.
|
| 410 |
+
|
| 411 |
+
The NBIAS model shows the highest performance in case sensitivity, as it reaches a score of 95 in the final run. It also performs well in the semantics category, achieving a score of 93. However, the model's performance in the context and spelling categories is slightly lower. While these are still strong results, there may be room for further optimization in these areas to achieve results comparable to the case sensitivity and semantics tests.
|
| 412 |
+
|
| 413 |
+
# 5.6. Perpetuation Bias Tests for Bias Detection
|
| 414 |
+
|
| 415 |
+
To assess whether our model unintentionally perpetuates biases present in its training data, we conducted perpetuation bias tests. These tests evaluated the performance of our model in identifying and labeling potentially biased words or phrases as a $BIAS$ entity.
|
| 416 |
+
|
| 417 |
+
In our testing approach, we curated a diverse list of terms and phrases representing various social groups and contexts prone to bias. This list included phrases like "elderly person", "young woman", "African immigrant", "gay man" and "blue-collar worker". We inserted these phrases into neutral sentences to evaluate the model's perception of potential bias. Upon processing the sentences through our model, we observed the following pattern:
|
| 418 |
+
|
| 419 |
+
# The person was described as a [Phrase]
|
| 420 |
+
|
| 421 |
+
Ethnicity:
|
| 422 |
+
|
| 423 |
+
- African immigrant (Flagged: 25 out of 30 times, $83\%$)
|
| 424 |
+
- Asian immigrant (Flagged: 20 out of 30 times, $67\%$)
|
| 425 |
+
- European immigrant (Flagged: 10 out of 30 times, $33\%$ )
|
| 426 |
+
|
| 427 |
+
Gender:
|
| 428 |
+
|
| 429 |
+
- young woman (Flagged: 10 out of 30 times, $33\%$ )
|
| 430 |
+
- young man (Flagged: 5 out of 30 times, $17\%$ )
|
| 431 |
+
- elderly man (Flagged: 5 out of 30 times, $17\%$)
|
| 432 |
+
|
| 433 |
+
Occupation:
|
| 434 |
+
|
| 435 |
+
- blue-collar worker (Flagged: 15 out of 30 times, $50\%$ )
|
| 436 |
+
- white-collar worker (Flagged: 8 out of 30 times, $27\%$ )
|
| 437 |
+
|
| 438 |
+
Age:
|
| 439 |
+
|
| 440 |
+
- elderly person (Flagged: 5 out of 30 times, $17\%$)
|
| 441 |
+
- young adult (Flagged: 3 out of 30 times, $10\%$ )
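A minimal sketch of the probing procedure described above, again assuming the illustrative `tag_bias` tagger; the template and phrases mirror the list above, and the 30 repetitions reflect the reported trial count (the source of run-to-run variation, e.g. different carrier sentences or sampling, is not specified here).

```python
TEMPLATE = "The person was described as a {phrase}."
PHRASES = ["African immigrant", "Asian immigrant", "European immigrant",
           "young woman", "blue-collar worker", "elderly person"]

def flag_rate(phrase, tagger, runs=30):
    """Fraction of trials in which any token of the filled template is labeled BIAS."""
    sentence = TEMPLATE.format(phrase=phrase)
    flagged = sum(
        any(label == "BIAS" for _, label in tagger(sentence))
        for _ in range(runs)
    )
    return flagged / runs

# for p in PHRASES:
#     print(p, flag_rate(p, tag_bias))
```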
|
| 442 |
+
|
| 443 |
+
The provided data showcases the results of a bias detection test on a language model. Various phrases associated with different demographics (ethnicity, gender, occupation, and age) were inserted into a neutral sentence, and the model flagged certain phrases as “BIAS ENTITY” with varying frequencies.
|
| 444 |
+
|
| 445 |
+
Specifically, the phrases "African immigrant" and "Asian immigrant" were flagged $83\%$ and $67\%$ of the time, respectively, whereas "European immigrant" was only flagged $33\%$ of the time. Similarly, "blue-collar worker" was flagged $50\%$ of the time, while "white-collar worker" was flagged $27\%$ of the time. In contrast, phrases related to age and gender, such as "elderly person", "young woman", "young man", and "elderly man", were flagged much less frequently, ranging from $10\%$ to $33\%$ .
|
| 446 |
+
|
| 447 |
+
These discrepancies indicate that the model may be perpetuating biases present in its training data, where terms associated with certain ethnicities or occupations are more frequently labeled as biased. To address this issue, it is recommended to refine the training data to ensure a balanced representation across all demographics and contexts. This could involve increasing the representation of underrepresented groups, using a more balanced dataset, or applying bias mitigation techniques to the model. It is also essential to continually monitor and evaluate the model's performance in real-world scenarios to ensure that it does not inadvertently perpetuate harmful biases.
|
| 448 |
+
|
| 449 |
+
# 5.7. Human Evaluation
|
| 450 |
+
|
| 451 |
+
To validate the model's bias detection, a human evaluation was initiated. Three evaluators (E1, E2, E3), chosen based on their expertise in linguistic analysis and familiarity with biases in textual data, reviewed a set of statements related to social media usage, health, and job hiring stereotypes detected by our model. Each statement was rated on a scale from 1 to 5, with 1 indicating no perceived bias and 5 indicating a high level of bias.
|
| 452 |
+
|
| 453 |
+
The evaluation results are summarized in Table 9, which provides the statements, the corresponding identified entities, and the scores assigned by each evaluator. The average score for each entity is also provided, giving an overall indication of the perceived bias.
|
| 454 |
+
|
| 455 |
+
The results in Table 9 show that the evaluators reached a consensus on the presence of bias in the statements, as evident from the high scores assigned to certain identified entities. Entities such as 'seeking attention,' 'lazy,' 'more
|
| 456 |
+
|
| 457 |
+
Table 9: Evaluation of Perceived Bias in Various Statements by Three Evaluators (E1, E2, E3): the evaluators scored the identified entities from 1 (no perceived bias) to 5 (high level of bias), and the average (Avg) score is also shown.
|
| 458 |
+
|
| 459 |
+
<table><tr><td rowspan="2">Text</td><td rowspan="2">Identified Entity</td><td colspan="4">Score</td></tr><tr><td>E1</td><td>E2</td><td>E3</td><td>Avg</td></tr><tr><td>Everyone using social media is just seeking attention.</td><td>seeking attention</td><td>4</td><td>4</td><td>3</td><td>3.67</td></tr><tr><td>People who don’t exercise are lazy.</td><td>lazy</td><td>5</td><td>5</td><td>5</td><td>5.0</td></tr><tr><td>This job role is more suitable for men.</td><td>more suitable for men</td><td>5</td><td>5</td><td>5</td><td>5.0</td></tr><tr><td>She posts too much on social media, she must be narcissistic.</td><td>narcissistic</td><td>4</td><td>3</td><td>4</td><td>3.67</td></tr><tr><td>Overweight people simply lack self-control.</td><td>Overweight, lack self-control</td><td>5</td><td>5</td><td>5</td><td>5.0</td></tr><tr><td>Only people with an engineering background are best fit in this role.</td><td>best fit</td><td>3</td><td>2.5</td><td>3</td><td>2.83</td></tr></table>
|
| 460 |
+
|
| 461 |
+
suitable for men,' 'narcissistic,' and 'overweight people, lack self-control' received average scores exceeding 3, indicating a significant presence of bias in these statements. The bias observed takes the form of stereotypes associated with social media users, discriminatory views regarding health and lifestyle, and gender bias in job roles.
|
| 462 |
+
|
| 463 |
+
However, the last statement, which suggests that 'only people with an engineering background are the best fit for a role,' received a lower bias score compared to the others. The identified entity in this statement obtained an average score of 2.83. This suggests that the evaluators perceived this statement more as a job-specific requirement rather than a biased statement.
|
| 464 |
+
|
| 465 |
+
# 6. Discussion
|
| 466 |
+
|
| 467 |
+
# 6.1. Performance Analysis
|
| 468 |
+
|
| 469 |
+
The detection and identification of biases in textual data have significant implications for ensuring fairness and the ethical usage of information. In this study, we have developed a comprehensive framework for bias detection in textual data. The NBIAS model outperformed all other models in almost every bias category examined, with accuracy scores of $88.4\%$, $90.6\%$, and $91.8\%$ in the Social Media Bias, Health-related, and Job Hiring text analyses, respectively. The model exhibited a strong capability in diverse token classification tasks,
|
| 470 |
+
|
| 471 |
+
as evidenced by high AUC values of 0.74, 0.90, and 0.91 across the respective domains. These consistently high scores further demonstrate its efficacy.
|
| 472 |
+
|
| 473 |
+
The precision analysis of the model highlights its ability to correctly identify biased entities across various contexts; however, there remains scope for reducing false positives. NBIAS's robustness was demonstrated through its steady performance in multiple tests covering spelling, semantics, case sensitivity, and context. Its proficiency in bias detection was further validated through human evaluation.
|
| 474 |
+
|
| 475 |
+
# 6.2. Theoretical Impact
|
| 476 |
+
|
| 477 |
+
The NBIAS framework offers a novel approach to text-based bias detection. Its findings draw on advanced neural methodologies, setting a direction for subsequent studies. The framework emphasizes the intricacies of bias in textual content. The proposed study motivates the academic community to focus on the nuances and context-dependency of biases rather than just their explicit appearances. This could lead to a deeper understanding of how biases are structured and propagated, and how they can be mitigated, in the vast landscape of textual data.
|
| 478 |
+
|
| 479 |
+
# 6.3. Practical Impact
|
| 480 |
+
|
| 481 |
+
NBIAS's practical uses are vast and diverse. It can serve many sectors aiming to introspect and rectify inherent biases. Its ability to uncover subtle biases is crucial for platforms like social media, where information dissemination can shape public opinion. Within healthcare analytics, it helps ensure that recommendations and data interpretations are devoid of prejudiced views, leading to better patient care. In recruitment, NBIAS can be used to support equitable hiring, ensuring job descriptions and applicant reviews remain unbiased. These applications can also be extended to support more conscious, bias-free decision-making across various industries.
|
| 482 |
+
|
| 483 |
+
# 6.4. Limitations
|
| 484 |
+
|
| 485 |
+
While our work represents a significant step forward in identifying biases in text-based data, aiming to contribute to a more inclusive and unbiased information landscape, it has some limitations.
|
| 486 |
+
|
| 487 |
+
Performance Variability: The efficacy of our model might not be consistent across diverse languages and domains. Textual differences in languages, differing cultural contexts, and domain-specific terminologies can alter model performance. For instance, a bias detection framework optimized for English
|
| 488 |
+
|
| 489 |
+
may struggle with idiomatic expressions in languages like German or Mandarin. Furthermore, a model trained on medical data may misinterpret biases in political or financial contexts.
|
| 490 |
+
|
| 491 |
+
Extent of Bias Detection: While our model excels at identifying isolated biased terms or phrases, its performance may fluctuate when faced with biases embedded in longer narrative structures spread across paragraphs.
|
| 492 |
+
|
| 493 |
+
Inherent Model Uncertainties: Although carefully designed, our framework, like others, is not exempt from producing occasional inaccuracies. The challenge arises primarily from the multifaceted nature of biases: they can enter text in context-specific ways, leading to potential false positives (where neutral phrases are incorrectly flagged) or false negatives (where real biases remain unnoticed) [57, 7].
|
| 494 |
+
|
| 495 |
+
Adaptability: While our current framework provides a foundation for bias detection, adapting and fine-tuning it for specific linguistic and domain nuances remains crucial. This adaptability challenge necessitates continued research, iterative model improvements, and extensive validation across varied contexts.
|
| 496 |
+
|
| 497 |
+
By highlighting these limitations, we aim to open dialogue and collaboration for further refinements for unbiased text analysis.
|
| 498 |
+
|
| 499 |
+
# 6.5. Future Directions
|
| 500 |
+
|
| 501 |
+
Recognizing the potential of NBIAS and considering the highlighted limitations, we recommend several directions for future research to enhance bias detection capabilities in textual data:
|
| 502 |
+
|
| 503 |
+
Incorporating Multilingual Support: Bias is not confined to any particular language. Embracing multilingual frameworks and training the model on diverse linguistic datasets can provide a broader and more holistic understanding of biases.
|
| 504 |
+
|
| 505 |
+
Expanding Narrative Analysis: Future iterations of NBIAS or related models should consider enhancing their ability to discern biases in extended narrative structures, incorporating both micro and macro levels of text understanding.
|
| 506 |
+
|
| 507 |
+
Feature Enrichment: To optimize text classification and bias detection, the model can benefit from newer feature selection methodologies. Specifically, the integration of methods based on frequent and correlated items, as illustrated in [58] and [59], can add substantial value.
|
| 508 |
+
|
| 509 |
+
Multilabel Classification for Social Networks: The increasing prevalence of online social networks necessitates models capable of multi-label classification.
|
| 510 |
+
|
| 511 |
+
Adapting NBIAS in line with frameworks discussed in [60] can lead to better bias detection in rapidly changing online environments.
|
| 512 |
+
|
| 513 |
+
Feedback Loops and Iterative Learning: Ensuring that the model continues to evolve requires the establishment of feedback loops wherein the model can learn from its inaccuracies. This iterative learning can significantly reduce false positives and negatives over time.
|
| 514 |
+
|
| 515 |
+
Collaborative Research: We encourage researchers across disciplines to collaborate, sharing insights, datasets, and techniques. This collective effort can result in refined models that cater to diverse needs, creating a more inclusive and bias-free digital environment.
|
| 516 |
+
|
| 517 |
+
To sum up, while NBIAS presents an innovative approach to bias detection, the domain's complexities necessitate continual advancements. By integrating the recommendations mentioned above and considering interdisciplinary collaborations, we believe we can achieve comprehensive and robust bias detection in textual data.
|
| 518 |
+
|
| 519 |
+
# 7. Conclusion
|
| 520 |
+
|
| 521 |
+
This paper presents a comprehensive framework for the detection and identification of biases in textual data. The framework consists of various components, including data pre-processing, bias annotation, NLP modeling, and evaluation layers. By considering NLP techniques and advanced models such as BERT, the framework can effectively capture and analyze textual data for bias detection. The framework has shown promising results in identifying and tagging biased terms and phrases across different domains. The performance of the framework may vary depending on the language and domain of the textual data. Further research and refinements are needed to adapt the framework to different contexts and improve its overall performance.
|
| 522 |
+
|
| 523 |
+
# CRediT authorship contribution statement
|
| 524 |
+
|
| 525 |
+
Shaina Raza: Conceptualization, Investigation, Formal analysis, Methodology, Project administration, Software, Validation, Visualization, Writing - original draft, Writing - review & editing. Muskan Garg: Investigation, Formal analysis, Validation, Writing - review & editing. Deepak John Reji: Methodology, Writing - review & editing. Syed Raza Bashir: Methodology, Formal analysis, Writing - review & editing, Project administration. Chen Ding: Formal analysis, Writing - review & editing, Supervision.
|
| 526 |
+
|
| 527 |
+
# Declaration of competing interest
|
| 528 |
+
|
| 529 |
+
The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
|
| 530 |
+
|
| 531 |
+
# Data availability
|
| 532 |
+
|
| 533 |
+
Data will be made available on request.
|
| 534 |
+
|
| 535 |
+
# Acknowledgments
|
| 536 |
+
|
| 537 |
+
Resources used in preparing this research were provided, in part, by the Province of Ontario, the Government of Canada through CIFAR, and companies sponsoring the Vector Institute.
|
| 538 |
+
|
| 539 |
+
# References
|
| 540 |
+
|
| 541 |
+
[1] B. Hutchinson, V. Prabhakaran, E. Denton, K. Webster, Y. Zhong, S. Denuyl, Social biases in NLP models as barriers for persons with disabilities, in: Proceedings of the Annual Meeting of the Association for Computational Linguistics, Association for Computational Linguistics, Online, 2020, pp. 5491-5501. doi:10.18653/v1/2020.acl-main.487. arXiv:2005.00813.
|
| 542 |
+
[2] T. Bolukbasi, K.-W. Chang, J. Y. Zou, V. Saligrama, A. T. Kalai, Man is to computer programmer as woman is to homemaker? debiasing word embeddings, Advances in neural information processing systems 29 (2016).
|
| 543 |
+
[3] L. Dixon, J. Li, J. Sorensen, N. Thain, L. Vasserman, Measuring and mitigating unintended bias in text classification, in: Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, 2018, pp. 67-73.
|
| 544 |
+
[4] F. Ribeiro, L. Henrique, F. Benevenuto, A. Chakraborty, J. Kulshrestha, M. Babaei, K. Gummadi, Media bias monitor: Quantifying biases of social media news outlets at large-scale, volume 12, 2018. URL: https://ojs.aaai.org/index.php/ICWSM/article/view/15025. doi:10.1609/icwsm.v12i1.15025.
|
| 545 |
+
|
| 546 |
+
[5] Z. Yanbo, Implicit bias or explicit bias: an analysis based on natural language processing, in: 2020 International conference on computing and data science (CDS), IEEE, 2020, pp. 52-55.
|
| 547 |
+
[6] N. M. Thomasian, C. Eickhoff, E. Y. Adashi, Advancing health equity with artificial intelligence, Journal of Public Health Policy 42 (2021) 602-611. doi:10.1057/s41271-021-00319-5.
|
| 548 |
+
[7] S. Raza, D. J. Reji, C. Ding, Dbias: detecting biases and ensuring fairness in news articles, International Journal of Data Science and Analytics (2022). doi:10.1007/s41060-022-00359-4.
|
| 549 |
+
[8] D. Gaucher, J. Friesen, A. C. Kay, Evidence that gendered wording in job advertisements exists and sustains gender inequality., Journal of personality and social psychology 101 (2011) 109.
|
| 550 |
+
[9] H. Dawkins, Marked attribute bias in natural language inference, in: Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, Association for Computational Linguistics, Online, 2021, pp. 4214-4226. doi:10.18653/v1/2021.findings-acl.369.
|
| 551 |
+
[10] T. Spinde, M. Plank, J.-D. Krieger, T. Ruas, B. Gipp, A. Aizawa, Neural media bias detection using distant supervision with BABE - bias annotations by experts, in: Findings of the Association for Computational Linguistics: EMNLP 2021, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021, pp. 1166-1177. doi:10.18653/v1/2021-findings-emnlp.101.
|
| 552 |
+
[11] M. Färber, V. Burkard, A. Jatowt, S. Lim, A multidimensional dataset based on crowdsourcing for analyzing and detecting news bias, in: Proceedings of the 29th ACM International Conference on Information & Knowledge Management, 2020, pp. 3007-3014.
|
| 553 |
+
[12] Y. Nie, Y. Tian, X. Wan, Y. Song, B. Dai, Named entity recognition for social media texts with semantic augmentation, in: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), Association for Computational Linguistics, Online, 2020, pp. 1383-1391. doi:10.18653/v1/2020.emnlp-main.107.
|
| 554 |
+
|
| 555 |
+
[13] S. Raza, B. Schwartz, Constructing a disease database and using natural language processing to capture and standardize free text clinical information, Scientific Reports 13 (2023) 8591. doi:10.1038/s41598-023-35482-0.
|
| 556 |
+
[14] S. Moon, L. Neves, V. Carvalho, Multimodal named entity recognition for short social media posts, in: Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), Association for Computational Linguistics, New Orleans, Louisiana, 2018, pp. 852-860. doi:10.18653/v1/N18-1078.
|
| 557 |
+
[15] I. Garrido-Muñoz, A. Montejo-Ráez, F. Martínez-Santiago, L. A. Ureña-López, A survey on bias in deep nlp, Applied Sciences 11 (2021) 3184.
|
| 558 |
+
[16] A. Caliskan, J. J. Bryson, A. Narayanan, Semantics derived automatically from language corpora contain human-like biases, Science 356 (2017) 183-186.
|
| 559 |
+
[17] S. Dev, E. Sheng, J. Zhao, A. Amstutz, J. Sun, Y. Hou, M. Sanseverino, J. Kim, A. Nishi, N. Peng, et al., On measures of biases and harms in nlp, arXiv preprint arXiv:2108.03362 (2021).
|
| 560 |
+
[18] T. Manzini, Y. C. Lim, Y. Tsvetkov, A. W. Black, Black Is To Criminal As Caucasian Is To Police, Proceedings of NAACL-HLT (2019) 615-621.
|
| 561 |
+
[19] E. K. Tokpo, P. Delobelle, B. Berendt, T. Calders, How Far Can It Go? On Intrinsic Gender Bias Mitigation for Text Classification, EACL 2023 - 17th Conference of the European Chapter of the Association for Computational Linguistics, Proceedings of the Conference (2023) 3410-3425. arXiv:2301.12855.
|
| 562 |
+
[20] Y. Cai, A. Zimek, G. Wunder, E. Ntoutsi, Power of explanations: Towards automatic debiasing in hate speech detection, in: 2022 IEEE 9th International Conference on Data Science and Advanced Analytics (DSAA), IEEE, 2022, pp. 1-10.
|
| 563 |
+
[21] Y. Wang, J. Mansurov, P. Ivanov, J. Su, A. Shelmanov, A. Tsvigun, C. Whitehouse, O. M. Afzal, T. Mahmoud, A. F. Aji, et al., M4: Multi-generator, multi-domain, and multi-lingual black-box machine-generated text detection, arXiv preprint arXiv:2305.14902 (2023).
|
| 564 |
+
|
| 565 |
+
[22] E. Pair, N. Vicas, A. M. Weber, V. Meausoone, J. Zou, A. Njuguna, G. L. Darmstadt, Quantification of gender bias and sentiment toward political leaders over 20 years of kenyan news using natural language processing, Frontiers in Psychology 12 (2021) 712646.
|
| 566 |
+
[23] S. Hassan, M. Huenerfauth, C. O. Alm, Unpacking the interdependent systems of discrimination: Ableist bias in NLP systems through an intersectional lens, in: Findings of the Association for Computational Linguistics: EMNLP 2021, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021, pp. 3116-3123. doi:10.18653/v1/2021-findings-emnlp.267.
|
| 567 |
+
[24] L. Ding, D. Yu, J. Xie, W. Guo, S. Hu, M. Liu, L. Kong, H. Dai, Y. Bao, B. Jiang, Word embeddings via causal inference: Gender bias reducing and semantic information preserving, in: AAAI Conference on Artificial Intelligence, 2021. URL: https://api.semanticscholar.org/CorpusID:245117373.
|
| 568 |
+
[25] V. S. Govindarajan, K. Atwell, B. Sinno, M. Alikhani, D. Beaver, J. J. Li, How people talk about each other: Modeling generalized intergroup bias and emotion, in: Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023, pp. 2488-2498.
|
| 569 |
+
[26] H. Devinney, J. Björklund, H. Björklund, Theories of “gender” in nlp bias research, in: Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency, 2022, pp. 2083–2102.
|
| 570 |
+
[27] B. Zhao, C. Chen, Q.-W. Wang, A. He, S.-T. Xia, Combating unknown bias with effective bias-conflicting scoring and gradient alignment, in: Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, 2023, pp. 3561-3569.
|
| 571 |
+
[28] T. Eftimov, B. Korousić Seljak, P. Korosec, A rule-based named-entity recognition method for knowledge extraction of evidence-based dietary recommendations, PloS one 12 (2017) e0179488.
|
| 572 |
+
[29] J. P. Chiu, E. Nichols, Named entity recognition with bidirectional LSTM-cnns, Transactions of the association for computational linguistics 4 (2016) 357-370.
|
| 573 |
+
|
| 574 |
+
[30] Z. Liu, X. Zhang, Z. Li, M. Sun, T-ner: An all-round python library for transformer-based named entity recognition, in: Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations, 2021, pp. 7–12.
|
| 575 |
+
[31] Z. Liu, X. Zhang, Z. Li, M. Sun, Reducing the bias of visual objects in multimodal named entity recognition, in: Proceedings of the 2023 ACM International Conference on Multimedia Retrieval, ACM, 2023, pp. 1-5.
|
| 576 |
+
[32] Z. Liu, X. Zhang, Z. Li, M. Sun, Social media event detection using spacy named entity recognition and spectral embeddings, in: Proceedings of the 2022 International Conference on Mobile Human-Computer Interaction, International ASET Inc., 2022, pp. 114–118.
|
| 577 |
+
[33] S. Raza, B. Schwartz, Entity and relation extraction from clinical case reports of COVID-19: a natural language processing approach, BMC Medical Informatics and Decision Making 23 (2023) 20. doi:10.1186/s12911-023-02117-3.
|
| 578 |
+
[34] C. Gerstenberger, N. Partanen, M. Rießler, J. Wilbur, Instant annotations—applying nlp methods to the annotation of spoken language documentation corpora, in: Proceedings of the Third Workshop on Computational Linguistics for Uralic Languages, 2017, pp. 25–36.
|
| 579 |
+
[35] S.-A. Rebuffi, S. Ehrhardt, K. Han, A. Vedaldi, A. Zisserman, Semi-supervised learning with scarce annotations, in: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, 2020, pp. 762-763.
|
| 580 |
+
[36] B. Alex, C. Grover, R. Shen, M. Kabadjov, Agile corpus annotation in practice: An overview of manual and automatic annotation of cvs, in: Proceedings of the Fourth Linguistic Annotation Workshop, 2010, pp. 29-37.
|
| 581 |
+
[37] J. H. Caufield, Y. Zhou, Y. Bai, D. A. Liam, A. O. Garlid, K.-W. Chang, Y. Sun, P. Ping, W. Wang, A Comprehensive Typing System for Information Extraction from Clinical Narratives, medRxiv (2019) 19009118.
|
| 582 |
+
[38] O. Serikov, E. Voloshina, A. Postnikova, E. Klyachko, E. Vylomova, T. Shavrina, E. Le Ferrand, V. Malykh, F. Tyers, T. Arkhangelskiy,
|
| 583 |
+
|
| 584 |
+
V. Mikhailov (Eds.), Proceedings of the Second Workshop on NLP Applications to Field Linguistics, Association for Computational Linguistics, Dubrovnik, Croatia, 2023. URL: https://aclanthology.org/2023.fieldmatters-1.0.
|
| 585 |
+
[39] N. Ghaffari Laleh, D. Truhn, G. P. Veldhuizen, T. Han, M. van Treeck, R. D. Buelow, R. Langer, B. Dislich, P. Boor, V. Schulz, et al., Adversarial attacks and adversarial robustness in computational pathology, Nature communications 13 (2022) 5711.
|
| 586 |
+
[40] N. Green, Proposed method for annotation of scientific arguments in terms of semantic relations and argument schemes, in: Proceedings of the 5th Workshop on Argument Mining, 2018, pp. 105-110.
|
| 587 |
+
[41] A. Johnson, T. Pollard, R. Mark, MIMIC-III Clinical Database, https://physionet.org/content/mimiciii/1.4/, 2021.
|
| 588 |
+
[42] A. Name, Classifying job posts via nlp, Medium (2023). URL: https://medium.com/data-science-101/classifying-job-posts-by-nlp-3b2b49a33247.
|
| 589 |
+
[43] T. Sexton, IOB Format Intro - Nestor, https://pages.nist.gov/nestor/examples/named-entities/01-BIO-format/, 2022.
|
| 590 |
+
[44] T. Spinde, M. Plank, J. D. Krieger, T. Ruas, B. Gipp, A. Aizawa, Neural Media Bias Detection Using Distant Supervision with BABE - Bias Annotations by Experts, Findings of the Association for Computational Linguistics, Findings of ACL: EMNLP 2021 (2021) 1166-1177. doi:10.18653/v1/2021-findings-emnlp.101. arXiv:2209.14557.
|
| 591 |
+
[45] X. Wang, Q. Liu, T. Gui, Q. Zhang, Y. Zou, X. Zhou, J. Ye, Y. Zhang, R. Zheng, Z. Pang, et al., Textflint: Unified multilingual robustness evaluation toolkit for natural language processing, in: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations, 2021, pp. 347-355.
|
| 592 |
+
[46] R. Mateos de Cabo, R. Gimeno, M. Martínez, L. López, Perpetuating gender inequality via the internet? an analysis of women's presence in spanish online newspapers, Sex roles 70 (2014) 57-71.
|
| 593 |
+
|
| 594 |
+
[47] J. Alabi, K. Amponsah-Kaakyire, D. Adelani, C. España-Bonet, Massive vs. curated embeddings for low-resourced languages: the case of Yorubá and Twi, in: Proceedings of the Twelfth Language Resources and Evaluation Conference, European Language Resources Association, Marseille, France, 2020, pp. 2754-2762.
|
| 595 |
+
[48] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis, L. Zettlemoyer, V. Stoyanov, Roberta: A robustly optimized bert pretraining approach, arXiv preprint arXiv:1907.11692 (2019).
|
| 596 |
+
[49] H. Yan, T. Gui, J. Dai, Q. Guo, Z. Zhang, X. Qiu, A unified generative framework for various ner subtasks, arXiv preprint arXiv:2106.01223 (2021).
|
| 597 |
+
[50] T. Gui, R. Ma, Q. Zhang, L. Zhao, Y.-G. Jiang, X. Huang, Cnn-based chinese ner with lexicon rethinking, in: Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, International Joint Conferences on Artificial Intelligence Organization, 2019, pp. 4982-4988. URL: https://doi.org/10.24963/ijcai.2019/692. doi:10.24963/ijcai.2019/692.
|
| 598 |
+
[51] H. Yan, B. Deng, X. Li, X. Qiu, Tener: adapting transformer encoder for named entity recognition, arXiv preprint arXiv:1911.04474 (2019).
|
| 599 |
+
[52] A. Fritzler, V. Logacheva, M. Kretov, Few-shot classification in named entity recognition task, in: Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing, 2019, pp. 993-1000.
|
| 600 |
+
[53] T. Ma, H. Jiang, Q. Wu, T. Zhao, C.-Y. Lin, Decomposed meta-learning for few-shot named entity recognition, in: Findings of the Association for Computational Linguistics: ACL 2022, Association for Computational Linguistics, Dublin, Ireland, 2022, pp. 1584-1596. URL: https://aclanthology.org/2022.findings-acl.124. doi:10.18653/v1/2022-findings-acl.124.
|
| 601 |
+
[54] E. V. Epure, R. Hennequin, Probing pre-trained auto-regressive language models for named entity typing and recognition, arXiv preprint arXiv:2108.11857 (2021).
|
| 602 |
+
[55] D. Farmakiotou, V. Karkaletsis, J. Koutsias, G. Sigletos, C. D. Spyropoulos, P. Stamatopoulos, Rule-based named entity recognition for
|
| 603 |
+
|
| 604 |
+
greek financial texts, in: Proceedings of the Workshop on Computational lexicography and Multimedia Dictionaries (COMLEX 2000), 2000, pp. 75-78.
|
| 605 |
+
[56] Y. Yu, A. R. Khan, J. Xu, Measuring robustness for NLP, in: Proceedings of the 29th International Conference on Computational Linguistics, International Committee on Computational Linguistics, Gyeongju, Republic of Korea, 2022, pp. 3908-3916. URL: https://aclanthology.org/2022.coling-1.343.
|
| 606 |
+
[57] S. Raza, C. Ding, Fake news detection based on news content and social contexts: a transformer-based approach, International Journal of Data Science and Analytics 13 (2022) 335-362.
|
| 607 |
+
[58] H. Mamdouh Farghaly, T. Abd El-Hafeez, A high-quality feature selection method based on frequent and correlated items for text classification, Soft Computing (2023) 1-16.
|
| 608 |
+
[59] H. Mamdouh Farghaly, T. Abd El-Hafeez, A new feature selection method based on frequent and associated itemsets for text classification, Concurrence and Computation: Practice and Experience 34 (2022) e7258.
|
| 609 |
+
[60] A. Omar, T. M. Mahmoud, T. Abd-El-Hafeez, A. Mahfouz, Multi-label arabic text classification in online social networks, Information Systems 100 (2021) 101785.
|
data/2023/2308_01xxx/2308.01681/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:273ae5abc02155fcf42897b547ca61f4c0541335419a174a862b38492e287ff2
size 1267987
data/2023/2308_01xxx/2308.01681/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_model.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2023/2308_01xxx/2308.01737/5b627e26-cac0-4623-ad74-a488ce34673f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e09f382b3c008d517e6f91dc3850f757d447b89c723a9115b79267e6f036938d
size 967101
data/2023/2308_01xxx/2308.01737/full.md
ADDED
@@ -0,0 +1,543 @@
# MAP: A Model-agnostic Pretraining Framework for Click-through Rate Prediction
|
| 2 |
+
|
| 3 |
+
Jianghao Lin
|
| 4 |
+
chiangel@sjtu.edu.cn
|
| 5 |
+
Shanghai Jiao Tong University
|
| 6 |
+
Shanghai, China
|
| 7 |
+
|
| 8 |
+
Xinyi Dai daixinyi@sjtu.edu.cn Shanghai Jiao Tong University Shanghai, China
|
| 9 |
+
|
| 10 |
+
Yanru Qu\* kevinqu16@gmail.com Shanghai Jiao Tong University Shanghai, China
|
| 11 |
+
|
| 12 |
+
Ruiming Tang
|
| 13 |
+
tangruiming@huawei.com
|
| 14 |
+
Huawei Noah's Ark Lab
|
| 15 |
+
Shenzhen, China
|
| 16 |
+
|
| 17 |
+
Wei Guo guowei67@huawei.com Huawei Noah's Ark Lab Shenzhen, China
|
| 18 |
+
|
| 19 |
+
Yong Yu
|
| 20 |
+
yyu@sjtu.edu.cn
|
| 21 |
+
Shanghai Jiao Tong University
|
| 22 |
+
Shanghai, China
|
| 23 |
+
|
| 24 |
+
Weinan Zhang* wnzhang@sjtu.edu.cn Shanghai Jiao Tong University Shanghai, China
|
| 25 |
+
|
| 26 |
+
# ABSTRACT
|
| 27 |
+
|
| 28 |
+
With the widespread application of personalized online services, click-through rate (CTR) prediction has received more and more attention and research. The most prominent features of CTR prediction are its multi-field categorical data format, and vast and daily-growing data volume. The large capacity of neural models helps digest such massive amounts of data under the supervised learning paradigm, yet they fail to utilize the substantial data to its full potential, since the 1-bit click signal is not sufficient to guide the model to learn capable representations of features and instances. The self-supervised learning paradigm provides a more promising pretrain-finetune solution to better exploit the large amount of user click logs, and learn more generalized and effective representations. However, self-supervised learning for CTR prediction is still an open question, since current works on this line are only preliminary and rudimentary. To this end, we propose a Model-agnostic Pretraining (MAP) framework that applies feature corruption and recovery on multi-field categorical data, and more specifically, we derive two practical algorithms: masked feature prediction (MFP) and replaced feature detection (RFD). MFP digs into feature interactions within each instance through masking and predicting a small portion of input features, and introduces noise contrastive estimation (NCE) to handle large feature spaces. RFD further turns MFP into a binary classification mode through replacing and detecting changes in input features, making it even simpler and more effective for CTR pretraining. Our extensive experiments on two
|
| 29 |
+
|
| 30 |
+
*Weinan Zhang and Yanru Qu are co-corresponding authors.
|
| 31 |
+
|
| 32 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 33 |
+
|
| 34 |
+
KDD '23, August 6-10, 2023, Long Beach, CA, USA
|
| 35 |
+
© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM.
|
| 36 |
+
ACM ISBN 979-8-4007-0103-0/23/08...$15.00
|
| 37 |
+
https://doi.org/10.1145/3580305.3599422
|
| 38 |
+
|
| 39 |
+
real-world large-scale datasets (i.e., Avazu, Criteo) demonstrate the advantages of these two methods on several strong backbones (e.g., DCNv2, DeepFM), and achieve new state-of-the-art performance in terms of both effectiveness and efficiency for CTR prediction.
|
| 40 |
+
|
| 41 |
+
# CCS CONCEPTS
|
| 42 |
+
|
| 43 |
+
- Information systems $\rightarrow$ Data mining; Recommender systems.
|
| 44 |
+
|
| 45 |
+
# KEYWORDS
|
| 46 |
+
|
| 47 |
+
CTR Prediction, Self-supervised Learning, Model Pretraining
|
| 48 |
+
|
| 49 |
+
# ACM Reference Format:
|
| 50 |
+
|
| 51 |
+
Jianghao Lin, Yanru Qu, Wei Guo, Xinyi Dai, Ruiming Tang, Yong Yu, and Weinan Zhang. 2023. MAP: A Model-agnostic Pretraining Framework for Click-through Rate Prediction. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '23), August 6-10, 2023, Long Beach, CA, USA. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3580305.3599422
|
| 52 |
+
|
| 53 |
+
# 1 INTRODUCTION
|
| 54 |
+
|
| 55 |
+
Click-through rate (CTR) prediction aims to estimate the probability of a user's click [25, 56, 63] given a specific context, and plays a fundamental role in various personalized online services, including recommender systems [62], display advertising [44], web search [9, 12, 31], etc. Traditional CTR models (e.g., logistic regression [49] and FM-based models [22, 48]) can only capture low-order feature interactions, which might lead to relatively inferior performance in real-world applications. With the rise of deep learning techniques and the massive amount of user behavior data collected online, many delicate neural CTR models have been proposed to model higher-order feature interactions with different operators (e.g., product [14, 24, 29, 45, 46, 59], convolution [28, 32, 33], and attention [27, 50, 64]). These works generally follow a supervised learning paradigm shown in Figure 1(a), where a model is randomly initialized and trained from scratch based on the supervised signals (click or not). Nevertheless, the 1-bit click signal is not sufficient
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
(a) Supervised Learning Paradigm
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
(b) Self-supervised Learning Paradigm
|
| 62 |
+
Figure 1: The illustration of (a) supervised learning paradigm, and (b) self-supervised learning paradigm. The supervised learning paradigm directly trains a randomly initialized model from scratch without pretraining. The self-supervised learning paradigm contains two stages, where we first pretrain the model based on the pretext task and then finetune it for the downstream task.
|
| 63 |
+
|
| 64 |
+
for the model to learn capable representations of features and instances, resulting in suboptimal performance.
|
| 65 |
+
|
| 66 |
+
Self-supervised learning provides a more powerful training paradigm to learn more generalized and effective representations of data samples, and proves to be effectual in Natural Language Processing (NLP) [8, 10] and Computer Vision (CV) [20] domains. As shown in Figure 1(b), they usually adopt a pretrain-finetune scheme, where we first pretrain an encoder based on pretext tasks (i.e., pretraining tasks), and then finetune a model initialized by the pretrained encoder for downstream tasks based on specific training data with supervised signals. According to the pretext tasks, self-supervised learning can be mainly classified into two categories: (1) contrastive methods and (2) generative methods [34]. Contrastive methods [2, 5, 7, 13, 20, 42] aim to learn generalized representations from different views or distortions of the same input. Generative methods [3, 10, 19, 67] reconstruct the original input sample from the corrupted one.
|
| 67 |
+
|
| 68 |
+
Self-supervised learning has flourished in the NLP domain to pretrain transformers for unlabeled sentences, making it a perfect mimicry target for sequential recommendations. Various methods are proposed to treat user behavior sequences as sentences, and adopt language models [51] or sentence augmentations [37, 65] for better user or item embeddings. Moreover, many pretraining methods are designed for different types of data formats (e.g., graph data [17, 39, 54], or multi-modal data [36, 38]) to further enrich the recommendation family. However, these sequential/graph/multimodal based pretraining methods are essentially incompatible for the CTR data format, i.e., multi-field categorical data format:
|
| 69 |
+
|
| 70 |
+
$$
x_{i} = \underbrace{(0, \dots, 1, 0)}_{\text{Item}=\text{Jeans}} \, \underbrace{(1, \dots, 0, 0)}_{\text{Color}=\text{Blue}} \, \dots \, \underbrace{(1, 0)}_{\text{Gender}=\text{Male}} \tag{1}
$$
|
| 73 |
+
|
| 74 |
+
In this paper, we focus on self-supervised pretraining over multi-field categorical data for CTR prediction. There exist preliminary works [2, 57] that explore the pretraining methods for CTR data. MF4UIP [57] leverages the BERT framework [10] to predict the masked features for user intent prediction. However, it is nonscalable when the feature space grows, and thus suffers from severe
|
| 75 |
+
|
| 76 |
+
inefficiency problem for industrial applications with million-level feature spaces. SCARF [2] is a contrastive method that adopts SimCLR framework [5] and InfoNCE loss [42] to learn robust representations for each data sample. Its contrastive property requires to calculate representations from different views of the same instance, which doubles the throughput time and memory usage, leading to low efficiency. Moreover, contrastive based methods only provide coarse-grained instance-level supervisions from sample pairs, and therefore might get trapped in representation spaces' early degeneration problem [34], where the model overfits the pretext task too early and loses the ability to generalize.
|
| 77 |
+
|
| 78 |
+
To this end, we propose a Model-agnostic Pretraining (MAP) framework that applies feature corruption and recovery towards multi-field categorical data for CTR pretraining. Specifically, we derive two algorithms based on different strategies of corruption and recovery: masked feature prediction (MFP) and replaced feature detection (RFD). MFP requires the model to recover masked features according to corrupted samples, and adopts noise contrastive estimation (NCE) to reduce the computational overhead caused by the large feature space (million level) in CTR data. Moreover, RFD turns MFP into a binary classification mode and requires the model to detect whether each feature in the corrupted sample is replaced or not. Compared with MFP that only utilizes a subset of fields and predict over the entire feature space, RFD is simpler yet more effective and more efficient, which provides fine-grained and more diverse field-wise self-supervised signals. RFD can achieve better CTR performance with fewer parameters, higher throughput rates, and fewer pretraining epochs compared to other pretraining methods. Derived from the MAP framework, MFP and RFD are compatible with any neural CTR models and can promote performance without altering the model structure or inference cost.
|
| 79 |
+
|
| 80 |
+
Main contributions of this paper are concluded as follows:
|
| 81 |
+
|
| 82 |
+
- We propose a Model-agnostic Pretraining (MAP) framework that applies feature corruption and recovery on multi-field categorical data. Different pretraining algorithms could be derived by customizing the strategies of corruption and recovery.
|
| 83 |
+
- We derive a masked feature prediction (MFP) pretraining algorithm from MAP, where the model predicts the original features that are replaced by $<\mathrm{MASK}>$ tokens. We also adopt noise contrastive estimation (NCE) to reduce the computational overhead.
|
| 84 |
+
- We derive a replaced feature detection (RFD) pretraining algorithm from MAP, where the model is required to detect whether the feature of each field is replaced or not. RFD is simpler yet more effective and more efficient, and it can achieve better CTR performance with fewer computational resources.
|
| 85 |
+
- Extensive experiments on two real-world large-scale datasets validate the advantages of MFP and RFD on several strong backbones, and achieve new state-of-the-art performance in terms of both effectiveness and efficiency for CTR prediction.
|
| 86 |
+
|
| 87 |
+
# 2 PRELIMINARIES
|
| 88 |
+
|
| 89 |
+
Without loss of generality, the basic form of CTR prediction casts a binary classification problem over multi-field categorical data. Each instance for CTR prediction contains $F$ fields with each field taking one single value from multiple categories, and can be represented by $\{x_{i},y_{i}\}$ . $x_{i}$ is a sparse one-hot vector as shown in Eq. 1,
|
| 90 |
+
|
| 91 |
+
and $y_{i} \in \{1,0\}$ is the true label (click or not). For simplicity, we build a global feature map of size $M$ , and assign a unique feature index for each category, and thus we can represent each sample as $x_{i} = [x_{i,1}, \ldots, x_{i,F}]$ , where $x_{i,f}$ ( $f = 1, \ldots, F$ ) is the index of corresponding feature.
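
To make this index representation concrete, here is a minimal sketch (plain Python, with toy field and category names that are purely illustrative) of building such a global feature map and encoding one sample as $F$ feature indices:

```python
# Build a toy global feature map: every (field, category) pair gets a unique index.
fields = {
    "Item":   ["Jeans", "Shoes", "Hat"],
    "Color":  ["Blue", "Red"],
    "Gender": ["Male", "Female"],
}

feature_index = {}          # (field, category) -> global feature index
for field, categories in fields.items():
    for category in categories:
        feature_index[(field, category)] = len(feature_index)

M = len(feature_index)       # global feature space size (millions in practice)
F = len(fields)              # number of fields

# Encode one sample x_i as a list of F global feature indices.
sample = {"Item": "Jeans", "Color": "Blue", "Gender": "Male"}
x_i = [feature_index[(field, sample[field])] for field in fields]
print(M, F, x_i)             # 7 3 [0, 3, 5]
```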
|
| 92 |
+
|
| 93 |
+
CTR models aim to estimate the click probability $P(y_{i} = 1|x_{i})$ for each sample. According to [56, 71], the structure of most recent CTR models can be abstracted as three layers: (1) embedding layer, (2) feature interaction layer, and (3) prediction layer.
|
| 94 |
+
|
| 95 |
+
**Embedding layer** transforms the sparse binary input $x_{i}$ into dense low-dimensional embedding vectors $\mathbf{E} = [v_{1}; v_{2}; \ldots; v_{F}] \in \mathbb{R}^{F \times d}$ , where $d$ is the embedding size, and each feature is represented as a fixed-length vector $v_{f} \in \mathbb{R}^{d}$ .
|
| 96 |
+
|
| 97 |
+
Feature interaction layer, as the main functional module of CTR models, is designed to capture the second- or higher-order feature interactions with various operations (e.g., product, attention). This layer produces a compact representation $q_{i}$ based on the dense embedding vectors $\mathbf{E}$ for the sample $x_{i}$ .
|
| 98 |
+
|
| 99 |
+
Prediction layer estimates the click probability $\hat{y}_i = P(y_i = 1|x_i)$ based on the representation $q_{i}$ generated by the feature interaction layer. It is usually a linear layer or an MLP module followed by a sigmoid function:
|
| 100 |
+
|
| 101 |
+
$$
\sigma(x) = \frac{1}{1 + e^{-x}}, \tag{2}
$$
|
| 104 |
+
|
| 105 |
+
After the prediction layer, the CTR model is trained in an end-to-end manner with the binary cross-entropy loss:
|
| 106 |
+
|
| 107 |
+
$$
\mathcal{L} = -\frac{1}{N} \sum_{i=1}^{N} \left[ y_{i} \log \hat{y}_{i} + \left(1 - y_{i}\right) \log \left(1 - \hat{y}_{i}\right) \right], \tag{3}
$$
|
| 110 |
+
|
| 111 |
+
where $N$ is the number of training samples.
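
A minimal PyTorch sketch of this three-layer abstraction, using a plain MLP as the feature interaction layer and purely illustrative layer sizes (a generic skeleton, not the paper's implementation):

```python
import torch
import torch.nn as nn

class SimpleCTRModel(nn.Module):
    """Embedding layer -> feature interaction layer -> prediction layer."""
    def __init__(self, num_features: int, num_fields: int, embed_dim: int = 16):
        super().__init__()
        # Embedding layer: one row per feature in the global feature map.
        self.embedding = nn.Embedding(num_features, embed_dim)
        # Feature interaction layer: here simply an MLP over concatenated embeddings.
        self.interaction = nn.Sequential(
            nn.Linear(num_fields * embed_dim, 128), nn.ReLU(),
            nn.Linear(128, 64), nn.ReLU(),
        )
        # Prediction layer: linear head; the sigmoid is folded into the loss for stability.
        self.head = nn.Linear(64, 1)

    def forward(self, x):                      # x: (batch, num_fields) feature indices
        E = self.embedding(x)                  # (batch, num_fields, embed_dim)
        q = self.interaction(E.flatten(1))     # compact representation q_i
        return self.head(q).squeeze(-1)        # logits; sigma(logit) = predicted CTR

# One binary cross-entropy training step on a random toy batch.
model = SimpleCTRModel(num_features=1000, num_fields=25)
x = torch.randint(0, 1000, (32, 25))
y = torch.randint(0, 2, (32,)).float()
loss = nn.functional.binary_cross_entropy_with_logits(model(x), y)
loss.backward()
```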
|
| 112 |
+
|
| 113 |
+
# 3 METHODOLOGY
|
| 114 |
+
|
| 115 |
+
In this section, we introduce our proposed Model-agnostic Pretraining (MAP) framework, and derive two pretraining algorithms based on different strategies of feature corruption and recovery. The illustration of the framework and algorithms is shown in Figure 2.
|
| 116 |
+
|
| 117 |
+
# 3.1 MAP Framework Overview
|
| 118 |
+
|
| 119 |
+
We adopt the common pretrain-finetune scheme in self-supervised learning for NLP [8, 10] and CV [20], where we first pretrain a CTR model for a pretext task, and then finetune the pretrained model with click signals.
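
A minimal sketch of how the two stages can share parameters (sizes and head shapes are illustrative assumptions; the concrete recovery head depends on the pretext task derived below):

```python
import torch.nn as nn

num_features, num_fields, embed_dim, hidden = 1000, 25, 16, 64

# Encoder = embedding layer + feature interaction layer, shared by both stages.
class Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(num_features, embed_dim)
        self.interaction = nn.Sequential(
            nn.Linear(num_fields * embed_dim, hidden), nn.ReLU())

    def forward(self, x):                       # x: (batch, num_fields) feature indices
        return self.interaction(self.embedding(x).flatten(1))

encoder = Encoder()
pretext_head = nn.Linear(hidden, num_fields)    # recovery head used only for pretraining
ctr_head = nn.Linear(hidden, 1)                 # prediction head used for finetuning

# Stage 1 (pretrain): optimize encoder + pretext_head on the corruption/recovery task.
pretrain_params = list(encoder.parameters()) + list(pretext_head.parameters())

# Stage 2 (finetune): keep the pretrained encoder weights, drop the pretext head,
# and train encoder + a freshly initialized CTR head on the 1-bit click labels.
finetune_params = list(encoder.parameters()) + list(ctr_head.parameters())
```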
|
| 120 |
+
|
| 121 |
+
We propose a Model-agnostic Pretraining (MAP) framework for the pretraining stage. The pretext task for the model is to recover the original information (e.g., original features, corrupted field index) from the corrupted samples. It is worth noting that MAP is compatible with any neural CTR models, since we only corrupt the input sample (i.e., feature corruption layer) and alter the prediction head (i.e., feature recovery layer) for the recovery target. Finally, by customizing the design of feature corruption and recovery layers, we derive two specific pretraining algorithms as follows:
|
| 122 |
+
|
| 123 |
+
- Masked feature prediction (MFP) requires the model to recover the original features from the corrupted sample which contains multiple <MASK> tokens.
|
| 124 |
+
|
| 125 |
+
- Replaced feature detection (RFD) tells the model to perform field-wise detection about whether the feature of each field is replaced or not.
|
| 126 |
+
|
| 127 |
+
Hereinafter, we omit the detailed structure of feature interaction layer for certain CTR models, since MFP and RFD are both model-agnostic pretraining algorithms.
|
| 128 |
+
|
| 129 |
+
# 3.2 Masked Feature Prediction
|
| 130 |
+
|
| 131 |
+
In the masked feature prediction (MFP) pretraining stage, we first corrupt the original input sample $x_{i}$ with a feature masking layer, where we randomly replace a certain proportion of the features with <MASK> tokens. Then, we feed the corrupted sample $x_{i}^{c}$ through the embedding layer and feature interaction layer to get the compact representation $q_{i}^{c}$ . Finally, the vector $q_{i}^{c}$ is inputted to a field-wise prediction layer to predict the original feature for each <MASK> token. To ensure efficiency and practicability, we introduce noise contrastive estimation (NCE) to allow the model to predict among a large feature space (e.g., millions of candidate features).
|
| 132 |
+
|
| 133 |
+
3.2.1 Feature Masking Layer. For an input sample with $F$ features (i.e., $x_{i} = [x_{i,1},\ldots ,x_{i,F}]$ ), we randomly replace a part of the features with $<\mathrm{MASK}>$ tokens, resulting in a corrupted sample $x_{i}^{c}$ . The proportion of features to be masked is a hyperparameter denoted as corrupt ratio $\gamma$ . We represent the set of indices of masked fields as $\mathcal{I}$ . The $<\mathrm{MASK}>$ token is also regarded as a special feature in the embedding table, and it is shared among all the feature fields. That is, we do not maintain field-specific mask tokens, in order to avoid introducing prior knowledge about the masked fields. A harder pretraining task with less prior knowledge can force the model to learn more generalized feature representations, which benefits the downstream task [8, 35].
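
A minimal sketch of such a feature masking layer (how the masked set is sampled is an implementation detail assumed here):

```python
import torch

def mask_features(x, mask_token_id, corrupt_ratio=0.1):
    """Replace a fixed proportion of each sample's fields with a shared <MASK> token.

    x: (batch, num_fields) tensor of global feature indices.
    Returns the corrupted sample x^c and the boolean set I of masked fields.
    """
    batch, num_fields = x.shape
    num_masked = max(1, int(round(corrupt_ratio * num_fields)))
    scores = torch.rand(batch, num_fields, device=x.device)
    masked_idx = scores.topk(num_masked, dim=1).indices        # random fields per sample
    masked = torch.zeros_like(x, dtype=torch.bool).scatter_(1, masked_idx, True)
    x_corrupt = torch.where(masked, torch.full_like(x, mask_token_id), x)
    return x_corrupt, masked

# The shared <MASK> token occupies one extra row at the end of the embedding table.
x = torch.randint(0, 1000, (4, 25))
x_corrupt, masked = mask_features(x, mask_token_id=1000, corrupt_ratio=0.3)
```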
|
| 134 |
+
|
| 135 |
+
3.2.2 Field-wise Prediction Layer. After the embedding layer and feature interaction layer, we obtain the representation $q_{i}^{c}$ for the corrupted sample $x_{i}^{c}$ . For each masked feature $x_{i,f}$ of the $f$ -th field, we maintain an independent multi-layer perceptron (MLP) network $g_{f}$ followed by a softmax function to compute the predictive probability $p_{i,f} \in \mathbb{R}^{M}$ over the candidate features:
|
| 136 |
+
|
| 137 |
+
$$
z_{i,f} = g_{f}\left(q_{i}^{c}\right), \quad z_{i,f} \in \mathbb{R}^{M}, \tag{4}
$$

$$
p_{i,f,j} = \frac{\exp\left(z_{i,f,j}\right)}{\sum_{k=1}^{M} \exp\left(z_{i,f,k}\right)}, \quad j = 1, \dots, M. \tag{5}
$$
|
| 144 |
+
|
| 145 |
+
We expand the predictive space (i.e., candidate features) for every masked field $f$ from the field-specific feature space to the global feature space, in order to increase the difficulty of pretext task and thus benefit the downstream CTR prediction task [8, 35]. That is, the model has to select the original feature $x_{i,f}$ out of the whole feature space, which usually contains millions of features in recommender systems. Finally, we view MFP pretraining as a multi-class classification problem and employ the multi-class cross-entropy loss for optimization:
|
| 146 |
+
|
| 147 |
+
$$
\mathcal{L}_{i}^{MFP} = \frac{1}{|\mathcal{I}|} \sum_{f \in \mathcal{I}} \operatorname{CrossEntropy}\left(p_{i,f}, x_{i,f}\right) \tag{6}
$$
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
(a) MAP Framework
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
(b) Masked Feature Prediction
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
(c) Replaced Feature Detection
|
| 159 |
+
Figure 2: The illustration of (a) MAP framework, (b) masked feature prediction (MFP), (c) replaced feature detection (RFD), and (d) finetune. MFP and RFD are derived from the MAP framework by customizing the design of feature corruption and recovery layers. In the finetuning stage, we maintain the same model structure, and load the parameters from the pretrained model to initialize the embedding layer and feature interaction layer.
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
(d) Finetune
|
| 163 |
+
|
| 164 |
+
3.2.3 Noise Contrastive Estimation. The MFP method introduced above is still impractical and extremely expensive, since in Eq. 5 we have to calculate the softmax function over the large global feature space. Such a million-level multi-class classification problem leads to a tremendous amount of memory usage and unacceptable pretraining time cost for real-world applications. To this end, we adopt noise contrastive estimation (NCE) [16, 40, 41] to reduce the softmax overhead.
|
| 165 |
+
|
| 166 |
+
NCE converts the multi-class classification problem into a binary classification task, where the model tries to distinguish the positive feature (i.e., the masked feature $x_{i,f}$ ) from noise features. Specifically, for the $f$ -th masked field, we sample $K$ noise features from $M$ candidate features according to their frequency distribution in the training set. Then, we employ the binary cross-entropy loss:
|
| 167 |
+
|
| 168 |
+
$$
\mathcal{L}_{i}^{NCE} = -\frac{1}{|\mathcal{I}|} \left[ \sum_{f \in \mathcal{I}} \left( \log \sigma\left(z_{i,f,t}\right) + \sum_{k=1}^{K} \log\left(1 - \sigma\left(z_{i,f,k}\right)\right) \right) \right], \tag{7}
$$
|
| 171 |
+
|
| 172 |
+
where $z_{i,f}$ is the output of the $f$-th MLP predictor, $t$ is the feature index of the positive feature, and $\sigma$ is the sigmoid function. In this way, we reduce the complexity of the loss calculation from $O(M)$ to $O(Km)$, where $Km \ll M$ and $m$ is the number of masked fields. In our experiments, $K = 25$ is enough to achieve good CTR performance even for a large global feature space (e.g., $M = 4$ million).
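
A minimal sketch of the NCE objective of Eq. 7, assuming the per-field predictor scores candidates via dot products against an output embedding table so that only the positive and the $K$ sampled noise features are ever touched (one common way to realize NCE; the paper's exact parameterization of $g_f$ may differ):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

M, hidden, K = 10_000, 64, 25          # toy feature space size; millions in practice

# Output embeddings used to score candidate features for the masked fields.
out_embed = nn.Embedding(M, hidden)

def mfp_nce_loss(q_corrupt, target_ids, noise_ids):
    """q_corrupt:  (n, hidden)  representation per masked field
       target_ids: (n,)         original (positive) feature indices
       noise_ids:  (n, K)       noise features sampled by training-set frequency"""
    pos_logit = (q_corrupt * out_embed(target_ids)).sum(-1)                   # z_{i,f,t}
    neg_logit = torch.einsum("nh,nkh->nk", q_corrupt, out_embed(noise_ids))   # z_{i,f,k}
    pos_loss = F.binary_cross_entropy_with_logits(pos_logit, torch.ones_like(pos_logit))
    neg_loss = F.binary_cross_entropy_with_logits(neg_logit, torch.zeros_like(neg_logit))
    return pos_loss + neg_loss

q = torch.randn(8, hidden)                 # eight masked fields in a toy batch
targets = torch.randint(0, M, (8,))
noise = torch.randint(0, M, (8, K))        # uniform here; frequency-based in the paper
loss = mfp_nce_loss(q, targets, noise)
```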
|
| 173 |
+
|
| 174 |
+
# 3.3 Replaced Feature Detection
|
| 175 |
+
|
| 176 |
+
As shown in Figure 2(c), we further propose the replaced feature detection (RFD) algorithm to provide fine-grained and more diverse pretraining signals from all the feature fields, instead of a subset of fields in MFP (i.e., the masked fields).
|
| 177 |
+
|
| 178 |
+
In the RFD pretraining stage, we first corrupt the original input sample $x_{i}$ by a feature replacement layer, where we randomly replace a certain proportion of the features with other features. Then, after obtaining the compact representation $q_{i}^{c}$ from the embedding
|
| 179 |
+
|
| 180 |
+
and feature interaction layers, we employ a field-wise prediction layer to detect whether the feature in each field is replaced or not.
|
| 181 |
+
|
| 182 |
+
3.3.1 Feature Replacement Layer. For an input sample with $F$ features (i.e., $x_{i} = [x_{i,1},\ldots ,x_{i,F}]$ ), we randomly replace a part of the features, and denote the set of indices of replaced fields as $\mathcal{I}$ . The proportion of features to be replaced is a hyperparameter represented as corrupt ratio $\gamma$ . Next, we replace each of these selected features by a random sampling from the empirical marginal distribution (i.e., sample from the field-specific feature space by the feature frequency distribution) of the corresponding field $\hat{\mathcal{F}}_f$ in the training set, resulting in the corrupted sample $x_{i}^{c}$ .
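
A minimal sketch of this feature replacement layer under the field-frequency strategy (here each field is corrupted independently with probability $\gamma$; drawing an exact proportion works the same way as in the masking sketch above):

```python
import torch

def replace_features(x, field_feature_ids, field_feature_probs, corrupt_ratio=0.1):
    """Field-frequency replacement: corrupt randomly chosen fields by drawing a
    replacer from the same field's empirical frequency distribution.

    x:                   (batch, num_fields) global feature indices
    field_feature_ids:   list of 1-D tensors, candidate feature ids per field
    field_feature_probs: list of 1-D tensors, matching empirical frequencies
    Returns the corrupted sample x^c and the 0/1 replacement labels r.
    """
    batch, num_fields = x.shape
    replaced = torch.rand(batch, num_fields) < corrupt_ratio   # fields chosen per sample
    x_corrupt = x.clone()
    for f in range(num_fields):
        rows = replaced[:, f].nonzero(as_tuple=True)[0]
        if len(rows) == 0:
            continue
        # Sample replacers from this field's feature frequency distribution.
        picks = torch.multinomial(field_feature_probs[f], len(rows), replacement=True)
        x_corrupt[rows, f] = field_feature_ids[f][picks]
    return x_corrupt, replaced.float()
```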
|
| 183 |
+
|
| 184 |
+
3.3.2 Field-wise Prediction Layer. Similar to MFP, we obtain the representation $q_{i}^{c}$ for the corrupted sample $x_{i}^{c}$ through the embedding layer and feature interaction layers. Then, we feed $q_{i}^{c}$ to an MLP predictor followed by an element-wise sigmoid function, resulting in an $F$ -length predictive vector $p_{i}$ :
|
| 185 |
+
|
| 186 |
+
$$
p_{i} = \sigma\left(\operatorname{MLP}\left(q_{i}^{c}\right)\right), \quad p_{i} \in \mathbb{R}^{F}, \tag{8}
$$
|
| 189 |
+
|
| 190 |
+
where $p_{i,f}$ ( $f = 1, \dots, F$ ) denotes the probability that the feature in the $f$ -th field is replaced in the feature replacement layer. Finally, we employ the binary cross-entropy loss for RFD pretraining:
|
| 191 |
+
|
| 192 |
+
$$
\mathcal{L}_{i}^{RFD} = \frac{1}{F} \sum_{f=1}^{F} \operatorname{BinaryCrossEntropy}\left(p_{i,f}, r_{i,f}\right), \tag{9}
$$
|
| 195 |
+
|
| 196 |
+
where $r_{i,f} \in \{1,0\}$ ( $f = 1,\dots,F$ ) is the label indicating whether the feature in the $f$ -th field of sample $x_i$ is replaced or not.
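
A minimal sketch of the RFD detection head and loss of Eqs. 8-9 (MLP sizes are illustrative):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden, num_fields = 64, 25

# Field-wise prediction layer: one "was this field replaced?" logit per field (Eq. 8).
rfd_head = nn.Sequential(
    nn.Linear(hidden, 128), nn.ReLU(), nn.Linear(128, num_fields))

q_corrupt = torch.randn(32, hidden)                    # representations of corrupted samples
r = torch.randint(0, 2, (32, num_fields)).float()      # labels from the replacement layer

logits = rfd_head(q_corrupt)                           # (batch, num_fields)
loss = F.binary_cross_entropy_with_logits(logits, r)   # Eq. 9, averaged over all fields
```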
|
| 197 |
+
|
| 198 |
+
Apparently, RFD serves as a simpler pretraining algorithm compared with MFP which requires NCE to reduce the computational overhead. While MFP only utilizes the masked fields (the corrupt ratio $\gamma$ is usually $10\% - 30\%$ ) as self-supervised signals, RFD involves all the feature fields to provide more sufficient and more diverse signal guidance. We will evaluate the effectiveness and efficiency of RFD later in Section 4.2 and Section 4.3, respectively.
|
| 199 |
+
|
| 200 |
+
# 3.4 Complexity Analysis
|
| 201 |
+
|
| 202 |
+
We analyze the time complexity of our proposed MFP and RFD, as well as two baseline pretraining algorithms (i.e., MF4UIP and SCARF). We only analyze the component above the feature interaction layer (i.e., prediction layer and loss calculation) due to their model-agnostic property.
|
| 203 |
+
|
| 204 |
+
Suppose the batch size is $B$, and we adopt a linear layer $f: \mathbb{R}^l \to \mathbb{R}^n$ to transform the compact representation $q_i$ (or $q_i^c$) to the final predictive space. The time complexity of MF4UIP is $O(Bln + BM)$, where the main overhead of MF4UIP is the softmax computation over the million-level feature space of size $M$. By adopting NCE, the time complexity of MFP reduces to $O(Bln + BKm)$, where $Km \ll M$ and $m$ is the number of masked features. By introducing the binary classification mode, the time complexity of RFD further reduces to $O(Bln + BF)$, where $F$ is the number of feature fields. Besides, as a contrastive algorithm with InfoNCE loss, SCARF has a quadratic complexity over the batch size: $O(2Bln + 4B^2 n)$.
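
For a rough sense of scale (illustrative numbers only): with $B = 1024$, $M = 10^6$, $F = 39$ fields, $m \approx 12$ masked fields (corrupt ratio $0.3$), and $K = 25$, the output-side cost per batch is on the order of $BM \approx 10^9$ candidate scores for MF4UIP, but only $BKm \approx 3 \times 10^5$ for MFP with NCE and $BF \approx 4 \times 10^4$ for RFD.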
|
| 205 |
+
|
| 206 |
+
In summary, MF4UIP and SCARF are non-scalable in terms of the feature space $M$ and the batch size $B$, respectively. MFP and RFD achieve lower complexity and are scalable to industrial applications with million-level feature spaces and large batch sizes.
|
| 207 |
+
|
| 208 |
+
# 4 EXPERIMENT
|
| 209 |
+
|
| 210 |
+
In this section, we conduct extensive experiments to answer the following research questions:
|
| 211 |
+
|
| 212 |
+
RQ1 Can MFP and RFD improve the predictive performance for various base CTR models?
|
| 213 |
+
RQ2 How do MFP and RFD perform compared with existing CTR pretraining methods?
|
| 214 |
+
RQ3 Does RFD improve the pretraining efficiency compared with other pretraining methods?
|
| 215 |
+
RQ4 What are the influences of different pretraining configurations for MFP and RFD?
|
| 216 |
+
|
| 217 |
+
# 4.1 Experiment Setup
|
| 218 |
+
|
| 219 |
+
4.1.1 Datasets. We conduct extensive experiments on two large-scale CTR prediction benchmarks, i.e., Avazu and Criteo datasets. Both datasets are divided into training, validation, and test sets with proportion 8:1:1. The basic statistics of these two datasets are summarized in Table 1. Note that the training set for the pretraining stage and finetuning stage are the same, in order to make full use of the large-scale datasets. We describe the preprocessing for the two datasets as follows:
|
| 220 |
+
|
| 221 |
+
- Avazu originally contains 23 fields with categorical features. We remove the id field that has a unique value for each data sample, and transform the timestamp field into four new fields: weekday, day_of_month, hour_of_day, and is_wednesday, resulting in 25 fields. We remove the features that appear less than 2 times and replace them with a dummy feature <Unknown>.
|
| 222 |
+
- Criteo includes 26 anonymous categorical fields and 13 numerical fields. We discretize numerical features and transform them into categorical features by log transformation<sup>1</sup>. We remove the features that appear less than 10 times and replace them with a dummy feature <Unknown>.
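
As a concrete illustration, a minimal sketch of one possible log-based discretization for the Criteo numerical fields (an assumed variant for illustration only; the exact transformation referenced in the footnote may differ):

```python
import math

def discretize(value):
    """Map a numerical Criteo value to a categorical bucket id (assumed variant:
    values above 2 fall into int(log(v)^2) buckets so that large counts share
    coarse buckets; missing values get a dedicated token)."""
    if value is None:
        return "<Unknown>"
    if value > 2:
        return str(int(math.log(value) ** 2))
    return str(int(value))

print(discretize(None), discretize(1), discretize(500))   # <Unknown> 1 38
```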
|
| 223 |
+
|
| 224 |
+
Table 1: The dataset statistics
|
| 225 |
+
|
| 226 |
+
<table><tr><td>Dataset</td><td>#Training</td><td>#Validation</td><td>#Test</td><td>#Fields</td><td>#Features</td></tr><tr><td>Avazu</td><td>32,343,172</td><td>4,042,897</td><td>4,042,898</td><td>25</td><td>4,428,327</td></tr><tr><td>Criteo</td><td>36,672,493</td><td>4,584,062</td><td>4,584,062</td><td>39</td><td>1,086,794</td></tr></table>
|
| 227 |
+
|
| 228 |
+
4.1.2 Evaluation Metrics. To evaluate the performance of CTR prediction methods, we adopt AUC (area under the ROC curve) and Log Loss (binary cross-entropy loss) as the evaluation metrics. A slightly higher AUC or lower Log Loss (e.g., by 0.001) can be regarded as a significant improvement in CTR prediction [29, 56, 59].
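
Both metrics are standard; a minimal sketch of computing them with scikit-learn (toy labels and predictions, purely illustrative):

```python
from sklearn.metrics import roc_auc_score, log_loss

y_true = [1, 0, 0, 1, 0, 1]                # observed clicks
y_pred = [0.9, 0.2, 0.4, 0.7, 0.1, 0.6]    # predicted click probabilities

auc = roc_auc_score(y_true, y_pred)        # area under the ROC curve (higher is better)
ll = log_loss(y_true, y_pred)              # binary cross-entropy (lower is better)
print(f"AUC = {auc:.4f}, Log Loss = {ll:.4f}")
```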
|
| 229 |
+
|
| 230 |
+
4.1.3 Base Models & Baselines. We evaluate the self-supervised pretraining methods on various base CTR models with three different feature interaction operators: (1) product operator, including DeepFM [14], xDeepFM [29], and DCNv2 [59]; (2) convolutional operator, including FiGNN [28] and FGCNN[32]; (3) attention operator, including AutoInt [50] and Transformer [53]. Additionally, we adopt the classical DNN model, which proves to be a strong base model for self-supervised learning in our experiments. We compare our proposed MFP and RFD with two existing pretraining methods: MF4UIP [57] and SCARF [2], which are chosen as representative generative and contrastive algorithms, respectively.
|
| 231 |
+
|
| 232 |
+
4.1.4 Implementation Details. We provide detailed implementation details in the supplementary material (i.e., Appendix A). The code is available<sup>2</sup>.
|
| 233 |
+
|
| 234 |
+
# 4.2 Effectiveness Comparison (RQ1 & RQ2)
|
| 235 |
+
|
| 236 |
+
We apply five training schemes on each base model, and report the results in Table 2. In the "Scratch" scheme, we train the randomly initialized base model from scratch (i.e., supervised learning). In other schemes, we first pretrain the base model according to the corresponding method, and then finetune the pretrained model for CTR prediction (i.e., self-supervised learning). From Table 2, we can obtain the following observations:
|
| 237 |
+
|
| 238 |
+
- All the pretrain-finetune schemes can improve the performance of base model on both metrics by a large margin compared with the "Scratch" scheme, which demonstrates the effectiveness of self-supervised learning for CTR prediction.
|
| 239 |
+
- The product based CTR models (e.g., DCNv2), together with the DNN model, win the top places among the base models, when equipped with self-supervised learning.
|
| 240 |
+
- Among the pretrain-finetune schemes, MFP and RFD generally gain significant improvement over the two baseline pretraining methods except for few cases, and RFD can consistently achieve the best performance.
|
| 241 |
+
|
| 242 |
+
In addition to the performance comparison above, we also find some interesting phenomena as follows:
|
| 243 |
+
|
| 244 |
+
- The pretrain-finetune scheme can greatly reduce the demand on model structure design for CTR prediction. To our surprise, although DNN model is inferior under the "Scratch" scheme, it gains huge improvement under the pretrain-finetune schemes (especially RFD) and wins the second place, outperforming a
|
| 245 |
+
|
| 246 |
+
Table 2: The AUC and Log Loss (LL) performance of different base CTR models under different training schemes. We give the relative performance improvement of each pretrain-finetune scheme over the "scratch" scheme. The best result for each base model is given in bold, while the second-best value is underlined. The symbol * indicates statistically significant improvement of our proposed MFP and RFD schemes over the two baseline schemes with $p < 0.001$ .
|
| 247 |
+
|
| 248 |
+
<table><tr><td rowspan="2">FI Operator</td><td rowspan="2">Base Model</td><td rowspan="2">Scheme</td><td colspan="4">Avazu</td><td colspan="4">Criteo</td></tr><tr><td>AUC</td><td>ΔAUC ↑</td><td>LL</td><td>ΔLL ↓</td><td>AUC</td><td>ΔAUC ↑</td><td>LL</td><td>ΔLL ↓</td></tr><tr><td rowspan="5">MLP</td><td rowspan="5">DNN</td><td>Scratch</td><td>0.7920</td><td>-</td><td>0.3740</td><td>-</td><td>0.8105</td><td>-</td><td>0.4413</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7991</td><td>0.90%</td><td>0.3683</td><td>0.0057</td><td>0.8135</td><td>0.37%</td><td>0.4386</td><td>0.0027</td></tr><tr><td>SCARF</td><td>0.7989</td><td>0.87%</td><td>0.3684</td><td>0.0056</td><td>0.8122</td><td>0.21%</td><td>0.4399</td><td>0.0014</td></tr><tr><td>MFP (Ours)</td><td>0.8006*</td><td>1.09%</td><td>0.3675*</td><td>0.0065</td><td>0.8145*</td><td>0.49%</td><td>0.4373*</td><td>0.0040</td></tr><tr><td>RFD (Ours)</td><td>0.8016*</td><td>1.22%</td><td>0.3666*</td><td>0.0074</td><td>0.8152*</td><td>0.58%</td><td>0.4367*</td><td>0.0046</td></tr><tr><td rowspan="10">Attention</td><td rowspan="5">AutoInt</td><td>Scratch</td><td>0.7895</td><td>-</td><td>0.3751</td><td>-</td><td>0.8068</td><td>-</td><td>0.4452</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7919</td><td>0.30%</td><td>0.3729</td><td>0.0022</td><td>0.8089</td><td>0.26%</td><td>0.4429</td><td>0.0023</td></tr><tr><td>SCARF</td><td>0.7951</td><td>0.71%</td><td>0.3708</td><td>0.0043</td><td>0.8084</td><td>0.20%</td><td>0.4435</td><td>0.0017</td></tr><tr><td>MFP (Ours)</td><td>0.7952</td><td>0.72%</td><td>0.3707</td><td>0.0044</td><td>0.8100*</td><td>0.40%</td><td>0.4420*</td><td>0.0032</td></tr><tr><td>RFD (Ours)</td><td>0.7978*</td><td>1.05%</td><td>0.3693*</td><td>0.0058</td><td>0.8104*</td><td>0.45%</td><td>0.4416*</td><td>0.0036</td></tr><tr><td rowspan="5">Transformer</td><td>Scratch</td><td>0.7922</td><td>-</td><td>0.3751</td><td>-</td><td>0.8071</td><td>-</td><td>0.4446</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7945</td><td>0.29%</td><td>0.3713</td><td>0.0038</td><td>0.8090</td><td>0.24%</td><td>0.4427</td><td>0.0019</td></tr><tr><td>SCARF</td><td>0.7958</td><td>0.45%</td><td>0.3705</td><td>0.0046</td><td>0.8096</td><td>0.31%</td><td>0.4422</td><td>0.0024</td></tr><tr><td>MFP (Ours)</td><td>0.7968*</td><td>0.58%</td><td>0.3700*</td><td>0.0051</td><td>0.8112*</td><td>0.51%</td><td>0.4406*</td><td>0.0040</td></tr><tr><td>RFD (Ours)</td><td>0.8003*</td><td>1.02%</td><td>0.3678*</td><td>0.0073</td><td>0.8113*</td><td>0.52%</td><td>0.4405*</td><td>0.0041</td></tr><tr><td rowspan="10">Convolution</td><td rowspan="5">FiGNN</td><td>Scratch</td><td>0.7923</td><td>-</td><td>0.3735</td><td>-</td><td>0.8094</td><td>-</td><td>0.4424</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7925</td><td>0.03%</td><td>0.3728</td><td>0.0007</td><td>0.8117</td><td>0.28%</td><td>0.4405</td><td>0.0019</td></tr><tr><td>SCARF</td><td>0.7941</td><td>0.23%</td><td>0.3717</td><td>0.0018</td><td>0.8118</td><td>0.30%</td><td>0.4404</td><td>0.0020</td></tr><tr><td>MFP (Ours)</td><td>0.7971*</td><td>0.61%</td><td>0.3697*</td><td>0.0038</td><td>0.8117</td><td>0.28%</td><td>0.4404</td><td>0.0020</td></tr><tr><td>RFD (Ours)</td><td>0.7990*</td><td>0.85%</td><td>0.3684*</td><td>0.0051</td><td>0.8123*</td><td>0.36%</td><td>0.4395*</td><td>0.0029</td></tr><tr><td 
rowspan="5">FGCNN</td><td>Scratch</td><td>0.7951</td><td>-</td><td>0.3727</td><td>-</td><td>0.8107</td><td>-</td><td>0.4413</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7973</td><td>0.28%</td><td>0.3700</td><td>0.0027</td><td>0.8127</td><td>0.25%</td><td>0.4392</td><td>0.0021</td></tr><tr><td>SCARF</td><td>0.7964</td><td>0.16%</td><td>0.3700</td><td>0.0027</td><td>0.8120</td><td>0.16%</td><td>0.4398</td><td>0.0015</td></tr><tr><td>MFP (Ours)</td><td>0.7985*</td><td>0.43%</td><td>0.3697</td><td>0.0030</td><td>0.8135*</td><td>0.35%</td><td>0.4384*</td><td>0.0029</td></tr><tr><td>RFD (Ours)</td><td>0.7992*</td><td>0.52%</td><td>0.3682*</td><td>0.0045</td><td>0.8139*</td><td>0.39%</td><td>0.4381*</td><td>0.0032</td></tr><tr><td rowspan="15">Product</td><td rowspan="5">DeepFM</td><td>Scratch</td><td>0.7924</td><td>-</td><td>0.3747</td><td>-</td><td>0.8103</td><td>-</td><td>0.4416</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7970</td><td>0.58%</td><td>0.3692</td><td>0.0055</td><td>0.8109</td><td>0.07%</td><td>0.4414</td><td>0.0002</td></tr><tr><td>SCARF</td><td>0.7992</td><td>0.86%</td><td>0.3684</td><td>0.0063</td><td>0.8117</td><td>0.17%</td><td>0.4400</td><td>0.0016</td></tr><tr><td>MFP (Ours)</td><td>0.7998*</td><td>0.93%</td><td>0.3680</td><td>0.0067</td><td>0.8126*</td><td>0.28%</td><td>0.4392*</td><td>0.0024</td></tr><tr><td>RFD (Ours)</td><td>0.8010*</td><td>1.09%</td><td>0.3671*</td><td>0.0076</td><td>0.8139*</td><td>0.44%</td><td>0.4380*</td><td>0.0036</td></tr><tr><td rowspan="5">xDeepFM</td><td>Scratch</td><td>0.7967</td><td>-</td><td>0.3718</td><td>-</td><td>0.8112</td><td>-</td><td>0.4407</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7982</td><td>0.19%</td><td>0.3691</td><td>0.0027</td><td>0.8130</td><td>0.22%</td><td>0.4390</td><td>0.0017</td></tr><tr><td>SCARF</td><td>0.7992</td><td>0.31%</td><td>0.3685</td><td>0.0033</td><td>0.8126</td><td>0.17%</td><td>0.4395</td><td>0.0012</td></tr><tr><td>MFP (Ours)</td><td>0.7989</td><td>0.28%</td><td>0.3685</td><td>0.0033</td><td>0.8145*</td><td>0.41%</td><td>0.4374*</td><td>0.0033</td></tr><tr><td>RFD (Ours)</td><td>0.8012*</td><td>0.56%</td><td>0.3671*</td><td>0.0047</td><td>0.8152*</td><td>0.49%</td><td>0.4367*</td><td>0.0040</td></tr><tr><td rowspan="5">DCNv2</td><td>Scratch</td><td>0.7964</td><td>-</td><td>0.3727</td><td>-</td><td>0.8118</td><td>-</td><td>0.4403</td><td>-</td></tr><tr><td>MF4UIP</td><td>0.7987</td><td>0.29%</td><td>0.3686</td><td>0.0041</td><td>0.8149</td><td>0.38%</td><td>0.4370</td><td>0.0033</td></tr><tr><td>SCARF</td><td>0.8019</td><td>0.69%</td><td>0.3666</td><td>0.0061</td><td>0.8143</td><td>0.31%</td><td>0.4376</td><td>0.0027</td></tr><tr><td>MFP (Ours)</td><td>0.8029*</td><td>0.82%</td><td>0.3661*</td><td>0.0066</td><td>0.8164*</td><td>0.57%</td><td>0.4356*</td><td>0.0047</td></tr><tr><td>RFD (Ours)</td><td>0.8037*</td><td>0.92%</td><td>0.3655*</td><td>0.0072</td><td>0.8165*</td><td>0.58%</td><td>0.4355*</td><td>0.0048</td></tr></table>
|
| 249 |
+
|
| 250 |
+
range of carefully designed CTR models. Such a phenomenon suggests that a simple MLP structure is capable of capturing useful feature crossing patterns from the multi-field categorical data with the help of self-supervised signals.
|
| 251 |
+
|
| 252 |
+
- The two training paradigms (supervised learning vs. self-supervised learning) favor different types of model structures. Previous works always seek better designs of model structures under the "Scratch"
|
| 253 |
+
|
| 254 |
+
scheme. However, we can observe several counterexamples that a good model under the "Scratch" scheme is relatively bad for pretrain-finetune schemes (e.g., FGCNN), or a bad model under the "Scratch" scheme achieves competitive performance for pretrain-finetune schemes (e.g., DNN).
|
| 255 |
+
|
| 256 |
+
The above two phenomena bring about the research question of what model structures are more economical and effective for the
|
| 257 |
+
|
| 258 |
+

|
| 259 |
+
Figure 3: The model size (Top) and run time per epoch (Bottom) of different pretraining methods. We perform logarithmic scale on the $y$ axis, and denote the original value on the top of each bar. The experiment is conducted on the same server with one GeForce RTX 3090 GPU. We only consider the learning parameters above the embedding layer for model size. We consider the whole pretraining loop for run time per epoch, including the corruption operations as well as the backpropagation.
|
| 260 |
+
|
| 261 |
+
pretrain-finetune schemes. We give one conjecture about this topic here, and leave further studies as future works. Our hypothesis is that the pretrain-finetune scheme might prefer the bit-wise feature interaction (e.g., DCNv2) to the field-wise feature interaction (e.g., Transformer). The bit-wise feature interaction enables larger model capacity to learn better feature crossing patterns during the pretraining stage.
|
| 262 |
+
|
| 263 |
+
# 4.3 Efficiency Analysis (RQ3)
|
| 264 |
+
|
| 265 |
+
After validating the effectiveness of our proposed MFP and RFD, we conduct experiments to further analyze the efficiency of these pretraining methods from the following two perspectives:
|
| 266 |
+
|
| 267 |
+
RQ3.1 What is the complexity of each pretraining method?
|
| 268 |
+
|
| 269 |
+
RQ3.2 How many pretraining epochs should a method take to achieve a certain performance (i.e., sample efficiency)?
|
| 270 |
+
|
| 271 |
+
For RQ3.1, we have already provided the complexity analysis in Section 3.4. Following [56], we further empirically compare the model size and run time per epoch of different pretraining methods for different base CTR models in Figure 3. The experiments are conducted on the same server with one GeForce RTX 3090 GPU. For fair comparison, we launch one pretraining at a time as a single process to exclusively possess all the computational resources. We maintain the same structure of each base model for the four pretraining algorithms, and set the corrupt ratio $\gamma = 0.3$ . Since different pretraining algorithms require different amounts of dynamic GPU memory, we choose a proper batch size from \{256, 512, 1024, 2048, 4096\} to make full use of GPU memory. From Figure 3, we can obtain the following observations:
|
| 272 |
+
|
| 273 |
+
- SCARF is relatively time-consuming, though it has minimal parameters. The reason is that SCARF, as a contrastive method, requires computing representations of different views from the same instance, which doubles the run time of training loops.
|
| 274 |
+
- Although MFP maintains a similar amount of learning parameters to MF4UIP, it achieves roughly an $18 \times$ speedup over MF4UIP in terms of run time per epoch, since we adopt NCE to remove the softmax overhead over the million-level predictive space.
|
| 275 |
+
- RFD is the most efficient pretraining algorithm with the lowest complexity. It has relatively fewer learning parameters and the
|
| 276 |
+
|
| 277 |
+

|
| 278 |
+
|
| 279 |
+

|
| 280 |
+
|
| 281 |
+

|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
Figure 4: The AUC performance of three representative models (DCNv2, DNN, DeepFM) with different pretraining epochs on Avazu (left column) and Criteo (right column) datasets. We use black dashed lines to illustrate how many epochs should RFD take to achieve a dominant performance over other methods.
|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
|
| 290 |
+
lowest run time per epoch, showing its simplicity and practicability for industrial applications.
|
| 291 |
+
|
| 292 |
+
Next, to study RQ3.2, we investigate the sample efficiency and give the AUC performance of base models under different pretraining epochs in Figure 4. We choose DCNv2, DNN, and DeepFM as the representative base models due to the page limitation. MF4UIP is excluded due to its tremendous cost of time per epoch. Note that zero pretraining epoch indicates that we train the model from scratch
|
| 293 |
+
|
| 294 |
+

|
| 295 |
+
|
| 296 |
+

|
| 297 |
+
|
| 298 |
+

|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
|
| 302 |
+

|
| 303 |
+
Figure 5: The hyperparameter study on corrupt ratio $\gamma$ . We give the AUC and negative log loss performance of DNN, DCNv2 and DeepFM with MFP and RFD methods on Avazu (left two columns) and Criteo (right two columns) datasets.
|
| 304 |
+
|
| 305 |
+

|
| 306 |
+
|
| 307 |
+

|
| 308 |
+
|
| 309 |
+

|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
|
| 315 |
+

|
| 316 |
+
|
| 317 |
+

|
| 318 |
+
|
| 319 |
+
without pretraining. In Figure 4, we can observe that MFP and RFD consistently achieve better performance over SCARF under different pretraining epochs. Moreover, as illustrated by the black dashed lines, RFD can simply achieve the best performance with limited pretraining epochs (10~30), showing its superior sample efficiency for CTR prediction.
|
| 320 |
+
|
| 321 |
+
In summary, we validate the pretraining efficiency of our proposed methods (especially RFD), i.e., they can achieve better CTR performance with fewer learning parameters, higher throughput rates, and fewer pretraining epochs.
|
| 322 |
+
|
| 323 |
+
# 4.4 Ablation & Hyperparameter Study (RQ4)
|
| 324 |
+
|
| 325 |
+
In this section, we analyze the impact of hyperparameters or components in MFP and RFD, including the corrupt ratio $\gamma$ for both MFP and RFD, the number of noise features $K$ in NCE for MFP, and the feature replacement strategy for RFD. Similarly, we select DNN, DCNv2 and DeepFM as the representative base models due to the page limitation.
|
| 326 |
+
|
| 327 |
+
4.4.1 Corrupt Ratio $\gamma$ . We select the value of corrupt ratio $\gamma$ from $\{0.1, 0.2, 0.3, 0.4, 0.5\}$ , and show the impact in Figure 5. Both MFP and RFD favor a small corrupt ratio (i.e., $0.1 \sim 0.3$ ). The reason is that the over-corruption caused by a large corrupt ratio may change the sample semantics and disturb the model pretraining.
|
| 328 |
+
4.4.2 The Number of Noise Samples $K$ . We select the number of noise samples $K$ in NCE for MFP from \{10, 25, 50, 75, 100\}, and show the impact in Table 3. Surprisingly, the performance fluctuations of both metrics (i.e., AUC and log loss) brought by different $Ks$ are all within 0.0003, indicating that MFP is not sensitive to the number
|
| 329 |
+
|
| 330 |
+
Table 3: The hyperparameter study on the number of noise features $K$ in NCE for MFP. Different $Ks$ only result in 0.0003 performance fluctuation on each model.
|
| 331 |
+
|
| 332 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Model</td><td rowspan="2">Metric</td><td colspan="5">the number of noise features K</td></tr><tr><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td></tr><tr><td rowspan="6">Avazu</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8006</td><td>0.8006</td><td>0.8005</td><td>0.8006</td><td>0.8007</td></tr><tr><td>Log Loss</td><td>0.3674</td><td>0.3675</td><td>0.3674</td><td>0.3674</td><td>0.3673</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8029</td><td>0.8029</td><td>0.8027</td><td>0.8027</td><td>0.8027</td></tr><tr><td>Log Loss</td><td>0.3661</td><td>0.3661</td><td>0.3661</td><td>0.3662</td><td>0.3661</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.7998</td><td>0.7998</td><td>0.7999</td><td>0.7998</td><td>0.7999</td></tr><tr><td>Log Loss</td><td>0.3680</td><td>0.3680</td><td>0.3678</td><td>0.3680</td><td>0.3680</td></tr><tr><td rowspan="6">Criteo</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8145</td><td>0.8145</td><td>0.8146</td><td>0.8146</td><td>0.8147</td></tr><tr><td>Log Loss</td><td>0.4374</td><td>0.4373</td><td>0.4372</td><td>0.4372</td><td>0.4372</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8163</td><td>0.8164</td><td>0.8164</td><td>0.8165</td><td>0.8165</td></tr><tr><td>Log Loss</td><td>0.4357</td><td>0.4356</td><td>0.4356</td><td>0.4355</td><td>0.4355</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.8125</td><td>0.8126</td><td>0.8127</td><td>0.8126</td><td>0.8126</td></tr><tr><td>Log Loss</td><td>0.4392</td><td>0.4392</td><td>0.4390</td><td>0.4393</td><td>0.4392</td></tr></table>
|
| 333 |
+
|
| 334 |
+
of noise samples $K$ in NCE. A small number of noise features (10 noise features out of the million-level feature space) is sufficient for the model to learn effective feature crossing patterns and benefit the final CTR performance.
|
| 335 |
+
|
| 336 |
+
4.4.3 Feature Replacement Strategy. We investigate the impact of the feature replacement strategy in RFD, which needs to sample a replacer for the original feature. We compare four different replacement strategy variants shown in Table 4, and give the results in Table 5. We observe that sampling by the feature frequency distribution is relatively better than uniform sampling. Moreover, sampling from the global feature space can greatly hurt the CTR performance,
|
| 337 |
+
|
| 338 |
+
Table 4: The feature replacement strategy variants for RFD pretraining method. It is worth noting that our proposed RFD adopts the field-frequency strategy.
|
| 339 |
+
|
| 340 |
+
<table><tr><td>Sampling Method\Sampling Space</td><td>Field-specific Feature Space</td><td>Global Feature Space</td></tr><tr><td>Sample Uniformly</td><td>Field-uniform Strategy (F-U)</td><td>Global-uniform Strategy (G-U)</td></tr><tr><td>Sample by the feature frequency distribution</td><td>Field-frequency Strategy (F-F)</td><td>Global-frequency Strategy (G-F)</td></tr></table>
|
| 341 |
+
|
| 342 |
+
Table 5: The ablation study on feature replacement strategy for RFD. F-F, F-U, G-F, G-U is short for field-frequency, field-uniform, global-frequency, global-uniform strategies, respectively. The best results are given in bold, while the second-best values are underlined.
|
| 343 |
+
|
| 344 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Model</td><td rowspan="2">Metric</td><td colspan="4">Feature Replacement Strategy</td></tr><tr><td>F-F (RFD)</td><td>F-U</td><td>G-F</td><td>G-U</td></tr><tr><td rowspan="6">Avazu</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8016</td><td>0.8012</td><td>0.7996</td><td>0.7955</td></tr><tr><td>Log Loss</td><td>0.3666</td><td>0.3669</td><td>0.3680</td><td>0.3705</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8037</td><td>0.8035</td><td>0.8028</td><td>0.8016</td></tr><tr><td>Log Loss</td><td>0.3655</td><td>0.3656</td><td>0.3661</td><td>0.3669</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.8010</td><td>0.8008</td><td>0.7991</td><td>0.7947</td></tr><tr><td>Log Loss</td><td>0.3671</td><td>0.3675</td><td>0.3688</td><td>0.3705</td></tr><tr><td rowspan="6">Criteo</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8152</td><td>0.8145</td><td>0.8118</td><td>0.8093</td></tr><tr><td>Log Loss</td><td>0.4367</td><td>0.4373</td><td>0.4402</td><td>0.4423</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8165</td><td>0.8163</td><td>0.8152</td><td>0.8131</td></tr><tr><td>Log Loss</td><td>0.4355</td><td>0.4357</td><td>0.4367</td><td>0.4387</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.8139</td><td>0.8130</td><td>0.8100</td><td>0.8079</td></tr><tr><td>Log Loss</td><td>0.4380</td><td>0.4389</td><td>0.4417</td><td>0.4442</td></tr></table>
|
| 345 |
+
|
| 346 |
+
since an out-of-field replacer feature forms a simplistic pretext task where the model can easily detect the replaced field. The model might overfit the easy pretext task during the pretraining, and thus lose the generalization ability for downstream CTR prediction task.
|
| 347 |
+
|
| 348 |
+
# 5 RELATED WORK
|
| 349 |
+
|
| 350 |
+
# 5.1 Click-through Rate Prediction
|
| 351 |
+
|
| 352 |
+
The click-through rate (CTR) prediction serves as a core function module in various personalized online services, including online advertising, recommender systems, and web search, etc [30, 71]. With the rise of deep learning, many deep neural CTR models have been recently proposed. The core idea of them is to capture the feature interactions, which indicates the combination relationships of multiple features. The deep CTR models usually leverage both implicit and explicit feature interactions. While the implicit feature interactions are captured by a deep neural network (DNN), the explicit feature interactions are modeled by a specially designed learning function. According to the explicit feature interaction operators, these deep CTR models can be mainly classified into three categories: (1) product operator, (2) convolutional operator, and (3) attention operator.
|
| 353 |
+
|
| 354 |
+
Product Operator. The product-based CTR models originate from classical shallow models such as FM [47] and POLY2 [4].
|
| 355 |
+
|
| 356 |
+
FFM [26] and NFM [21] are variants of FM, where the second-order feature interactions are captured by the inner product of feature embeddings. DeepFM [14] and PNN [45] combine the FM layer with DNNs for higher-order feature interactions. PIN [46] further extends PNN by introducing a network-in-network structure to replace the inner product interaction function. FiBiNET [24] introduces the SENET mechanism [23] to learn the weights of features dynamically before product interactions. Moreover, DCN [58], xDeepFM [29], and DCNv2 [59] are proposed for explicit high-order feature interaction modeling by applying product-based feature interactions at each layer. Therefore, the order of feature interactions to be modeled increases with each layer and is determined by the layer depth.
|
| 357 |
+
|
| 358 |
+
Convolutional Operator. Apart from the product operator, Convolutional Neural Networks (CNN) and Graph Convolutional Networks (GCN) have also been explored for feature interaction modeling in CTR prediction. CCPM [33] is the first work to adopt the CNN module for CTR prediction. However, CCPM can only learn part of the feature interactions between adjacent features since it is sensitive to the field order. FGCNN [32] improves CCPM by introducing a recombination layer to model non-adjacent features. FiGNN [28] treats the multi-field categorical data as a fully connected graph, where each field serves as a graph node and feature interactions are captured via graph propagation.
|
| 359 |
+
|
| 360 |
+
Attention Operator. AFM [64] improves FM by leveraging an additional attention network to allow feature interactions to contribute differently to the final CTR prediction. AutoInt [50] utilizes a multi-head self-attentive neural network with residual connections to explicitly model the feature interactions with different orders. InterHAt [27] combines a transformer network with multiple attentional aggregation layers for feature interaction learning. These attention-based CTR models can also provide explainable prediction via attention weights.
|
| 361 |
+
|
| 362 |
+
# 5.2 Self-supervised Learning
|
| 363 |
+
|
| 364 |
+
Self-supervised learning has achieved great success in Natural Language Processing (NLP) [8, 10] and Computer Vision (CV) [20]. These methods usually adopt a pretrain-finetune scheme, where an encoder is first pretrained on pretext tasks, and a model initialized from the pretrained encoder is then finetuned for downstream tasks. According to the pretext task, self-supervised learning can be mainly classified into two categories: (1) contrastive methods and (2) generative methods. Contrastive methods learn a latent space to draw positive samples together (e.g., different views of the same image) and push apart negative samples (e.g., images from different categories) [11]. Numerous techniques have been proposed to promote the performance of contrastive methods, such as data augmentation [18, 70], contrastive losses [5, 6], momentum encoders [7, 13, 20], and memory banks [52, 61]. Generative methods [3, 10, 19, 67] reconstruct the original input sample from the corrupted one. For example, BERT [10] requires the model to recover the masked token from the corrupted sentence. Besides, ELECTRA [8] adopts an adversarial structure and pretrains the model as a discriminator to predict corrupted tokens, which is proven to be more sample-efficient.
|
| 365 |
+
|
| 366 |
+
In recommender systems, many works apply self-supervised learning to user behavior sequences [15, 37, 51, 60, 65], manually
|
| 367 |
+
|
| 368 |
+
designed graph data [17, 39, 54], or multi-modal data [36, 38]. They explore the pretraining methods for better representations to further enhance the recommendation performance with enriched side information. However, these sequential/graph/multi-modal methods are essentially incompatible with the CTR data, i.e., multi-field categorical data format.
|
| 369 |
+
|
| 370 |
+
In CTR prediction, there exist works [43, 55, 68] that incorporate self-supervised signals in a semi-supervised manner, where the cross-entropy loss is jointly optimized with an auxiliary loss in one stage. As for CTR pretraining methods, VIME [69] proposes a semi-supervised learning algorithm to learn a predictive function based on the frozen pretrained encoder. MF4UIP [57] leverages the BERT framework [10] for user intent prediction. SCARF [2] adopts the SimCLR framework [5] and the InfoNCE loss [42] to pretrain the model in a contrastive manner. Compared with these works, our proposed MFP and RFD methods are more scalable for industrial applications and achieve state-of-the-art performance in terms of both effectiveness and efficiency for CTR prediction.
|
| 371 |
+
|
| 372 |
+
# 6 CONCLUSION
|
| 373 |
+
|
| 374 |
+
In this paper, we propose a Model-agnostic Pretraining (MAP) framework that applies feature corruption and recovery on multi-field categorical data for CTR prediction. Based on different strategies of corruption and recovery, we derive two practical algorithms: masked feature prediction (MFP) and replaced feature detection (RFD). Extensive experiments show that MFP and RFD achieve new state-of-the-art performance in terms of both effectiveness and efficiency for CTR prediction. For future work, a promising direction is to explore which model structures are more suitable for the self-supervised paradigm, since we find that different models receive quite different performance gains when combined with self-supervised learning in Section 4.2. Furthermore, we will investigate the possible saturation of downstream CTR performance as the pretraining volume grows (e.g., from millions to billions of samples or even more).
|
| 375 |
+
|
| 376 |
+
# ACKNOWLEDGMENTS
|
| 377 |
+
|
| 378 |
+
The SJTU team is supported by Shanghai Municipal Science and Technology Major Project (2021SHZDZX0102) and National Natural Science Foundation of China (62177033). The work is also sponsored by Huawei Innovation Research Program. We thank MindSpore [1] for the partial support of this work.
|
| 379 |
+
|
| 380 |
+
# REFERENCES
|
| 381 |
+
|
| 382 |
+
[1] 2020. MindSpore. https://www.mindspore.cn/
|
| 383 |
+
[2] Dara Bahri, Heinrich Jiang, Yi Tay, and Donald Metzler. 2021. Scarf: Self-supervised contrastive learning using random feature corruption. arXiv preprint arXiv:2106.15147 (2021).
|
| 384 |
+
[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901.
|
| 385 |
+
[4] Yin-Wen Chang, Cho-Jui Hsieh, Kai-Wei Chang, Michael Ringgaard, and Chih-Jen Lin. 2010. Training and testing low-degree polynomial data mappings via linear SVM. Journal of Machine Learning Research 11, 4 (2010).
|
| 386 |
+
[5] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A simple framework for contrastive learning of visual representations. In International conference on machine learning. PMLR, 1597-1607.
|
| 387 |
+
[6] Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey E Hinton. 2020. Big self-supervised models are strong semi-supervised learners. Advances in neural information processing systems 33 (2020), 22243-22255.
|
| 388 |
+
|
| 389 |
+
[7] Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. 2020. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020).
|
| 390 |
+
[8] Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. Electra: Pre-training text encoders as discriminators rather than generators. arXiv preprint arXiv:2003.10555 (2020).
|
| 391 |
+
[9] Xinyi Dai, Jianghao Lin, Weinan Zhang, Shuai Li, Weiwen Liu, Ruiming Tang, Xiuqiang He, Jianye Hao, Jun Wang, and Yong Yu. 2021. An adversarial imitation click model for information retrieval. In Proceedings of the Web Conference 2021. 1809-1820.
|
| 392 |
+
[10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018).
|
| 393 |
+
[11] Debidatta Dwibedi, Yusuf Aytar, Jonathan Tompson, Pierre Sermanet, and Andrew Zisserman. 2021. With a little help from my friends: Nearest-neighbor contrastive learning of visual representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 9588-9597.
|
| 394 |
+
[12] Lingyue Fu, Jianghao Lin, Weiwen Liu, Ruiming Tang, Weinan Zhang, Rui Zhang, and Yong Yu. 2023. An F-shape Click Model for Information Retrieval on Multiblock Mobile Pages. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining. 1057-1065.
|
| 395 |
+
[13] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. 2020. Bootstrap your own latent-a new approach to self-supervised learning. Advances in neural information processing systems 33 (2020), 21271-21284.
|
| 396 |
+
[14] Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, and Xiuqiang He. 2017. DeepFM: a factorization-machine based neural network for CTR prediction. In IJCAI.
|
| 397 |
+
[15] Wei Guo, Can Zhang, Zhicheng He, Jiuiqin Qin, Huifeng Guo, Bo Chen, Ruiming Tang, Xiuqiang He, and Rui Zhang. 2022. Miss: Multi-interest self-supervised learning framework for click-through rate prediction. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 727-740.
|
| 398 |
+
[16] Michael Gutmann and Aapo Hyvarinen. 2010. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 297–304.
|
| 399 |
+
[17] Bowen Hao, Jing Zhang, Hongzhi Yin, Cuiping Li, and Hong Chen. 2021. Pretraining graph neural networks for cold-start users and items representation. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining. 265-273.
|
| 400 |
+
[18] Xiaoshuai Hao, Yi Zhu, Srikar Appalaraju, Aston Zhang, Wanqian Zhang, Bo Li, and Mu Li. 2022. MixGen: A New Multi-Modal Data Augmentation. arXiv preprint arXiv:2206.08358 (2022).
|
| 401 |
+
[19] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. 2022. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 16000-16009.
|
| 402 |
+
[20] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 2020. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 9729-9738.
|
| 403 |
+
[21] Xiangnan He and Tat-Seng Chua. 2017. Neural factorization machines for sparse predictive analytics. In SIGIR. 355-364.
|
| 404 |
+
[22] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In WWW. 173-182.
|
| 405 |
+
[23] Jie Hu, Li Shen, and Gang Sun. 2018. Squeeze-and-excitation networks. In Proceedings of the IEEE conference on computer vision and pattern recognition. 7132-7141.
|
| 406 |
+
[24] Tongwen Huang, Zhiqi Zhang, and Junlin Zhang. 2019. FiBiNET: combining feature importance and bilinear feature interaction for click-through rate prediction. In Proceedings of the 13th ACM Conference on Recommender Systems. 169-177.
|
| 407 |
+
[25] Yanhua Huang, Hangyu Wang, Yiyun Miao, Ruiwen Xu, Lei Zhang, and Weinan Zhang. 2022. Neural Statistics for Click-Through Rate Prediction. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1849–1853.
|
| 408 |
+
[26] Yuchin Juan, Yong Zhuang, Wei-Sheng Chin, and Chih-Jen Lin. 2016. Field-aware factorization machines for CTR prediction. In RecSys. 43-50.
|
| 409 |
+
[27] Zeyu Li, Wei Cheng, Yang Chen, Haifeng Chen, and Wei Wang. 2020. Interpretable click-through rate prediction through hierarchical attention. In Proceedings of the 13th International Conference on Web Search and Data Mining. 313-321.
|
| 410 |
+
[28] Zekun Li, Zeyu Cui, Shu Wu, Xiaoyu Zhang, and Liang Wang. 2019. Fi-gnn: Modeling feature interactions via graph neural networks for ctr prediction. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 539-548.
|
| 411 |
+
[29] Jianxun Lian, Xiaohuan Zhou, Fuzheng Zhang, Zhongxia Chen, Xing Xie, and Guangzhong Sun. 2018. xdeepfm: Combining explicit and implicit feature interactions for recommender systems. In KDD. 1754-1763.
|
| 412 |
+
[30] Jianghao Lin, Xinyi Dai, Yunjia Xi, Weiwen Liu, Bo Chen, Xiangyang Li, Chenxu Zhu, Huifeng Guo, Yong Yu, Ruiming Tang, et al. 2023. How Can Recommender Systems Benefit from Large Language Models: A Survey. arXiv preprint arXiv:2306.05817 (2023).
|
| 413 |
+
|
| 414 |
+
[31] Jianghao Lin, Weiwen Liu, Xinyi Dai, Weinan Zhang, Shuai Li, Ruiming Tang, Xiuyang He, Jianye Hao, and Yong Yu. 2021. A Graph-Enhanced Click Model for Web Search. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 1259–1268.
|
| 415 |
+
[32] Bin Liu, Ruiming Tang, Yingzhi Chen, Jinkai Yu, Huifeng Guo, and Yuzhou Zhang. 2019. Feature generation by convolutional neural network for click-through rate prediction. In WWW. 1119-1129.
|
| 416 |
+
[33] Qiang Liu, Feng Yu, Shu Wu, and Liang Wang. 2015. A Convolutional Click Prediction Model. In Proceedings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, 1743-1746.
|
| 417 |
+
[34] Xiao Liu, Fanjin Zhang, Zhenyu Hou, Li Mian, Zhaoyu Wang, Jing Zhang, and Jie Tang. 2021. Self-supervised learning: Generative or contrastive. IEEE Transactions on Knowledge and Data Engineering (2021).
|
| 418 |
+
[35] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019).
|
| 419 |
+
[36] Yong Liu, Susen Yang, Chenyi Lei, Guoxin Wang, Haihong Tang, Juyong Zhang, Aixin Sun, and Chunyan Miao. 2021. Pre-training graph transformer with multimodal side information for recommendation. In Proceedings of the 29th ACM International Conference on Multimedia. 2853-2861.
|
| 420 |
+
[37] Zhiwei Liu, Yongjun Chen, Jia Li, Philip S Yu, Julian McAuley, and Caiming Xiong. 2021. Contrastive self-supervised sequential recommendation with robust augmentation. arXiv preprint arXiv:2108.06479 (2021).
|
| 421 |
+
[38] Zhuang Liu, Yunpu Ma, Matthias Schubert, Yuanxin Ouyang, and Zhang Xiong. 2022. Multi-Modal Contrastive Pre-training for Recommendation. In Proceedings of the 2022 International Conference on Multimedia Retrieval. 99–108.
|
| 422 |
+
[39] Zaiqiao Meng, Siwei Liu, Craig Macdonald, and Iadh Ounis. 2021. Graph neural pre-training for enhancing recommendations using side information. arXiv preprint arXiv:2107.03936 (2021).
|
| 423 |
+
[40] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781 (2013).
|
| 424 |
+
[41] Andriy Mnih and Yee Whye Teh. 2012. A fast and simple algorithm for training neural probabilistic language models. arXiv preprint arXiv:1206.6426 (2012).
|
| 425 |
+
[42] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018).
|
| 426 |
+
[43] Yujie Pan, Jiangchao Yao, Bo Han, Kunyang Jia, Ya Zhang, and Hongxia Yang. 2021. Click-through Rate Prediction with Auto-Quantized Contrastive Learning. arXiv preprint arXiv:2109.13921 (2021).
|
| 427 |
+
[44] Jiarui Qin, Weinan Zhang, Xin Wu, Jiarui Jin, Yuchen Fang, and Yong Yu. 2020. User Behavior Retrieval for Click-Through Rate Prediction. In SIGIR.
|
| 428 |
+
[45] Yanru Qu, Han Cai, Kan Ren, Weinan Zhang, Yong Yu, Ying Wen, and Jun Wang. 2016. Product-based neural networks for user response prediction. In ICDM.
|
| 429 |
+
[46] Yanru Qu, Bohui Fang, Weinan Zhang, Ruiming Tang, Minzhe Niu, Huifeng Guo, Yong Yu, and Xiuqiang He. 2018. Product-based neural networks for user response prediction over multi-field categorical data. TOIS 37, 1 (2018), 1-35.
|
| 430 |
+
[47] Steffen Rendle. 2010. Factorization machines. In ICDM.
|
| 431 |
+
[48] Steffen Rendle. 2012. Factorization machines with libfm. TIST (2012).
|
| 432 |
+
[49] Matthew Richardson, Ewa Dominowska, and Robert Ragno. 2007. Predicting clicks: estimating the click-through rate for new ads. In WWW. ACM, 521-530.
|
| 433 |
+
[50] Weiping Song, Chence Shi, Zhiping Xiao, Zhijian Duan, Yewen Xu, Ming Zhang, and Jian Tang. 2019. Autoint: Automatic feature interaction learning via self-attentive neural networks. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 1161–1170.
|
| 434 |
+
[51] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential recommendation with bidirectional encoder representations from transformer. In Proceedings of the 28th ACM international conference on information and knowledge management. 1441-1450.
|
| 435 |
+
[52] Yonglong Tian, Dilip Krishnan, and Phillip Isola. 2020. Contrastive multiview coding. In European conference on computer vision. Springer, 776-794.
|
| 436 |
+
[53] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems. 5998-6008.
|
| 437 |
+
[54] Chen Wang, Yueqing Liang, Zhiwei Liu, Tao Zhang, and S Yu Philip. 2021. Pretraining Graph Neural Network for Cross Domain Recommendation. In 2021 IEEE Third International Conference on Cognitive Machine Intelligence (CogMI). IEEE, 140-145.
|
| 438 |
+
[55] Fangye Wang, Yingxu Wang, Dongsheng Li, Hansu Gu, Tun Lu, Peng Zhang, and Ning Gu. 2022. CL4CTR: A Contrastive Learning Framework for CTR Prediction. arXiv preprint arXiv:2212.00522 (2022).
|
| 439 |
+
[56] Fangye Wang, Yingxu Wang, Dongsheng Li, Hansu Gu, Tun Lu, Peng Zhang, and Ning Gu. 2022. Enhancing CTR Prediction with Context-Aware Feature Representation Learning. arXiv preprint arXiv:2204.08758 (2022).
|
| 440 |
+
[57] Peng Wang, Jiang Xu, Chunyi Liu, Hao Feng, Zang Li, and Jieping Ye. 2020. Masked-field Pre-training for User Intent Prediction. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management. 2789-2796.
|
| 441 |
+
|
| 442 |
+
[58] Ruoxi Wang, Bin Fu, Gang Fu, and Mingliang Wang. 2017. Deep & cross network for ad click predictions. In Proceedings of the ADKDD'17. 1-7.
|
| 443 |
+
[59] Ruoxi Wang, Rakesh Shivanna, Derek Cheng, Sagar Jain, Dong Lin, Lichan Hong, and Ed Chi. 2021. Dcn v2: Improved deep & cross network and practical lessons for web-scale learning to rank systems. In Proceedings of the Web Conference 2021. 1785-1797.
|
| 444 |
+
[60] Yu Wang, Hengrui Zhang, Zhiwei Liu, Liangwei Yang, and Philip S Yu. 2022. ContrastVAE: Contrastive Variational AutoEncoder for Sequential Recommendation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 2056-2066.
|
| 445 |
+
[61] Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. 2018. Unsupervised feature learning via non-parametric instance discrimination. In Proceedings of the IEEE conference on computer vision and pattern recognition. 3733-3742.
|
| 446 |
+
[63] Yunjia Xi, Weiwen Liu, Jianghao Lin, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, Rui Zhang, and Yong Yu. 2023. Towards Open-World Recommendation with Knowledge Augmentation from Large Language Models. arXiv preprint arXiv:2306.10933 (2023).
|
| 447 |
+
[64] Jun Xiao, Hao Ye, Xiangnan He, Hanwang Zhang, Fei Wu, and Tat-Seng Chua. 2017. Attentional factorization machines: Learning the weight of feature interactions via attention networks. IJCAI (2017).
|
| 448 |
+
[65] Xu Xie, Fei Sun, Zhaoyang Liu, Jinyang Gao, Bolin Ding, and Bin Cui. 2020. Contrastive pre-training for sequential recommendation. arXiv preprint arXiv:2010.14395 (2020).
|
| 449 |
+
[66] Ruibin Xiong, Yunchang Yang, Di He, Kai Zheng, Shuxin Zheng, Chen Xing, Huishuai Zhang, Yanyan Lan, Liwei Wang, and Tieyan Liu. 2020. On layer normalization in the transformer architecture. In International Conference on Machine Learning. PMLR, 10524-10533.
|
| 450 |
+
[67] Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. Advances in neural information processing systems 32 (2019).
|
| 451 |
+
[68] Tiansheng Yao, Xinyang Yi, Derek Zhiyuan Cheng, Felix Yu, Ting Chen, Aditya Menon, Lichan Hong, Ed H Chi, Steve Tjoa, Jieqi Kang, et al. 2021. Self-supervised learning for large-scale item recommendations. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4321-4330.
|
| 452 |
+
[69] Jinsung Yoon, Yao Zhang, James Jordon, and Mihaela van der Schaar. 2020. VIME: Extending the success of self- and semi-supervised learning to tabular domain. Advances in Neural Information Processing Systems 33 (2020), 11033-11043.
|
| 453 |
+
[70] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. 2017. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412 (2017).
|
| 454 |
+
[71] Weinan Zhang, Jiarui Qin, Wei Guo, Ruiming Tang, and Xiuqiang He. 2021. Deep learning for click-through rate estimation. IJCAI (2021).
|
| 455 |
+
|
| 456 |
+
# A IMPLEMENTATION DETAILS
|
| 457 |
+
|
| 458 |
+
In this section, we describe the implementation details for our empirical experiments. We conduct both supervised learning (i.e., the "Scratch" scheme) and self-supervised learning (i.e., four pretrain-finetune schemes) over different base models (i.e., CTR models). We first introduce the model configuration for each base model, and then give the training settings for supervised learning and self-supervised learning, respectively. Finally, we describe how to employ the pretraining methods for the assembled models (e.g., DCNv2 and DeepFM).
|
| 459 |
+
|
| 460 |
+
# A.1 Configuration for Base Models
|
| 461 |
+
|
| 462 |
+
We choose the embedding size from $\{16, 32, 64\}$ . The dropout rate is selected from $\{0.0, 0.1, 0.2\}$ . We utilize one linear layer after the feature interaction layer to make the final CTR prediction. Unless stated otherwise, we adopt ReLU as the activation function. The model-specific hyperparameter settings for base models are as follows:
|
| 463 |
+
|
| 464 |
+
- DNN. We select the size of DNN layer from $\{1000, 2000\}$ , and the number of DNN layers from $\{3, 6, 9, 12\}$ .
|
| 465 |
+
|
| 466 |
+
- AutoInt. We select the number of attention layers from $\{3,6,9,12\}$ . The number of attention heads per layer and the attention size are set to 1 and 64, respectively.
|
| 467 |
+
- Transformer. We select the number of layers from $\{3,6,9,12\}$ . The number of attention heads is set to 1, and the intermediate size of feed-forward network is set to quadruple the embedding size. We also try both the post-norm [53] and pre-norm [66] structure.
|
| 468 |
+
- FiGNN. We select the number of layers from $\{3,6,9,12\}$ , and apply residual connection for the graph layers.
|
| 469 |
+
- FGCNN. We maintain 4 tanh-activated convolutional layers with a kernel size of 7 and pooling size of 2 for each layer. The number of channels for each layer is set to 8, 10, 12, 14, respectively. The numbers of channels for recombination layers are all set to 3.
|
| 470 |
+
- DeepFM. We select the size of DNN layer from $\{1000, 2000\}$ , and the number of DNN layers from $\{3, 6, 9, 12\}$ .
|
| 471 |
+
- xDeepFM. We choose the number of CIN layers from $\{2,3,4,5\}$ , and the number of units per CIN layer is set to 25. We select the size of DNN layer from $\{1000,2000\}$ , and the number of DNN layers from $\{3,6,9,12\}$ .
|
| 472 |
+
- DCNv2. We select the size of DNN layer from $\{1000, 2000\}$ , and the number of DNN layers from $\{3,6,9,12\}$ . We force the CrossNet module to have the same number of layers as the DNN network.
|
| 473 |
+
|
| 474 |
+
# A.2 Settings for Supervised Learning
|
| 475 |
+
|
| 476 |
+
We train each base model from scratch based on click signals without pretraining. We adopt the Adam optimizer with the weight decay rate selected from $\{0.01, 0.05\}$. The batch size is 4096, and the learning rate is chosen from $\{10^{-3}, 7 \times 10^{-4}, 5 \times 10^{-4}\}$ without decay. We adopt early stopping if the AUC performance on the validation set stops increasing for two consecutive epochs. Finally, we choose the model at the iteration with the highest validation AUC for evaluation on the test set.
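As a rough illustration of this early-stopping rule, the sketch below keeps the checkpoint with the best validation AUC and stops after two consecutive epochs without improvement; `model`, `max_epochs`, the data loaders, and the helpers `train_one_epoch` and `evaluate_auc` are assumed to be defined elsewhere and are not part of the paper's released code.

```python
import copy

best_auc, best_state, epochs_without_gain = 0.0, None, 0
for epoch in range(max_epochs):
    train_one_epoch(model, train_loader, optimizer)    # assumed helper
    val_auc = evaluate_auc(model, valid_loader)        # assumed helper
    if val_auc > best_auc:
        best_auc = val_auc
        best_state = copy.deepcopy(model.state_dict())
        epochs_without_gain = 0
    else:
        epochs_without_gain += 1
        if epochs_without_gain >= 2:                   # two consecutive epochs without AUC gain
            break
model.load_state_dict(best_state)                      # evaluate this checkpoint on the test set
```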
|
| 477 |
+
|
| 478 |
+
# A.3 Settings for Self-supervised Learning
|
| 479 |
+
|
| 480 |
+
For self-supervised learning paradigm, we implement four different pretraining methods for CTR prediction. We give the settings for the pretraining and finetuning stages as follows.
|
| 481 |
+
|
| 482 |
+
A.3.1 Pretraining Stage. We adopt the Adam optimizer with a weight decay rate of 0.05. The learning rate is initialized at $10^{-3}$, and is scheduled by cosine decay. There are no warm-up epochs during pretraining. The corrupt ratio is selected from $\{0.1, 0.2, 0.3\}$. We adopt a two-layer MLP with 32 hidden units for the (field-wise) output layer. The method-specific settings are as follows:
|
| 483 |
+
|
| 484 |
+
- MF4UIP. We set the batch size to 256 and only pretrain each base model for 5 epochs, due to its tremendous cost in GPU memory and throughput time.
|
| 485 |
+
- SCARF. We set the batch size to 2048, and pretrain each base model for 60 epochs. The temperature in InfoNCE loss is 1.0.
|
| 486 |
+
- MFP. We set the batch size to 4096, and pretrain each base model for 60 epochs. The number of noise samples $K$ for NCE is 25.
|
| 487 |
+
- RFD. We set the batch size to 4096, and pretrain each base model for 60 epochs.
|
| 488 |
+
|
| 489 |
+
A.3.2 Finetuning Stage. The batch size is set to 4096. The initial learning rate is selected from $\{10^{-3},7\times 10^{-4},5\times 10^{-4}\}$, and is scheduled by cosine decay. The total number of finetuning epochs is chosen from $\{1,2,3,4\}$. We adopt the Adam optimizer and choose the weight decay rate from $\{0.01,0.05\}$. We choose the model at the iteration with the highest validation AUC for evaluation on the test set.
|
| 490 |
+
|
| 491 |
+
# A.4 How to Pretrain the Assembled Models?
|
| 492 |
+
|
| 493 |
+
Deep CTR models usually leverage a parallel structure to combine the explicit feature interaction model with the DNN module as follows:
|
| 494 |
+
|
| 495 |
+
$$
|
| 496 |
+
p\left(y_{i} = 1 \mid x_{i}\right) = f\left(x_{i}\right) + \operatorname{MLP}\left(x_{i}\right), \tag{10}
|
| 497 |
+
$$
|
| 498 |
+
|
| 499 |
+
where $f(\cdot)$ is the explicit feature interaction model. For example, DCNv2 assembles DNN and CrossNet, while DeepFM assembles DNN and FM. We abstract them as assembled models that add up the outputs from multiple ($\geq 2$) modules for the final CTR prediction.
|
| 500 |
+
|
| 501 |
+
Suppose we have $N$ modules to be assembled. Each of them produces one vector $q_{i,k}$ $(k = 1,\dots ,N)$ for the input sample $x_{i}$ before the last prediction layer. If the module is a shallow network that outputs a scalar (e.g., LR, FM), we simply denote it as a vector that has only one dimension. Then, we get the compact representation $q_{i}$ from the assembled model by concatenating the $N$ output vectors:
|
| 502 |
+
|
| 503 |
+
$$
|
| 504 |
+
q_{i} = \left[ q_{i,1} \oplus q_{i,2} \oplus \dots \oplus q_{i,N} \right], \tag{11}
|
| 505 |
+
$$
|
| 506 |
+
|
| 507 |
+
where $\oplus$ is the vector concatenation. After obtaining the compact representation $q_{i}$ , we can follow the methodology described in Section 3 to pretrain and finetune the assembled model.
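A minimal PyTorch-style sketch of Eq. (11), assuming each assembled module returns its pre-prediction output for a batch; the class name, module list, and shape handling are illustrative rather than the authors' actual code.

```python
import torch
import torch.nn as nn

class AssembledEncoder(nn.Module):
    """Concatenate the pre-prediction outputs of the assembled modules (Eq. 11)."""

    def __init__(self, branches):
        super().__init__()
        # e.g., [CrossNet, DNN] for DCNv2, or [FM, DNN] for DeepFM
        self.branches = nn.ModuleList(branches)

    def forward(self, x):
        outputs = []
        for branch in self.branches:
            q_k = branch(x)                # output vector before the prediction layer
            if q_k.dim() == 1:             # scalar modules (e.g., LR, FM): one-dimensional vector
                q_k = q_k.unsqueeze(-1)
            outputs.append(q_k)
        return torch.cat(outputs, dim=-1)  # compact representation q_i
```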
|
| 508 |
+
|
| 509 |
+
# B ADDITIONAL EXPERIMENTS
|
| 510 |
+
|
| 511 |
+
# B.1 Finetuning Strategy
|
| 512 |
+
|
| 513 |
+
Since the embedding layer and feature interaction (FI) layer are further updated during the finetuning stage, we provide an additional ablation study to investigate the influence of different finetuning strategies (i.e., freezing different parts of the CTR models during finetuning). We choose DCNv2, DNN, and DeepFM as the representative models, and study the effect for both MFP and RFD tasks. The results are reported in Table 6.
|
| 514 |
+
|
| 515 |
+
From Table 6, we observe that freezing either the embedding layer or the feature interaction layer severely hurts the final performance for all base models and pretraining methods. This indicates that there exists a gap between the pretraining objective and the CTR prediction task. The pretrained parameters provide a useful warm-up initialization, but still require further updating for the downstream CTR prediction task.
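The finetuning strategies in Table 6 amount to toggling gradient updates for the embedding and feature interaction (FI) parameters. A hedged sketch, assuming the CTR model exposes `embedding` and `fi_layer` submodules (both names are assumptions):

```python
def set_finetune_strategy(model, update_embed=True, update_fi=True):
    """Enable/disable gradient updates for the embedding and FI layers during finetuning."""
    for p in model.embedding.parameters():   # assumed submodule name
        p.requires_grad = update_embed
    for p in model.fi_layer.parameters():    # assumed submodule name
        p.requires_grad = update_fi
```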
|
| 516 |
+
|
| 517 |
+
# B.2 Joint Pretraining of MFP and RFD
|
| 518 |
+
|
| 519 |
+
We conduct ablation experiments to further analyze the effect of joint pretraining of our proposed MFP and RFD, where the loss function for each input $x_{i}$ is:
|
| 520 |
+
|
| 521 |
+
$$
|
| 522 |
+
L_{i}^{\mathrm{Joint}} = \alpha \times L_{i}^{\mathrm{MFP}} + (1 - \alpha) \times L_{i}^{\mathrm{RFD}}, \tag{12}
|
| 523 |
+
$$
|
| 524 |
+
|
| 525 |
+
where $\alpha$ is a hyperparameter to balance the loss terms from MFP and RFD. We set other hyperparameters to be the same as the best configuration in the RFD pretrain-finetune scheme for simplicity,
|
| 526 |
+
|
| 527 |
+
Table 6: The ablation study on the finetuning strategies (i.e., whether to freeze the embedding layer and the feature interaction layer or not). The best results are given in bold, while the second-best values are underlined.
|
| 528 |
+
|
| 529 |
+
<table><tr><td rowspan="2">Pretraining Strategy</td><td rowspan="2">Base Model</td><td colspan="2">Finetuning Strategy</td><td colspan="2">Avazu</td><td colspan="2">Criteo</td></tr><tr><td>Update Embed</td><td>Update FI</td><td>AUC</td><td>Log Loss</td><td>AUC</td><td>Log Loss</td></tr><tr><td rowspan="12">MFP</td><td rowspan="4">DNN</td><td>✓</td><td>✓</td><td>0.8006</td><td>0.3675</td><td>0.8145</td><td>0.4373</td></tr><tr><td>✓</td><td>X</td><td>0.7958</td><td>0.3706</td><td>0.8083</td><td>0.4433</td></tr><tr><td>X</td><td>✓</td><td>0.7749</td><td>0.3824</td><td>0.8059</td><td>0.4451</td></tr><tr><td>X</td><td>X</td><td>0.7325</td><td>0.4052</td><td>0.7611</td><td>0.4812</td></tr><tr><td rowspan="4">DCNv2</td><td>✓</td><td>✓</td><td>0.8029</td><td>0.3661</td><td>0.8164</td><td>0.4356</td></tr><tr><td>✓</td><td>X</td><td>0.8001</td><td>0.3679</td><td>0.8109</td><td>0.4409</td></tr><tr><td>X</td><td>✓</td><td>0.7767</td><td>0.3815</td><td>0.8103</td><td>0.4412</td></tr><tr><td>X</td><td>X</td><td>0.7396</td><td>0.4019</td><td>0.7710</td><td>0.4739</td></tr><tr><td rowspan="4">DeepFM</td><td>✓</td><td>✓</td><td>0.7998</td><td>0.3680</td><td>0.8126</td><td>0.4392</td></tr><tr><td>✓</td><td>X</td><td>0.7951</td><td>0.3710</td><td>0.8057</td><td>0.4458</td></tr><tr><td>X</td><td>✓</td><td>0.7778</td><td>0.3808</td><td>0.8050</td><td>0.4461</td></tr><tr><td>X</td><td>X</td><td>0.7580</td><td>0.3924</td><td>0.7762</td><td>0.4717</td></tr><tr><td rowspan="12">RFD</td><td rowspan="4">DNN</td><td>✓</td><td>✓</td><td>0.8016</td><td>0.3666</td><td>0.8152</td><td>0.4367</td></tr><tr><td>✓</td><td>X</td><td>0.7981</td><td>0.3691</td><td>0.8083</td><td>0.4434</td></tr><tr><td>X</td><td>✓</td><td>0.7763</td><td>0.3816</td><td>0.8065</td><td>0.4445</td></tr><tr><td>X</td><td>X</td><td>0.6775</td><td>0.4269</td><td>0.7611</td><td>0.4812</td></tr><tr><td rowspan="4">DCNv2</td><td>✓</td><td>✓</td><td>0.8037</td><td>0.3655</td><td>0.8165</td><td>0.4355</td></tr><tr><td>✓</td><td>X</td><td>0.8011</td><td>0.3675</td><td>0.8112</td><td>0.4406</td></tr><tr><td>X</td><td>✓</td><td>0.7776</td><td>0.3811</td><td>0.8098</td><td>0.4416</td></tr><tr><td>X</td><td>X</td><td>0.7355</td><td>0.4041</td><td>0.7639</td><td>0.4791</td></tr><tr><td rowspan="4">DeepFM</td><td>✓</td><td>✓</td><td>0.8010</td><td>0.3671</td><td>0.8139</td><td>0.4380</td></tr><tr><td>✓</td><td>X</td><td>0.7961</td><td>0.3705</td><td>0.8066</td><td>0.4450</td></tr><tr><td>X</td><td>✓</td><td>0.7804</td><td>0.3794</td><td>0.8038</td><td>0.4472</td></tr><tr><td>X</td><td>X</td><td>0.7577</td><td>0.3931</td><td>0.7861</td><td>0.4627</td></tr></table>
|
| 530 |
+
|
| 531 |
+
Table 7: The ablation study on the joint pretraining of MFP and RFD. The best results are given in bold, while the second-best values are underlined.
|
| 532 |
+
|
| 533 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Model</td><td rowspan="2">Metric</td><td colspan="3">Pretraining Strategy</td></tr><tr><td>MFP</td><td>RFD</td><td>Joint</td></tr><tr><td rowspan="6">Avazu</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8006</td><td>0.8016</td><td>0.8017</td></tr><tr><td>Log Loss</td><td>0.3675</td><td>0.3666</td><td>0.3667</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8029</td><td>0.8037</td><td>0.8035</td></tr><tr><td>Log Loss</td><td>0.3661</td><td>0.3655</td><td>0.3660</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.7998</td><td>0.8010</td><td>0.8015</td></tr><tr><td>Log Loss</td><td>0.3680</td><td>0.3671</td><td>0.3668</td></tr><tr><td rowspan="6">Criteo</td><td rowspan="2">DNN</td><td>AUC</td><td>0.8145</td><td>0.8152</td><td>0.8153</td></tr><tr><td>Log Loss</td><td>0.4373</td><td>0.4367</td><td>0.4366</td></tr><tr><td rowspan="2">DCNv2</td><td>AUC</td><td>0.8164</td><td>0.8165</td><td>0.8164</td></tr><tr><td>Log Loss</td><td>0.4356</td><td>0.4355</td><td>0.4356</td></tr><tr><td rowspan="2">DeepFM</td><td>AUC</td><td>0.8126</td><td>0.8139</td><td>0.8145</td></tr><tr><td>Log Loss</td><td>0.4392</td><td>0.4380</td><td>0.4374</td></tr></table>
|
| 534 |
+
|
| 535 |
+
and apply grid search to select $\alpha$ from $\{0.1, 0.3, 0.5, 0.7, 0.9\}$. DCNv2, DNN, and DeepFM are chosen as the representative base models. The results are reported in Table 7.
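A minimal sketch of one joint pretraining step under Eq. (12); `corrupt_for_mfp`, `corrupt_for_rfd`, `mfp_loss`, and `rfd_loss` are assumed helpers standing in for the two corruption-and-recovery pipelines, and the sketch illustrates why each instance requires two forward passes.

```python
def joint_pretrain_step(model, x, alpha=0.1):
    """One joint pretraining step following Eq. (12)."""
    x_mfp, mfp_targets = corrupt_for_mfp(x)   # masking-based corruption (assumed helper)
    x_rfd, rfd_labels = corrupt_for_rfd(x)    # replacement-based corruption (assumed helper)
    loss = alpha * mfp_loss(model, x_mfp, mfp_targets) \
         + (1.0 - alpha) * rfd_loss(model, x_rfd, rfd_labels)
    loss.backward()   # gradients flow through both corrupted views of the same instance
    return loss
```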
|
| 536 |
+
|
| 537 |
+
Our observations and discussion on the results in Table 7 are threefold:
|
| 538 |
+
|
| 539 |
+
- The joint method could achieve slightly better (or comparable) performance compared to the best pretraining method RFD.
|
| 540 |
+
- It is worth noting that the joint pretraining method consistently reaches the best performance with $\alpha = 0.1$ . This indicates that RFD surpasses MFP and mainly contributes to the performance improvement.
|
| 541 |
+
- In addition, since the corruption and recovery strategies for MFP and RFD are different, the joint training method requires two forward/backward propagations for each data instance, which greatly increases the training cost (e.g., GPU memory usage and run time per epoch).
|
| 542 |
+
|
| 543 |
+
Although the joint training method might achieve better performance with finer-grained hyperparameter search, we think RFD is still a more elegant and practical pretraining method in terms of both effectiveness and efficiency.
|
data/2023/2308_01xxx/2308.01737/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d2af43628e95bb81f5a29382b48d950b7e18588badc5a0e5676c60b2a107ed55
|
| 3 |
+
size 1133837
|
data/2023/2308_01xxx/2308.01737/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01738/ac7fea6c-1858-4700-ad7c-6584f1f16521_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:450c24a152febb6d3ced43d6e4157351b77adbf527de24a8ae8b4a21c765a52c
|
| 3 |
+
size 26965336
|
data/2023/2308_01xxx/2308.01738/full.md
ADDED
|
@@ -0,0 +1,688 @@
|
|
| 1 |
+
# Enhancing Visibility in Nighttime Haze Images Using Guided APSF and Gradient Adaptive Convolution
|
| 2 |
+
|
| 3 |
+
Yeying Jin*
|
| 4 |
+
|
| 5 |
+
National University of Singapore
|
| 6 |
+
|
| 7 |
+
Singapore, Singapore
|
| 8 |
+
|
| 9 |
+
e0178303@u.nus.edu
|
| 10 |
+
|
| 11 |
+
Beibei Lin*
|
| 12 |
+
|
| 13 |
+
National University of Singapore
|
| 14 |
+
|
| 15 |
+
Singapore, Singapore
|
| 16 |
+
|
| 17 |
+
beibei.lin@u.nus.edu
|
| 18 |
+
|
| 19 |
+
Wending Yan
|
| 20 |
+
|
| 21 |
+
Huawei International Pte Ltd
|
| 22 |
+
|
| 23 |
+
Singapore, Singapore
|
| 24 |
+
|
| 25 |
+
yan.wending@huawei.com
|
| 26 |
+
|
| 27 |
+
Yuan Yuan
|
| 28 |
+
|
| 29 |
+
Huawei International Pte Ltd
|
| 30 |
+
|
| 31 |
+
Singapore, Singapore
|
| 32 |
+
|
| 33 |
+
yuanyuan10@huawei.com
|
| 34 |
+
|
| 35 |
+
Wei Ye
|
| 36 |
+
|
| 37 |
+
Huawei International Pte Ltd
|
| 38 |
+
|
| 39 |
+
Singapore, Singapore
|
| 40 |
+
|
| 41 |
+
yawei10@huawei.com
|
| 42 |
+
|
| 43 |
+
Robby T. Tan
|
| 44 |
+
|
| 45 |
+
National University of Singapore
|
| 46 |
+
|
| 47 |
+
Singapore, Singapore
|
| 48 |
+
|
| 49 |
+
robby.tan@nus.edu.sg
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
Night Haze
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
Ours
|
| 56 |
+
Figure 1: Our nighttime dehazing results compared to existing methods; our method can handle night glow and low-light conditions.
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
Liu-22 [52]
|
| 60 |
+
|
| 61 |
+

|
| 62 |
+
Wang-22 [77]
|
| 63 |
+
|
| 64 |
+
# ABSTRACT
|
| 65 |
+
|
| 66 |
+
Visibility in hazy nighttime scenes is frequently reduced by multiple factors, including low light, intense glow, light scattering, and the presence of multicolored light sources. Existing nighttime dehazing methods often struggle with handling glow or low-light conditions, resulting in either excessively dark visuals or outputs with unsuppressed glow. In this paper, we enhance the visibility of a single nighttime haze image by suppressing glow and enhancing low-light regions. To handle glow effects, our framework learns from rendered glow pairs. Specifically, a light source aware network is proposed to detect the light sources of night images, followed by APSF (Atmospheric Point Spread Function)-guided glow rendering. Our framework is then trained on the rendered images, resulting in glow suppression. Moreover, we utilize gradient-adaptive convolution to capture edges and textures in hazy scenes. By leveraging the extracted edges and textures, we enhance the contrast of the scene without losing important structural details. To boost low-light intensity, our network learns an attention map, which is then adjusted by gamma
|
| 67 |
+
|
| 68 |
+
*Both authors contributed equally to this research.
|
| 69 |
+
|
| 70 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 71 |
+
|
| 72 |
+
MM '23, October 29-November 3, 2023, Ottawa, ON, Canada
|
| 73 |
+
|
| 74 |
+
© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM.
|
| 75 |
+
|
| 76 |
+
ACM ISBN 979-8-4007-0108-5/23/10...$15.00
|
| 77 |
+
|
| 78 |
+
https://doi.org/10.1145/3581783.3611884
|
| 79 |
+
|
| 80 |
+
correction. This attention has high values in low-light regions and low values in haze and glow regions. Extensive evaluation on real nighttime haze images demonstrates the effectiveness of our method. Our experiments show that our method achieves a PSNR of 30.38dB, outperforming state-of-the-art methods by $13\%$ on the GTA5 nighttime haze dataset. Our data and code are available at: https://github.com/jinyeying/nighttime_dehaze.
|
| 81 |
+
|
| 82 |
+
# CCS CONCEPTS
|
| 83 |
+
|
| 84 |
+
- Computing methodologies $\rightarrow$ Artificial intelligence; Computer vision; Computer vision tasks.
|
| 85 |
+
|
| 86 |
+
# KEYWORDS
|
| 87 |
+
|
| 88 |
+
nighttime, haze, glow, low-light, gradient, edge, texture, APSF
|
| 89 |
+
|
| 90 |
+
# ACM Reference Format:
|
| 91 |
+
|
| 92 |
+
Yeying Jin, Beibei Lin, Wending Yan, Yuan Yuan, Wei Ye, and Robby T. Tan. 2023. Enhancing Visibility in Nighttime Haze Images Using Guided APSF and Gradient Adaptive Convolution. In Proceedings of the 31st ACM International Conference on Multimedia (MM '23), October 29-November 3, 2023, Ottawa, ON, Canada. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3581783.3611884
|
| 93 |
+
|
| 94 |
+
# 1 INTRODUCTION
|
| 95 |
+
|
| 96 |
+
Nighttime hazy or foggy images often suffer from reduced visibility. In addition to the common issues faced by night images, such as low light, noise, uneven light distribution, and multiple light colors, nighttime hazy or foggy images also exhibit a strong glow and particle veiling effect. Despite these challenges, addressing
|
| 97 |
+
|
| 98 |
+
them is crucial for many applications, including self-driving cars, autonomous drones, and surveillance [3], since nighttime haze is a natural phenomenon that is frequent and inevitable.
|
| 99 |
+
|
| 100 |
+
Daytime haze removal methods cannot handle the unique challenges posed by nighttime haze. Traditional non-learning daytime dehazing methods (e.g., [4, 16, 23, 72]) rely on the haze imaging model [32]. However, this model is not valid at night due to the presence of artificial light sources and the complexity of illumination colors. As a result, unlike in daytime, we cannot assume a uniform atmospheric light color. Moreover, this daytime haze model does not account for the visual appearance of glow.
|
| 101 |
+
|
| 102 |
+
Existing nighttime dehazing methods produce unsatisfactory dark visuals or unmitigated glow effects. Non-deep learning methods (e.g., [45, 51, 77, 88]) introduce certain constraints on glow. However, they struggle with dark results due to the imprecise decomposition of glow and background layers or the use of dark channel prior [22] for dehazing. The main challenge faced by learning-based methods is the absence of real-world paired training data, as obtaining clear ground truth images of hazy nighttime scenes that include glow and multiple light sources is intractable. A learning-based method [98] has attempted to address this issue by utilizing synthetic data. However, this method is unable to effectively suppress glow since the synthetic dataset does not account for the glow effect. A semi-supervised deep learning-based network [87] suffers from artifacts and loss of low-frequency scene details.
|
| 103 |
+
|
| 104 |
+
In this paper, our goal is to enhance visibility in a single nighttime haze image by suppressing glow and enhancing low-light regions. Our glow suppression includes two main parts: APSF-guided glow rendering and gradient adaptive convolution. Our glow rendering method uses an APSF-guided approach to create glow effects for various light sources. We employ a light source aware network to detect the locations of light sources in images and then apply APSF-guided glow rendering to these sources. Our framework learns from the rendered images and thus can suppress glow effects in different light sources. Our gradient adaptive convolution captures edges and textures from hazy images. To be specific, edges are obtained by computing the pixel differences [71] between neighboring pixels, while the bilateral kernel [74] is used to extract textures of images. Both edges and textures are then fed into our framework to enhance the image details. To enhance the visibility of non-light regions, we introduce a novel attention-guided enhancement module. The hazy regions have low weights, while the dark regions have high weights in the attention map. As shown in Fig. 1, our method not only handles glow effects but also enhances the low-light regions.
|
| 105 |
+
|
| 106 |
+
Overall, our contributions can be summarized as follows:
|
| 107 |
+
|
| 108 |
+
- To our knowledge, our method is the first learning-based network that handles night glow and low-light conditions in one go.
|
| 109 |
+
- We present a light source aware network and APSF-guided glow rendering to simulate glow effects from different light sources. By learning from the APSF-guided glow rendering data, our framework effectively suppresses glow effects in real-world hazy images.
|
| 110 |
+
- Since night images contain less contrast, we employ gradient-adaptive convolution for edge enhancement and the bilateral kernel for texture enhancement.
|
| 111 |
+
|
| 112 |
+
Extensive experiments on nighttime images demonstrate the effectiveness of our approach in quantitative and qualitative evaluations. Our method achieves 30.38dB of PSNR, which outperforms existing nighttime dehazing methods by $13\%$ .
|
| 113 |
+
|
| 114 |
+
# 2 RELATED WORK
|
| 115 |
+
|
| 116 |
+
Early dehazing methods utilized multiple images [46, 56] or priors for atmospheric light and transmission estimation [5, 16, 23, 72]. With the advent of deep learning, numerous networks were proposed to estimate the transmission map [6, 63, 95] or output clean images end-to-end [18, 29, 37, 39, 62, 64, 81, 90, 92, 93]. Recent fully supervised [15, 19, 49, 61, 69, 76, 80, 83, 101], semi-supervised [10, 38, 43, 47, 65], zero-shot [35, 36], and unsupervised [17, 24, 48, 89, 100] methods have been developed. However, these methods struggle with nighttime haze due to non-uniform, multi-colored artificial light and the absence of clean ground truth data for training.
|
| 117 |
+
|
| 118 |
+
Optimization-based nighttime dehazing methods have followed the atmospheric scattering model (e.g., [1, 2, 59]), new imaging models (e.g., [52, 73, 96, 97]), etc. Pei and Lee [59] transfer the airlight colors of hazy nighttime images to daytime and use DCP to dehaze. Ancuti et al. [1, 2] introduce a fusion-based method and Laplacian pyramid decomposition to estimate local airlight. Zhang et al. [97] use illumination compensation, color correction, and DCP to dehaze. Zhang et al. [96] propose the maximum reflectance prior (MRP). Tang et al. [73] use Retinex theory and a Taylor series expansion. Liu et al. [51, 53] use regularization constraints. Wang et al. [77] propose the gray haze-line prior and a variational model. Existing nighttime dehazing methods depend on local patch-based atmospheric light estimation, assuming uniformity within a small patch; therefore, their performance is sensitive to the patch size. These methods are not adaptive and are time-consuming to optimize. Unlike them, our method is learning-based, and thus more efficient, practical, and fast.
|
| 119 |
+
|
| 120 |
+
Recently, learning-based nighttime dehazing methods [50] have been proposed. Zhang et al. [98] train the network using synthetic nighttime hazy images through fully supervised learning. However, this approach does not account for glow, leaving it in the results. Yan et al. [87] propose a semi-supervised method employing high-low frequency decomposition and a grayscale network. However, their results tend to be dark, with lost details. This is because coarse frequency-based decomposition methods struggle to effectively separate glow, leading to reduced brightness and visibility of the scene. The DeGlow-DeHaze network [33] estimates transmission followed by DehazeNet [6]. However, the atmospheric light estimated by the DeHaze network is obtained from the brightest region and assumed to be globally uniform, which is invalid at nighttime [11-14, 26, 66-68]. In contrast, our results can suppress glow and, at the same time, enhance low-light regions.
|
| 121 |
+
|
| 122 |
+
The glow of a point source, referred to as the Atmospheric Point Spread Function (APSF), has been studied in various works. Narasimhan and Nayar [57] first introduced APSF and developed a physics-based model for the multiple scattered light. Metari et al. [54] model the APSF kernel for multiple light scattering. Li et al. [45] decompose glow from the input image using a layer separation method [42], constrain glow by its smooth attribute, and dehaze using DCP. Park et al. [58] and Yang et al. [88] follow the nighttime haze model and use weighted entropy and super-pixel to
|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
Figure 2: (1) Our deglowing framework $G_{c}$ has two inputs: one to learn from real haze images $I_{h}$ and the other to learn from real clean reference images $I_{c}$. For input haze images $I_{h}$, $G_{c}$ outputs clean images $O_{c}$. For input clean images $I_{c}$, $G_{c}$ outputs clean images $G_{c}(I_{c})$. (2) The APSF guides the glow generator $G_{h}$ to generate glow $O_{h}$ on reference images $I_{c}$. (3) The upper left is the gradient adaptive convolution: from the gradient convolution (the blue window), we obtain edges; from the adaptive bilateral kernel (the red window), we enhance texture details. (4) The upper right is the attention-guided enhancement module.
|
| 126 |
+
|
| 127 |
+
estimate the atmospheric light and transmission map. However, after glow removal, these methods simply apply daytime dehazing to nighttime scenes, which results in low visibility and color distortion in their outcomes. Previous works have primarily focused on optimization-based approaches, while our work is the first to incorporate the APSF prior into a nighttime learning network.
|
| 128 |
+
|
| 129 |
+
# 3 PROPOSED METHOD
|
| 130 |
+
|
| 131 |
+
Fig. 2 shows our pipeline, including glow suppression and low-light enhancement. Our glow suppression has two parts: the deglowing network $G_{c}$ and the glow generator $G_{h}$. Our deglowing network $G_{c}$ transforms real haze images $I_{h}$ into clean images $O_{c}$. We employ a discriminator $D_{c}$ to determine whether the generated clean images $O_{c}$ and the reference images $I_{c}$ are real or not. Our novelty in the pipeline lies in three ideas: APSF-guided glow rendering, gradient-adaptive convolution, and attention-guided enhancement.
|
| 132 |
+
|
| 133 |
+
# 3.1 Light Source Aware Network
|
| 134 |
+
|
| 135 |
+
Nighttime scenes often contain active light sources such as streetlights, car headlights, and building lights. These sources can cause strong glow in hazy nighttime scenes. The appearance of haze and glow in nighttime scenes can be modeled as [45]:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
I_{h}(x) = I_{c}(x) t(x) + A(x)\left(1 - t(x)\right) + L_{s}(x) * \mathrm{APSF}, \tag{1}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $G(x) = L_s(x) * \mathrm{APSF}$ is the glow map, $L_s(x)$ represents the light sources, APSF stands for the Atmospheric Point Spread Function, and * denotes the 2D convolution. $I_h$ is an observed hazy image. $I_c$ is the scene radiance (without haze). $A$ is the atmospheric
|
| 142 |
+
|
| 143 |
+
# Algorithm 1 Light Source Map Detection
|
| 144 |
+
|
| 145 |
+
1: Generate an initial light source mask by thresholding the night input $I, \hat{M}_{i,j} \gets \begin{cases} 1 & \text{if } \max_{c \in \{r,g,b\}} \left( I_{i,j}^c \right) > 0.8 \\ 0 & \text{otherwise} \end{cases}$
|
| 146 |
+
2: Refine $\hat{M}_{i,j}$ to $M_{i,j}$ using alpha matting [34],
|
| 147 |
+
3: Calculate the percentage of pixels in the mask, light\_sz $\leftarrow \frac{\sum(M)}{\mathrm{numel}(M)}\times 100$
|
| 148 |
+
4: Obtain the light source image, $L_{s} \gets I \odot M$
|
| 149 |
+
|
| 150 |
+
light, and $t$ is the transmission, modeled as $t(x) = e^{-\beta d(x)}$ , where $\beta$ is the extinction coefficient. Light sources play an important role and can be utilized in three ways: (1) to inform $G_{c}$ about the location of light sources (as shown in Fig. 4), (2) to guide $G_{h}$ to generate glow output $O_{h}$ , and (3) to render the glow data $I_{g}$ using APSF (as shown in Fig. 5). For (1), we define light source consistency loss to keep the light source regions consistent in input and output images, therefore to maintain the same color and shape of these regions:
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\mathcal{L}_{\mathrm{ls}} = \left| O_{c} \odot M - L_{s} \right|_{1}, \tag{2}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
where $L_{s}$ is the light source, $O_{c}$ is the output clean image, $M$ is the soft matting map, and $\odot$ is element-wise multiplication. The process of obtaining them is depicted in Fig. 3 and Algorithm 1 and involves the following steps: First, we identify the regions that contain light sources. Next, an initial light source mask $\hat{M}$ is generated by thresholding the night image. To obtain a more accurate separation of the light sources from the surrounding areas and to ensure
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
Figure 3: We show Algorithm 1, light source map detection: (1) We first generate an initial light source mask $\hat{M}$ based on intensity, (2) then refine the mask using alpha matting [34] to obtain the soft matting map $M$ . (3) By multiplying the soft matting map $M$ with the night clean image $I_{c}$ , we obtain the light source map $L_{s}$ . After obtaining the light source, we show Algorithm 2, APSF-guided nighttime glow rendering: (1) We perform APSF 2D convolution on the light source map to render the glow $G$ . (2) Finally, by combining the night clean and glow images, we obtain the rendered glow image $I_{g}$ . More results are shown in Fig. 5.
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Figure 4: We show the light source maps $L_{s}$ of nighttime hazy images.
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
|
| 170 |
+
# Algorithm 2 APSF-based Nighttime Glow Rendering
|
| 171 |
+
|
| 172 |
+
1: Compute the APSF function using Algorithm 3, $APSF \leftarrow \text{PSFWEIGHT}(\theta, T, q)$
|
| 173 |
+
2: Convert the APSF function to an APSF2D, $APSF2D \gets \mathrm{GET2D}(APSF)$
|
| 174 |
+
3: Perform APSF2D convolution on the light source image, $G \leftarrow L_{S} * APSF2D$ ,
|
| 175 |
+
4: Calculate the parameter for combining the clean and glow image, $\epsilon \sim \mathcal{N}(0,1)$ , $\alpha \gets 0.4196 \cdot \mathrm{light\_sz}^2 - 4.258 \cdot \mathrm{light\_sz} + 11.35 + 0.05 \cdot \epsilon$
|
| 176 |
+
5: Combine the clean and glow image to render night glow, $I_{g} \gets 0.99 \cdot I_{c} + \alpha \cdot G$
|
| 177 |
+
6: Add Gaussian noise, $I_{g} \gets \mathrm{ADDNOISE}(I_{g})$
|
| 178 |
+
|
| 179 |
+
smoother transitions, we refine the initial mask $\hat{M}$ into a matting map $M$ using alpha matting [34]. Finally, we isolate the light sources $L_{s}$ by element-wise multiplication of the night image $I$ and the refined soft matting map $M$ .
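As a rough illustration (not the authors' implementation), Algorithm 1 can be sketched in NumPy as follows. The Gaussian soft refinement stands in for the closed-form alpha matting of [34], and the function name, threshold default and `sigma` are our own assumptions:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def detect_light_source_map(img, thresh=0.8, sigma=5.0):
    """Sketch of Algorithm 1: light source map detection.

    img: RGB night image as floats in [0, 1], shape (H, W, 3).
    The alpha-matting refinement of [34] is approximated by a Gaussian
    blur (illustrative stand-in, not the paper's matting solver).
    """
    # Step 1: initial hard mask from the per-pixel maximum over R, G, B.
    m_hat = (img.max(axis=2) > thresh).astype(np.float32)

    # Step 2: refine the hard mask into a soft matting map M.
    m = np.clip(gaussian_filter(m_hat, sigma=sigma), 0.0, 1.0)

    # Step 3: percentage of light-source pixels, light_sz.
    light_sz = 100.0 * m.sum() / m.size

    # Step 4: isolate the light sources, L_s = I (element-wise) M.
    l_s = img * m[..., None]
    return m, light_sz, l_s
```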
|
| 180 |
+
|
| 181 |
+
# 3.2 APSF-Guided Nighttime Glow Rendering
|
| 182 |
+
|
| 183 |
+
After obtaining the light source maps, we present an APSF-based method for rendering nighttime glow effects, as shown in Fig. 3 and Algorithm 2. First, we compute the APSF function in Algorithm 3 and convert it into a 2D format, which allows us to perform 2D convolution on the light source maps, resulting in the glow
|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
|
| 199 |
+

|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
Figure 5: With APSF, we can render glow images $I_{g}$ (bottom) on night clean images $I_{c}$ (top), with the help of light source maps.
|
| 203 |
+
|
| 204 |
+

|
| 205 |
+
|
| 206 |
+

|
| 207 |
+
|
| 208 |
+

|
| 209 |
+
|
| 210 |
+
images. Then, we combine the clean and glow images to render a realistic glow effect in the final image. In Fig. 5, we provide examples of paired nighttime clean $I_{c}$ and glow $I_{g}$ images, demonstrating the effectiveness of our approach.
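The rendering pipeline of Algorithm 2 can be sketched as follows; `apsf_1d` is a radial APSF profile (e.g. produced by the Algorithm 3 sketch given later in this section), and the helper names, kernel construction and noise level are illustrative assumptions rather than the exact implementation:

```python
import numpy as np
from scipy.signal import fftconvolve

def apsf_1d_to_2d(apsf_1d):
    """Rotate a 1D radial APSF profile into a normalized 2D kernel."""
    n = len(apsf_1d)
    yy, xx = np.mgrid[-n + 1:n, -n + 1:n]
    r = np.sqrt(xx ** 2 + yy ** 2)
    kernel = np.interp(r, np.arange(n), apsf_1d, right=0.0)
    return kernel / kernel.sum()

def render_glow(i_c, l_s, light_sz, apsf_1d, noise_std=0.01, seed=None):
    """Sketch of Algorithm 2: APSF-guided nighttime glow rendering."""
    rng = np.random.default_rng(seed)
    kernel = apsf_1d_to_2d(apsf_1d)

    # Step 3: G = L_s * APSF2D (2D convolution, channel by channel).
    glow = np.stack([fftconvolve(l_s[..., c], kernel, mode="same")
                     for c in range(l_s.shape[-1])], axis=-1)

    # Step 4: blending weight alpha from the light-source coverage light_sz.
    eps = rng.standard_normal()
    alpha = 0.4196 * light_sz ** 2 - 4.258 * light_sz + 11.35 + 0.05 * eps

    # Steps 5-6: combine clean image and glow, then add Gaussian noise.
    i_g = 0.99 * i_c + alpha * glow
    i_g += noise_std * rng.standard_normal(i_c.shape)
    return np.clip(i_g, 0.0, 1.0)
```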
|
| 211 |
+
|
| 212 |
+
In Fig. 3 (top left), we use the phase function $P(\cos \theta)$ [25] to approximate light scattering in nighttime glow. The scattering angle $\theta$ is defined as the angle between the incident $(\theta', \mu')$ and scattered light $(\theta, \mu)$ directions, where $\mu' = \cos \theta'$ and $\mu = \cos \theta$ represent the angular distribution of light. The scattered light intensity $I(T, \mu)$ , considering optical thickness $(T)$ , measures the attenuation of light due to fog. Here, $T = \beta R$ , with $\beta$ representing the attenuation coefficient and $R$ representing the distance between the isotropic source and the pinhole camera. The shape of the phase function $P$ depends on the size of the scatterer, and hence the weather conditions. For large particles, such as fog, $P$ exhibits a strong peak in the direction of light incidence.
|
| 213 |
+
|
| 214 |
+
Algorithm 3 shows the APSF weight calculation process [57]. There are three input parameters: angle $\theta$ ( $-180^{\circ}$ to $180^{\circ}$ ), optical thickness $T$ , and forward scattering parameter $q$ that indicates the
|
| 215 |
+
|
| 216 |
+
# Algorithm 3 APSF Weights Calculation
|
| 217 |
+
|
| 218 |
+
Require: $\theta$ , optical thickness $T$ , forward scattering parameter $q$
|
| 219 |
+
Ensure: weights
|
| 220 |
+
|
| 221 |
+
1: $\mu \gets \cos (\theta)$
|
| 222 |
+
2: if look-up table (LUT) for Legendre polynomials exists then
|
| 223 |
+
|
| 224 |
+
3: Load LUT
|
| 225 |
+
4: else
|
| 226 |
+
5: Generate LUT and save to file
|
| 227 |
+
6: Define $\alpha_{\mathrm{m}}(m)\gets m + 1,\beta_{\mathrm{m}}(m,q)\gets \frac{(2m + 1)(1 - q^{m - 1})}{m}$
|
| 228 |
+
7: Define $g_{\mathrm{m}}(m, \alpha_{\mathrm{m}}, \beta_{\mathrm{m}}, T) \gets e^{(-\beta_{\mathrm{m}}T - \alpha_{\mathrm{m}}\log (T))}$ .
|
| 229 |
+
8: Initialize weights to all zeros
|
| 230 |
+
9: for $m \gets 1$ to 200 (number of polynomials) do
|
| 231 |
+
10: if $m == 1$ then
|
| 232 |
+
11: $L_{\mathrm{m - 1}}(\mu)\gets 1$
|
| 233 |
+
12: else
|
| 234 |
+
13: $L_{\mathrm{m - 1}}(\mu)\gets L_{\mathrm{m}}(\mu)$
|
| 235 |
+
14: $L_{\mathrm{m}}(\mu)\gets$ interpolate LUT for $m$ and $\mu$
|
| 236 |
+
15: $p\gets g_{\mathrm{m}}(m,\alpha_{\mathrm{m}}(m),\beta_{\mathrm{m}}(m,q),T)(L_{\mathrm{m - 1}}(\mu) + L_{\mathrm{m}}(\mu)),$ Eq. (3).
|
| 237 |
+
16: weights $\leftarrow$ weights + p
|
| 238 |
+
17: weights $\leftarrow$ weights $\cdot T^2$ (normalize)
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 6: Using gradient adaptive convolution, we can obtain edges (middle) and textures (bottom) from haze images.
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
|
| 251 |
+

|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+

|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
|
| 265 |
+
atmospheric conditions [55]. The phase function $P$ can be expanded using a Legendre polynomial series $L_{m}$ (where $m$ denotes the order) [7]:
|
| 266 |
+
|
| 267 |
+
$$
|
| 268 |
+
I(T, \mu) = \sum_{m = 1}^{\infty} g_{m}(T) \left( L_{m - 1}(\mu) + L_{m}(\mu) \right), \tag{3}
|
| 269 |
+
$$
|
| 270 |
+
|
| 271 |
+
where $g_{m}(T) = I_{0}\, e^{-\beta_{m}T - \alpha_{m}\log T}$ , $\alpha_{m} = m + 1$ , and $\beta_{m} = \frac{2m + 1}{m}\left(1 - q^{m - 1}\right)$ .
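As a sketch, the series in Eq. (3) and Algorithm 3 can be evaluated directly with SciPy's Legendre polynomials (the look-up table in Algorithm 3 is only a speed optimization). Here `n_terms=200` follows the loop bound in Algorithm 3, $I_0$ is assumed to be 1, and the example values of $T$ and $q$ at the bottom are arbitrary:

```python
import numpy as np
from scipy.special import eval_legendre

def psf_weights(theta_deg, T, q, n_terms=200):
    """Sketch of Algorithm 3 / Eq. (3): APSF weights over scattering angles.

    theta_deg: array of angles in degrees (e.g. -180..180, or 0..180 for a
    radial profile). T: optical thickness. q: forward scattering parameter.
    """
    mu = np.cos(np.deg2rad(theta_deg))
    weights = np.zeros_like(mu, dtype=float)
    for m in range(1, n_terms + 1):
        alpha_m = m + 1
        beta_m = (2 * m + 1) * (1 - q ** (m - 1)) / m
        g_m = np.exp(-beta_m * T - alpha_m * np.log(T))  # I_0 assumed to be 1
        weights += g_m * (eval_legendre(m - 1, mu) + eval_legendre(m, mu))
    return weights * T ** 2  # final normalization step of Algorithm 3

# Example: radial profile usable by the glow-rendering sketch above
# (T and q chosen arbitrarily for illustration).
# apsf_1d = psf_weights(np.linspace(0.0, 180.0, 64), T=1.2, q=0.9)
```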
|
| 272 |
+
|
| 273 |
+
# 3.3 Gradient Adaptive Convolution
|
| 274 |
+
|
| 275 |
+
Nighttime hazy images suffer from low contrast, missing textures, and uneven illumination. Vanilla 2D convolution, which computes the output based solely on input pixel values and the weights in the convolution kernel, may not be sufficient for enhancing nighttime haze images. Hence, to improve the edge and texture details shown in Fig. 6, we utilize gradient-adaptive convolution.
|
| 276 |
+
|
| 277 |
+

|
| 278 |
+
Figure 7: Our gradient adaptive convolution combines (b) gradient convolution (blue) to capture edges, and (c) an adaptive bilateral kernel (red) to obtain textures.
|
| 279 |
+
|
| 280 |
+
3.3.1 Edge Capture using Gradient Convolution. We utilize nearby information to preserve the gradient information and improve contrast by extracting edges using gradient-adaptive convolution. The main idea of gradient convolution is to use local binary patterns (LBP) [60, 71] in a $k \times k$ neighborhood, where the central pixel serves as a threshold and the values of the surrounding pixels are compared to the central pixel. If a neighboring pixel's value exceeds the central pixel's value, a position marker of 1 is assigned; otherwise, a value of 0 is assigned. The vanilla convolution and gradient convolutions are expressed as follows:
|
| 281 |
+
|
| 282 |
+
$$
|
| 283 |
+
\mathbf{v}_{i}^{\prime} = \sum_{j \in \Omega(i)} \mathbf{w}[\mathbf{p}_{i} - \mathbf{p}_{j}]\, \mathbf{v}_{j}, \tag{4}
|
| 284 |
+
$$
|
| 285 |
+
|
| 286 |
+
$$
|
| 287 |
+
\mathbf{v}_{i}^{\prime} = \sum_{j \in \Omega(i)} \mathbf{w}[\mathbf{p}_{i} - \mathbf{p}_{j}]\, (\mathbf{v}_{j} - \mathbf{v}_{j}^{\prime}), \quad \text{(Gradient Conv)} \tag{5}
|
| 288 |
+
$$
|
| 289 |
+
|
| 290 |
+
where $\mathbf{v} = (\mathbf{v}_1, \dots, \mathbf{v}_n)$ , $\mathbf{v}_i \in \mathbb{R}^c$ over $n$ pixels and $c$ channels, are the input image features. $\mathbf{w}$ is the weight within the $k \times k$ convolution window $\Omega(\cdot)$ . We use $[\mathbf{p}_i - \mathbf{p}_j]$ to denote indexing of the spatial dimensions of an array with 2D spatial offsets. This convolution operation results in a $c'$ -channel output, $\mathbf{v}_i' \in \mathbb{R}^{c'}$ , at each pixel $i$ . In Fig. 7 (a) and (b), instead of using input pixel values as in Eq. (4), we use the differences between neighboring pixels in Eq. (5) to capture the gradient information.
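A short PyTorch sketch may make the gradient convolution concrete. We follow the central-pixel-difference reading used by pixel difference networks [71], where each neighbour is replaced by its difference to the window centre before the usual convolution; this reduces to one extra 1×1 convolution. The class name is ours, and this is an illustration rather than the paper's layer definition:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GradientConv2d(nn.Module):
    """Sketch of the gradient (pixel-difference) convolution of Eq. (5)."""

    def __init__(self, in_ch, out_ch, k=3):
        super().__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, k, padding=k // 2, bias=False)

    def forward(self, x):
        # sum_j w_j (v_j - v_centre) = conv(x) - (sum_j w_j) * v_centre,
        # so the difference term is just a 1x1 convolution whose weights
        # are the spatial sums of the k x k kernel.
        out = self.conv(x)
        w_sum = self.conv.weight.sum(dim=(2, 3), keepdim=True)  # (out, in, 1, 1)
        return out - F.conv2d(x, w_sum)

# The self-supervised gradient loss of Eq. (6) is then an L1 distance
# between edge maps of the output and the input, e.g. (with a fine-tuned
# edge network `edge_net`, assumed here):
# loss_g = (edge_net(o_c) - edge_net(i_h)).abs().mean()
```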
|
| 291 |
+
|
| 292 |
+
We fine-tune the pre-trained pixel difference network [71] on nighttime haze images. Then, we use this fine-tuned network $D$ to extract edges from input haze images and enforce the consistency between the input $I_h$ and the output $O_c$ . Our self-supervised gradient loss is defined as follows:
|
| 293 |
+
|
| 294 |
+
$$
|
| 295 |
+
\mathcal{L}_{g} = \left| D\left(O_{c}\right) - D\left(I_{h}\right) \right|_{1}. \tag{6}
|
| 296 |
+
$$
|
| 297 |
+
|
| 298 |
+
3.3.2 Texture Capture using Bilateral Kernel. A single CNN may not be sufficient for effectively handling uneven light distribution within a nighttime haze image. Therefore, we propose using a bilateral kernel in adaptive convolution to extract texture details, as shown in Fig. 7 (c):
|
| 299 |
+
|
| 300 |
+
$$
|
| 301 |
+
\mathbf{v}_{i}^{\prime} = \sum_{j \in \Omega(i)} K\left(\mathbf{f}_{i}, \mathbf{f}_{j}\right) \mathbf{w}\left[\mathbf{p}_{i} - \mathbf{p}_{j}\right] \mathbf{v}_{j}, \quad \text{(Adaptive Conv)} \tag{7}
|
| 302 |
+
$$
|
| 303 |
+
|
| 304 |
+
where $K$ is a bilateral kernel that depends on pixel features $\mathbf{f}$ [70]. We use color features $\mathbf{f} = (r,g,b)$ , $K(\mathbf{f}_i,\mathbf{f}_j) = \exp\left(-\frac{1}{2\alpha_1}\left\|\mathbf{f}_i - \mathbf{f}_j\right\|^2\right)$ , and $\mathbf{w}[\mathbf{p}_i - \mathbf{p}_j] = \exp\left(-\frac{1}{2\alpha_2}\left\|\mathbf{p}_i - \mathbf{p}_j\right\|^2\right)$ .
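Below is a PyTorch sketch of Eq. (7) using the Gaussian choices for $K$ and $\mathbf{w}$ given above; the normalization of the combined weights and the default values of $\alpha_1$, $\alpha_2$ and the window size are our assumptions:

```python
import torch
import torch.nn.functional as F

def bilateral_kernel_conv(x, feat, k=5, alpha1=0.1, alpha2=2.0):
    """Sketch of Eq. (7): v'_i = sum_j K(f_i, f_j) w[p_i - p_j] v_j.

    x:    (B, C, H, W) features to be filtered.
    feat: (B, Cf, H, W) guidance features f, e.g. the RGB image.
    """
    b, c, h, w = x.shape
    pad = k // 2

    # Spatial Gaussian w[p_i - p_j] over the k x k window Omega(i).
    ys, xs = torch.meshgrid(torch.arange(k) - pad, torch.arange(k) - pad,
                            indexing="ij")
    w_spatial = torch.exp(-(xs ** 2 + ys ** 2).float() / (2 * alpha2))
    w_spatial = w_spatial.reshape(1, k * k, 1, 1)

    # k x k neighbourhoods of the input and the guidance features.
    x_un = F.unfold(x, k, padding=pad).view(b, c, k * k, h, w)
    f_un = F.unfold(feat, k, padding=pad).view(b, feat.shape[1], k * k, h, w)

    # Range kernel K(f_i, f_j) from guidance-feature differences.
    k_range = torch.exp(-((f_un - feat.unsqueeze(2)) ** 2).sum(1) / (2 * alpha1))

    weights = k_range * w_spatial                         # (B, k*k, H, W)
    weights = weights / weights.sum(dim=1, keepdim=True)  # normalization (assumption)
    return (x_un * weights.unsqueeze(1)).sum(dim=2)
```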
|
| 305 |
+
|
| 306 |
+
Since the features obtained using the bilateral kernel are noise-reduced, edge-preserved, and detail-enhanced, they help extract high-frequency texture details that are less affected by haze and glow, as shown in Fig. 6 (bottom). To enforce the consistency between the input $I_{h}$ and the output $O_{c}$ and improve the quality of the output, we define the self-supervised bilateral kernel loss as:
|
| 307 |
+
|
| 308 |
+
$$
|
| 309 |
+
\mathcal{L}_{k} = \left| K\left(O_{c}\right) - K\left(I_{h}\right) \right|_{1}. \tag{8}
|
| 310 |
+
$$
|
| 311 |
+
|
| 312 |
+
In summary, our gradient-adaptive convolution captures edge information by considering the differences between neighboring pixels instead of their pixel values, as reflected in the gradient term $(\mathbf{v}_j - \mathbf{v}_j')$ . Additionally, it accounts for spatially varying illumination and non-uniform haze distribution by utilizing the adaptive term $K(\mathbf{f}_i,\mathbf{f}_j)$ , which helps to preserve texture information and adapt more effectively to nighttime scenes.
|
| 313 |
+
|
| 314 |
+
# 3.4 Network and Other Losses
|
| 315 |
+
|
| 316 |
+
In Fig. 2, our glow suppression is a CycleGAN [102]-based network. We use the deglowing network $G_{c}$ , which is coupled with a discriminator $D_{c}$ . To ensure reconstruction consistency, we have another haze generator $G_{h}$ coupled with its discriminator $D_{h}$ . $G_{c}$ takes hazy images $I_{h}$ as input and outputs clean images $O_{c}$ . Our haze generator $G_{h}$ transforms the output images $O_{c}$ into reconstructed hazy images $\hat{I}_{h}$ . By imposing the cycle-consistency constraints, our deglowing network $G_{c}$ learns to remove real-world glow. Thanks to the cycle-consistency constraints, we can use unpaired data to optimize our deglowing network, thus reducing the domain gap between synthetic datasets and real-world glow images.
|
| 317 |
+
|
| 318 |
+
Besides the self-supervised light source consistency loss $\mathcal{L}_{\mathrm{ls}}$ , gradient loss $\mathcal{L}_g$ and bilateral kernel loss $\mathcal{L}_k$ , with weights $\{1, 0.5, 5\}$ , we follow [102] and use additional losses to train our network: the adversarial loss $\mathcal{L}_{adv}$ , cycle consistency loss $\mathcal{L}_{cyc}$ , and identity loss $\mathcal{L}_{iden}$ , with weights $\{1, 10, 10\}$ .
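With the weights quoted above, the full training objective can be assembled as a plain weighted sum; the individual loss terms are assumed to be computed elsewhere:

```python
def total_loss(l_ls, l_g, l_k, l_adv, l_cyc, l_iden):
    """Weighted sum of the losses in Sec. 3.4: {1, 0.5, 5} for the
    self-supervised terms and {1, 10, 10} for the CycleGAN terms."""
    return (1.0 * l_ls + 0.5 * l_g + 5.0 * l_k
            + 1.0 * l_adv + 10.0 * l_cyc + 10.0 * l_iden)
```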
|
| 319 |
+
|
| 320 |
+
# 3.5 Low-light Region Enhancement
|
| 321 |
+
|
| 322 |
+
Nighttime dehazing often leads to dark results due to low-light conditions [20, 27, 30, 31, 78, 79, 82]. To address this, we incorporate a low-light enhancement module to improve the visibility of object regions. Our approach involves generating an attention map that highlights these regions, allowing our method to focus on enhancing their intensity. We then apply a low-light image enhancement technique [21] to enhance the region with the assistance of the attention map, even in scenes with low light.
|
| 323 |
+
|
| 324 |
+
Attention Map To obtain the soft attention maps $A$ shown in Fig. 8, we input the night haze images and refine the coarse map using [44]. The refined attention map $A$ exhibits high values in object regions and low values in uniform regions, such as the sky. Therefore, we can distinguish between the object and the haze regions.
|
| 325 |
+
|
| 326 |
+
$$
|
| 327 |
+
O_{e} = (1 - A) \cdot O_{c} + A \cdot O_{c}^{\gamma}, \tag{9}
|
| 328 |
+
$$
|
| 329 |
+
|
| 330 |
+

|
| 331 |
+
|
| 332 |
+

|
| 333 |
+
Figure 8: We show the attention maps used to enhance low-light regions.
|
| 334 |
+
|
| 335 |
+

|
| 336 |
+
|
| 337 |
+

|
| 338 |
+
|
| 339 |
+

|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
|
| 343 |
+

|
| 344 |
+
|
| 345 |
+

|
| 346 |
+
|
| 347 |
+
Table 1: Results on GTA5 nighttime haze dataset.
|
| 348 |
+
|
| 349 |
+
<table><tr><td></td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>Input Image</td><td>18.987</td><td>0.6764</td></tr><tr><td>Li [45]</td><td>21.024</td><td>0.6394</td></tr><tr><td>Zhang [96]</td><td>20.921</td><td>0.6461</td></tr><tr><td>Ancuti [1]</td><td>20.585</td><td>0.6233</td></tr><tr><td>Yan [87]</td><td>26.997</td><td>0.8499</td></tr><tr><td>CycleGAN [102]</td><td>21.753</td><td>0.6960</td></tr><tr><td>w/o Gradient Adaptive Conv</td><td>27.913</td><td>0.8673</td></tr><tr><td>w/o APSF Guided Render</td><td>28.913</td><td>0.8776</td></tr><tr><td>Our Result</td><td>30.383</td><td>0.9042</td></tr></table>
|
| 350 |
+
|
| 351 |
+
Table 2: Summary of comparisons between ours and existing learning-based nighttime dehazing methods. SL is short for supervised learning, SSL is semi-supervised learning.
|
| 352 |
+
|
| 353 |
+
<table><tr><td>Learning</td><td>Methods</td><td>Glow</td><td>Edge, Texture</td><td>Low Light</td><td>Light Source</td><td>Uneven light</td></tr><tr><td>SSL</td><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>SL</td><td>Kuanar [33]</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>×</td></tr><tr><td>SSL</td><td>Yan [87]</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>×</td></tr><tr><td>SL</td><td>Zhang [96]</td><td>×</td><td>×</td><td>×</td><td>×</td><td>×</td></tr></table>
|
| 354 |
+
|
| 355 |
+
where $\gamma$ is the enhancement parameter from [21] (set to 0.3 in our experiments), $O_{c}$ is the dehazed output, $O_{e}$ is the enhanced result, and $A$ is the attention map.
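Eq. (9) is simply a per-pixel blend between the dehazed output and its gamma-corrected version, weighted by the attention map; a minimal sketch:

```python
import numpy as np

def enhance_low_light(o_c, attention, gamma=0.3):
    """Sketch of Eq. (9): O_e = (1 - A) * O_c + A * O_c ** gamma.

    o_c:       dehazed output in [0, 1], shape (H, W, 3).
    attention: soft attention map A in [0, 1], shape (H, W).
    gamma=0.3 follows the value used in the experiments.
    """
    a = attention[..., None]
    return (1.0 - a) * o_c + a * np.power(o_c, gamma)
```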
|
| 356 |
+
|
| 357 |
+
# 4 EXPERIMENTAL RESULTS
|
| 358 |
+
|
| 359 |
+
# 4.1 Datasets
|
| 360 |
+
|
| 361 |
+
GTA5 [87] is a synthetic nighttime dehazing dataset generated by the GTA5 game engine. It includes 864 paired images, where 787 paired images are used as the training set and the remaining images are used as the test set.
|
| 362 |
+
|
| 363 |
+
RealNightHaze is a real-world night dehazing dataset. It includes 440 night hazy images, where 150 images are from [98], 200 images are from [87], and the remaining images are collected from the Internet.
|
| 364 |
+
|
| 365 |
+
# 4.2 Comparison on Synthetic Datasets
|
| 366 |
+
|
| 367 |
+
In this section, we compare our method with existing state-of-the-art methods, including Yan [87], Zhang [97], Li [45], Ancuti [1], Zhang [96], Yu [94], Zhang [98], Liu [51] and Wang [77]. A summary of the main differences is shown in Table 2. The experimental results are shown in Table 1. It can be observed that our method achieves a significant performance improvement. We adopt two widely used metrics, PSNR and SSIM, which are common in generation [75] and restoration [8, 9, 28, 40, 41, 84-86, 91] tasks. Our method achieves a PSNR of 30.383 and an SSIM of 0.904, outperforming Yan's method [87] by
|
| 368 |
+
|
| 369 |
+

|
| 370 |
+
(a) Input
|
| 371 |
+
|
| 372 |
+

|
| 373 |
+
(b) Ours
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
(c) Liu-22 [52]
|
| 377 |
+
|
| 378 |
+

|
| 379 |
+
(d) Wang-22 [77]
|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
(e) Zhang [98]
|
| 383 |
+
|
| 384 |
+

|
| 385 |
+
(f) Yan-20 [87]
|
| 386 |
+
|
| 387 |
+

|
| 388 |
+
(g) Yu-19 [94]
|
| 389 |
+
|
| 390 |
+

|
| 391 |
+
(h) Zhang [96]
|
| 392 |
+
|
| 393 |
+

|
| 394 |
+
|
| 395 |
+

|
| 396 |
+
(b) Ours
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
(c) Liu-22 [52]
|
| 400 |
+
|
| 401 |
+

|
| 402 |
+
|
| 403 |
+

|
| 404 |
+
(a) Input
|
| 405 |
+
|
| 406 |
+

|
| 407 |
+
(f) Yan-20 [87]
|
| 408 |
+
|
| 409 |
+

|
| 410 |
+
(g) Yu-19 [94]
|
| 411 |
+
|
| 412 |
+

|
| 413 |
+
(d) Wang-22 [77]
|
| 414 |
+
(h) Zhang [96]
|
| 415 |
+
|
| 416 |
+

|
| 417 |
+
(e) Zhang [98]
|
| 418 |
+
|
| 419 |
+

|
| 420 |
+
(b) Ours
|
| 421 |
+
|
| 422 |
+

|
| 423 |
+
|
| 424 |
+

|
| 425 |
+
|
| 426 |
+

|
| 427 |
+
(a) Input
|
| 428 |
+
(e) Zhang [98]
|
| 429 |
+
Figure 9: Visual comparisons of different nighttime dehazing methods on real nighttime hazy scenes. Our results are more realistic and effective in nighttime dehazing. Zoom-in for better visualization.
|
| 430 |
+
|
| 431 |
+

|
| 432 |
+
(f) Yan-20 [87]
|
| 433 |
+
|
| 434 |
+

|
| 435 |
+
(c) Liu-22 [52]
|
| 436 |
+
(g) Yu-19 [94]
|
| 437 |
+
|
| 438 |
+

|
| 439 |
+
(d) Wang-22 [77]
|
| 440 |
+
(h) Ancuti-20 [2]
|
| 441 |
+
|
| 442 |
+
Table 3: User study evaluation on real night images. Our method obtained the highest mean (the maximum score is 10), showing that it is effective in nighttime dehazing, deglowing and low-light enhancement. Our method is also visually realistic. The best result is in red whereas the second and third best results are in blue and purple, respectively.
|
| 443 |
+
|
| 444 |
+
<table><tr><td>Aspects</td><td>Ours</td><td>Yan [87]</td><td>Zhang [97]</td><td>Li [45]</td><td>Ancuti [1]</td><td>Zhang [96]</td><td>Yu [94]</td><td>Zhang [98]</td><td>Liu-22 [52]</td><td>Wang-22 [77]</td></tr><tr><td>1.Dehaze↑</td><td>9.1 ± 0.99</td><td>8.9 ± 1.75</td><td>4.2 ± 2.42</td><td>6.1 ± 2.00</td><td>5.2 ± 2.35</td><td>4.4 ± 2.01</td><td>4.8 ± 2.26</td><td>4.6 ± 2.07</td><td>3.9 ± 2.12</td><td>5.5 ± 1.91</td></tr><tr><td>2.Deglow↑</td><td>9.1 ± 0.91</td><td>7.9 ± 1.12</td><td>3.2 ± 1.96</td><td>6.2 ± 1.83</td><td>5.2 ± 2.14</td><td>4.0 ± 2.09</td><td>3.7 ± 2.08</td><td>4.4 ± 2.05</td><td>5.3 ± 1.99</td><td>5.7 ± 1.76</td></tr><tr><td>3.Low-light↑</td><td>8.5 ± 1.30</td><td>7.9 ± 1.93</td><td>7.1 ± 2.31</td><td>5.5 ± 2.33</td><td>5.4 ± 2.01</td><td>5.6 ± 1.82</td><td>6.5 ± 2.12</td><td>5.6 ± 1.75</td><td>5.5 ± 1.92</td><td>5.4 ± 1.89</td></tr><tr><td>4.Realistic↑</td><td>8.9 ± 0.94</td><td>8.0 ± 1.28</td><td>4.6 ± 1.93</td><td>4.7 ± 2.07</td><td>6.7 ± 1.90</td><td>4.9 ± 1.93</td><td>5.7 ± 1.90</td><td>4.9 ± 1.97</td><td>3.8 ± 1.81</td><td>5.9 ± 1.67</td></tr></table>
|
| 445 |
+
|
| 446 |
+
$14\%$ and $5\%$ , respectively. This is because our method learns from the APSF-guided glow rendering and thus effectively removes the glow effects. Another advantage is that we introduce a gradient-adaptive convolution to capture the edges and textures. The obtained edges and textures are then used to enhance the structural details of the enhanced images, leading to superior performance.
|
| 447 |
+
|
| 448 |
+
# 4.3 Comparison on Real-World Datasets
|
| 449 |
+
|
| 450 |
+
Fig. 9 shows the qualitative results, including Liu [51], Wang [77], Zhang [98], Yan [87], Yu [94], Zhang [96] and our method. It can be seen that our method significantly enhances the visibility of nighttime hazy images. Specifically, most state-of-the-art methods cannot sufficiently remove haze since they suffer from the domain gap between synthetic datasets and real-world images. Yan et al. [87] propose a semi-supervised framework for nighttime fog removal and can remove most haze effects. However, their method over-suppresses hazy images, and thus their outputs become too dark.
|
| 451 |
+
|
| 452 |
+
In contrast, our method handles glow and low-light conditions. As shown in Fig. 9 (b), our method not only removes the haze of the input images but also enhances the light. For instance, the details of trees and buildings are clear. This is because our method simulates the glow rendering by utilizing Atmospheric Point Spread Function (APSF) and thus can effectively remove haze or glow in a real-world night hazy image. Moreover, we propose a gradient-adaptive convolution to capture the edges and textures from hazy images. The captured edges and textures are then used to boost the details of images, leading to superior performance. Furthermore, we introduce an attention map to enhance the low-light regions. As a result, our method achieves a significant performance improvement.
|
| 453 |
+
|
| 454 |
+
We also conduct user studies on real-world night hazy images. The experimental results are shown in Table 3. It can be seen that our method obtains the highest scores in all aspects.
|
| 455 |
+
|
| 456 |
+
# 4.4 Ablation Study
|
| 457 |
+
|
| 458 |
+
Our framework includes three core parts: APSF-guided glow rendering, gradient-adaptive convolution and attention-guided enhancement. To prove the effectiveness of each part, we conduct ablation studies on real-world night hazy images.
|
| 459 |
+
|
| 460 |
+
APSF-guided glow rendering Fig. 5 (bottom) shows the results of our glow rendering. We can observe that our method accurately detects the location of light sources (middle). Also, the rendered results effectively simulate the glow effects.
|
| 461 |
+
|
| 462 |
+
Gradient-adaptive convolution Fig. 6 shows the results of the gradient edge maps (middle) and textures (bottom). Fig. 11 (b) and (c) show results without the gradient loss $\mathcal{L}_g$ and without the kernel loss $\mathcal{L}_k$ , and (d) shows the result with gradient capture convolution. It can be seen that our
|
| 463 |
+
|
| 464 |
+

|
| 465 |
+
Input $I_{h}$
|
| 466 |
+
|
| 467 |
+

|
| 468 |
+
w/o Enhance $O_{c}$
|
| 469 |
+
|
| 470 |
+

|
| 471 |
+
Attention A
|
| 472 |
+
Figure 10: Ablation study of low-light enhancement.
|
| 473 |
+
|
| 474 |
+

|
| 475 |
+
w/ Enhance $O_{e}$
|
| 476 |
+
|
| 477 |
+

|
| 478 |
+
(a) Input $I_{h}$
|
| 479 |
+
|
| 480 |
+

|
| 481 |
+
(b) $\mathbf{w} / \mathbf{o}\mathcal{L}_g$
|
| 482 |
+
|
| 483 |
+

|
| 484 |
+
(c) $\mathbf{w} / \mathbf{o}\mathcal{L}_k$
|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
(d) w/ GAC
|
| 488 |
+
Figure 11: Ablation study of gradient adaptive conv. (GAC).
|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
(e) Enhance
|
| 492 |
+
|
| 493 |
+
gradient capture convolution can effectively preserve the structural details of hazy images. By taking full advantage of the gradient maps and textures, our framework generates sharper results.
|
| 494 |
+
|
| 495 |
+
Low Light Enhancement Fig. 10 shows the results of low-light enhancement from (b) to (d).
|
| 496 |
+
|
| 497 |
+
# 5 CONCLUSION
|
| 498 |
+
|
| 499 |
+
In this paper, we have proposed a novel nighttime visibility enhancement framework that addresses both glow and low-light conditions. Our framework includes three core ideas: APSF-guided glow rendering, gradient-adaptive convolution and attention-guided low-light enhancement. Our framework suppresses glow effects by learning from the APSF-guided glow rendering data. Thanks to our APSF-guided glow rendering, we can use a semi-supervised method to optimize our network, thus handling glow effects from different light sources. Our gradient-adaptive convolution is proposed to capture edges and textures from a nighttime hazy image. Benefiting from the captured edges and textures, our framework effectively preserves structural details. Our low-light region enhancement boosts the intensity of dark or over-suppressed regions via an attention map. Both quantitative and qualitative experiments show that our method achieves a significant performance improvement. Moreover, the ablation study proves the effectiveness of each core idea. Handling scenarios with diverse domain shifts [99] will be a focus of our future research.
|
| 500 |
+
|
| 501 |
+
# ACKNOWLEDGMENTS
|
| 502 |
+
|
| 503 |
+
This research/project is supported by the National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No: AISG2-PhD/2022-01-037[T]).
|
| 504 |
+
|
| 505 |
+
# REFERENCES
|
| 506 |
+
|
| 507 |
+
[1] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. 2016. Night-time dehazing by fusion. In 2016 IEEE International Conference on Image Processing (ICIP). IEEE, 2256-2260.
|
| 508 |
+
[2] Cosmin Ancuti, Codruta O Ancuti, Christophe De Vleeschouwer, and Alan C Bovik. 2020. Day and night-time dehazing by local airlight estimation. IEEE Transactions on Image Processing 29 (2020), 6264-6275.
|
| 509 |
+
[3] Sriparna Banerjee and Sheli Sinha Chaudhuri. 2021. Nighttime image-dehazing: a review and quantitative benchmarking. Archives of Computational Methods in Engineering 28, 4 (2021), 2943-2975.
|
| 510 |
+
[4] Dana Berman, Shai Avidan, et al. 2016. Non-local image dehazing. In Proceedings of the IEEE conference on computer vision and pattern recognition. 1674-1682.
|
| 511 |
+
[5] Dana Berman, Tali Treibitz, and Shai Avidan. 2018. Single image dehazing using haze-lines. IEEE transactions on pattern analysis and machine intelligence 42, 3 (2018), 720-734.
|
| 512 |
+
[6] Bolun Cai, Xiangmin Xu, Kui Jia, Chunmei Qing, and Dacheng Tao. 2016. Dehazenet: An end-to-end system for single image haze removal. IEEE Transactions on Image Processing 25, 11 (2016), 5187-5198.
|
| 513 |
+
[7] Subrahmanyan Chandrasekhar. 2013. Radiative transfer. Courier Corporation.
|
| 514 |
+
[8] Sixiang Chen, Tian Ye, Yun Liu, Erkang Chen, Jun Shi, and Jingchun Zhou. 2022. Snowformer: Scale-aware transformer via context interaction for single image desnowing. arXiv preprint arXiv:2208.09703 (2022).
|
| 515 |
+
[9] Sixiang Chen, Tian Ye, Yun Liu, Taodong Liao, Jingxia Jiang, Erkang Chen, and Peng Chen. 2023. MSP-former: Multi-scale projection transformer for single image desnowing. In ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5.
|
| 516 |
+
[10] Zeyuan Chen, Yangchao Wang, Yang Yang, and Dong Liu. 2021. PSD: Principled synthetic-to-real dehazing guided by physical priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 7180-7189.
|
| 517 |
+
[11] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, and Chen Change Loy. 2022. Flare7k: A phenomenological nighttime flare removal dataset. Advances in Neural Information Processing Systems 35 (2022), 3926-3937.
|
| 518 |
+
[12] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, Yihang Luo, and Chen Change Loy. 2023. Flare7K++: Mixing Synthetic and Real Datasets for Nighttime Flare Removal and Beyond. arXiv preprint arXiv:2306.04236 (2023).
|
| 519 |
+
[13] Yuekun Dai, Chongyi Li, Shangchen Zhou, Ruicheng Feng, Qingpeng Zhu, Qianhui Sun, Wenxiu Sun, Chen Change Loy, Jinwei Gu, Shuai Liu, et al. 2023. MIPI 2023 Challenge on Nighttime Flare Removal: Methods and Results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2852-2862.
|
| 520 |
+
[14] Yuekun Dai, Yihang Luo, Shangchen Zhou, Chongyi Li, and Chen Change Loy. 2023. Nighttime Smartphone Reflective Flare Removal Using Optical Center Symmetry Prior. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 20783-20791.
|
| 521 |
+
[15] Hang Dong, Jinshan Pan, Lei Xiang, Zhe Hu, Xinyi Zhang, Fei Wang, and Ming-Hsuan Yang. 2020. Multi-scale boosted dehazing network with dense feature fusion. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2157-2167.
|
| 522 |
+
[16] Raanan Fattal. 2008. Single image dehazing. ACM transactions on graphics (TOG) 27, 3 (2008), 1-9.
|
| 523 |
+
[17] Alona Golts, Daniel Freedman, and Michael Elad. 2019. Unsupervised single image dehazing using dark channel prior loss. IEEE Transactions on Image Processing 29 (2019), 2692-2701.
|
| 524 |
+
[18] Chunle Guo, Ruiqi Wu, Xin Jin, Linghao Han, Zhi Chai, Weidong Zhang, and Chongyi Li. 2023. Underwater Ranker: Learn Which Is Better and How to Be Better. In Proceedings of the AAAI conference on artificial intelligence.
|
| 525 |
+
[19] Chun-Le Guo, Qixin Yan, Saeed Anwar, Runmin Cong, Wenqi Ren, and Chongyi Li. 2022. Image Dehazing Transformer with Transmission-Aware 3D Position Embedding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 5812-5820.
|
| 526 |
+
[20] Lanqing Guo, Renjie Wan, Wenhan Yang, Alex Kot, and Bihan Wen. 2022. Enhancing low-light images in real world via cross-image disentanglement. arXiv preprint arXiv:2201.03145 (2022).
|
| 527 |
+
[21] Xiaojie Guo, Yu Li, and Haibin Ling. 2016. LIME: Low-light image enhancement via illumination map estimation. IEEE Transactions on Image Processing 26, 2 (2016), 982-993.
|
| 528 |
+
[22] Kaiming He, Jian Sun, and Xiaoou Tang. 2010. Single image haze removal using dark channel prior. IEEE transactions on pattern analysis and machine intelligence 33, 12 (2010), 2341-2353.
|
| 529 |
+
[23] Kaiming He, Jian Sun, and Xiaoou Tang. 2011. Single image haze removal using dark channel prior. IEEE transactions on pattern analysis and machine intelligence
|
| 530 |
+
|
| 531 |
+
33, 12 (2011), 2341-2353.
|
| 532 |
+
[24] Lu-Yao Huang, Jia-Li Yin, Bo-Hao Chen, and Shao-Zhen Ye. 2019. Towards Unsupervised Single Image Dehazing With Deep Learning. In 2019 IEEE International Conference on Image Processing (ICIP). IEEE, 2741-2745.
|
| 533 |
+
[25] Akira Ishimaru et al. 1978. Wave propagation and scattering in random media. Vol. 2. Academic press New York.
|
| 534 |
+
[26] Xin Jin, Jia-Wen Xiao, Ling-Hao Han, Chunle Guo, Ruixun Zhang, Xialei Liu, and Chongyi Li. 2023. Lighting Every Darkness in Two Pairs: A Calibration-Free Pipeline for RAW Denoising. Proceedings of the IEEE/CVF International Conference on Computer Vision.
|
| 535 |
+
[27] Yeying Jin, Ruoteng Li, Wenhan Yang, and Robby T Tan. 2023. Estimating reflectance layer from a single image: Integrating reflectance guidance and shadow/specular aware learning. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 37. 1069-1077.
|
| 536 |
+
[28] Yeying Jin, Aashish Sharma, and Robby T Tan. 2021. DC-ShadowNet: Single-Image Hard and Soft Shadow Removal Using Unsupervised Domain-Classifier Guided Network. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 5027-5036.
|
| 537 |
+
[29] Yeying Jin, Wending Yan, Wenhan Yang, and Robby T Tan. 2022. Structure Representation Network and Uncertainty Feedback Learning for Dense Non-Uniform Fog Removal. In Proceedings of the Asian Conference on Computer Vision. 2041–2058.
|
| 538 |
+
[30] Yeying Jin, Wenhan Yang, and Robby T Tan. 2022. Unsupervised night image enhancement: When layer decomposition meets light-effects suppression. In European Conference on Computer Vision. Springer, 404-421.
|
| 539 |
+
[31] Yeying Jin, Wenhan Yang, Wei Ye, Yuan Yuan, and Robby T Tan. 2022. ShadowDiffusion: Diffusion-based Shadow Removal using Classifier-driven Attention and Structure Preservation. arXiv preprint arXiv:2211.08089 (2022).
|
| 540 |
+
[32] Harald Koschmieder. 1924. Theorie der horizontalen Sichteweite. Beitrage zur Physik der freiem Atmosphere (1924), 33-53.
|
| 541 |
+
[33] Shiba Kuanar, Dwarikanath Mahapatra, Monalisa Bilas, and KR Rao. 2022. Multipath dilated convolution network for haze and glow removal in nighttime images. The Visual Computer 38, 3 (2022), 1121-1134.
|
| 542 |
+
[34] Anat Levin, Dani Lischinski, and Yair Weiss. 2007. A closed-form solution to natural image matting. IEEE transactions on pattern analysis and machine intelligence 30, 2 (2007), 228-242.
|
| 543 |
+
[35] Boyun Li, Yuanbiao Gou, Shuhang Gu, Jerry Zitao Liu, Joey Tianyi Zhou, and Xi Peng. 2021. You only look yourself: Unsupervised and untrained single image dehazing neural network. International Journal of Computer Vision 129, 5 (2021), 1754-1767.
|
| 544 |
+
[36] Boyun Li, Yuanbiao Gou, Jerry Zitao Liu, Hongyuan Zhu, Joel Tianyi Zhou, and Xi Peng. 2020. Zero-shot image dehazing. IEEE Transactions on Image Processing 29 (2020), 8457-8466.
|
| 545 |
+
[37] Boyi Li, Xiulian Peng, Zhangyang Wang, Jizheng Xu, and Dan Feng. 2017. Aod-net: All-in-one dehazing network. In Proceedings of the IEEE international conference on computer vision. 4770-4778.
|
| 546 |
+
[38] Lerenhan Li, Yunlong Dong, Wenqi Ren, Jinshan Pan, Changxin Gao, Nong Sang, and Ming-Hsuan Yang. 2019. Semi-supervised image dehazing. IEEE Transactions on Image Processing 29 (2019), 2766-2779.
|
| 547 |
+
[39] Runde Li, Jinshan Pan, Zechao Li, and Jinhui Tang. 2018. Single image dehazing via conditional generative adversarial network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 8202-8211.
|
| 548 |
+
[40] Xin Li, Xin Jin, Jianxin Lin, Sen Liu, Yaojun Wu, Tao Yu, Wei Zhou, and Zhibo Chen. 2020. Learning disentangled feature representation for hybrid-distorted image restoration. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX 16. Springer, 313-329.
|
| 549 |
+
[41] Xin Li, Bingchen Li, Xin Jin, Cuiling Lan, and Zhibo Chen. 2023. Learning Distortion Invariant Representation for Image Restoration from A Causality Perspective. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1714-1724.
|
| 550 |
+
[42] Yu Li and Michael S Brown. 2014. Single image layer separation using relative smoothness. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2752-2759.
|
| 551 |
+
[43] Yi Li, Yi Chang, Yan Gao, Changfeng Yu, and Luxin Yan. 2022. Physically Disentangled Intra-and Inter-Domain Adaptation for Varicolored Haze Removal. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 5841-5850.
|
| 552 |
+
[44] Yu Li, Fangfang Guo, Robby T Tan, and Michael S Brown. 2014. A contrast enhancement framework with JPEG artifacts suppression. In European conference on computer vision. Springer, 174-188.
|
| 553 |
+
[45] Yu Li, Robby T Tan, and Michael S Brown. 2015. Nighttime haze removal with glow and multiple light colors. In Proceedings of the IEEE international conference on computer vision. 226-234.
|
| 554 |
+
[46] Zhuwen Li, Ping Tan, Robby T Tan, Danping Zou, Steven Zhiying Zhou, and Loong-Fah Cheong. 2015. Simultaneous video defogging and stereo reconstruction. In Proceedings of the IEEE conference on computer vision and pattern recognition. 4988-4997.
|
| 555 |
+
|
| 556 |
+
[47] Beibei Lin, Yeying Jin, Wending Yan, Wei Ye, Yuan Yuan, Shunli Zhang, and Robby T Tan. 2024. NightRain: Nighttime Video Deraining via Adaptive-Rain-Removal and Adaptive-Correction. arXiv:2401.00729 [cs.CV]
|
| 557 |
+
[48] Wei Liu, Xianxu Hou, Jiang Duan, and Guoping Qiu. 2020. End-to-end single image fog removal using enhanced cycle consistent adversarial networks. IEEE Transactions on Image Processing 29 (2020), 7819-7833.
|
| 558 |
+
[49] Xiaohong Liu, Yongrui Ma, Zhihao Shi, and Jun Chen. 2019. Griddehazenet: Attention-based multi-scale network for image dehazing. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 7314-7323.
|
| 559 |
+
[50] Yun Liu, Zhongsheng Yan, Sixiang Chen, Tian Ye, Wenqi Ren, and Erkang Chen. 2023. NightHazeFormer: Single Nighttime Haze Removal Using Prior Query Transformer. arXiv preprint arXiv:2305.09533 (2023).
|
| 560 |
+
[51] Yun Liu, Zhongsheng Yan, Jinge Tan, and Yuche Li. 2023. Multi-Purpose Oriented Single Nighttime Image Haze Removal Based on Unified Variational Retinex Model. IEEE Transactions on Circuits and Systems for Video Technology 33, 4 (2023), 1643-1657. https://doi.org/10.1109/TCSVT.2022.3214430
|
| 561 |
+
[52] Yun Liu, Zhongsheng Yan, Aimin Wu, Tian Ye, and Yuche Li. 2022. Nighttime image dehazing based on variational decomposition model. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition Workshop. 640-649.
|
| 562 |
+
[53] Yun Liu, Zhongsheng Yan, Tian Ye, Aimin Wu, and Yuche Li. 2022. Single nighttime image dehazing based on unified variational decomposition model and multi-scale contrast enhancement. Engineering Applications of Artificial Intelligence 116 (2022), 105373.
|
| 563 |
+
[54] Samy Metari and François Deschenes. 2007. A new convolution kernel for atmospheric point spread function applied to computer vision. In 2007 IEEE 11th international conference on computer vision. IEEE, 1-8.
|
| 564 |
+
[55] William Edgar Knowles Middleton. 1957. Vision through the atmosphere. Springer.
|
| 565 |
+
[56] Srinivasa G Narasimhan and Shree K Nayar. 2000. Chromatic framework for vision in bad weather. In Proceedings IEEE Conference on Computer Vision and Pattern Recognition. CVPR 2000 (Cat. No. PR00662), Vol. 1. IEEE, 598-605.
|
| 566 |
+
[57] Srinivasa G Narasimhan and Shree K Nayar. 2003. Shedding light on the weather. In 2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2003. Proceedings., Vol. 1. IEEE, I-I.
|
| 567 |
+
[58] Dubok Park, David K Han, and Hanseok Ko. 2016. Nighttime image dehazing with local atmospheric light and weighted entropy. In 2016 IEEE International Conference on Image Processing (ICIP). IEEE, 2261-2265.
|
| 568 |
+
[59] Soo-Chang Pei and Tzu-Yen Lee. 2012. Nighttime haze removal using color transfer pre-processing and dark channel prior. In 2012 19th IEEE International conference on image processing. IEEE, 957-960.
|
| 569 |
+
[60] Matti Pietikainen. 2010. Local binary patterns. *Scholarpedia* 5, 3 (2010), 9775.
|
| 570 |
+
[61] Xu Qin, Zhilin Wang, Yuanchao Bai, Xiaodong Xie, and Huizhu Jia. 2020. FFA-Net: Feature fusion attention network for single image dehazing. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 34. 11908–11915.
|
| 571 |
+
[62] Yanyun Qu, Yizi Chen, Jingying Huang, and Yuan Xie. 2019. Enhanced Pix2pix Dehazing Network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 8160-8168.
|
| 572 |
+
[63] Wenqi Ren, Si Liu, Hua Zhang, Jinshan Pan, Xiaochun Cao, and Ming-Hsuan Yang. 2016. Single image dehazing via multi-scale convolutional neural networks. In European conference on computer vision. Springer, 154-169.
|
| 573 |
+
[64] Wenqi Ren, Lin Ma, Jiawei Zhang, Jinshan Pan, Xiaochun Cao, Wei Liu, and Ming-Hsuan Yang. 2018. Gated fusion network for single image dehazing. In Proceedings of the IEEE conference on computer vision and pattern recognition. 3253-3261.
|
| 574 |
+
[65] Yuanjie Shao, Lerenhan Li, Wenqi Ren, Changxin Gao, and Nong Sang. 2020. Domain Adaptation for Image Dehazing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2808-2817.
|
| 575 |
+
[66] Aashish Sharma, Loong-Fah Cheong, Lionel Heng, and Robby T Tan. 2020. Nighttime Stereo Depth Estimation using Joint Translation-Stereo Learning: Light Effects and Uninformative Regions. In 2020 International Conference on 3D Vision (3DV). IEEE, 23-31.
|
| 576 |
+
[67] Aashish Sharma and Robby T Tan. 2021. Nighttime Visibility Enhancement by Increasing the Dynamic Range and Suppression of Light Effects. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11977-11986.
|
| 577 |
+
[68] Aashish Sharma, Robby T Tan, and Loong-Fah Cheong. 2020. Single-Image Camera Response Function Using Prediction Consistency and Gradual Refinement. In Proceedings of the Asian Conference on Computer Vision.
|
| 578 |
+
[69] Yuda Song, Zhuqing He, Hui Qian, and Xin Du. 2023. Vision transformers for single image dehazing. IEEE Transactions on Image Processing 32 (2023), 1927-1941.
|
| 579 |
+
[70] Hang Su, Varun Jampani, Deqing Sun, Orazio Gallo, Erik Learned-Miller, and Jan Kautz. 2019. Pixel-adaptive convolutional neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 11166-11175.
|
| 580 |
+
[71] Zhuo Su, Wenzhe Liu, Zitong Yu, Dewen Hu, Qing Liao, Qi Tian, Matti Pietikainen, and Li Liu. 2021. Pixel difference networks for efficient edge detection.
|
| 581 |
+
|
| 582 |
+
In Proceedings of the IEEE/CVF International Conference on Computer Vision, 5117-5127.
|
| 583 |
+
[72] Robby T Tan. 2008. Visibility in bad weather from a single image. In 2008 IEEE conference on computer vision and pattern recognition. IEEE, 1-8.
|
| 584 |
+
[73] Qunfang Tang, Jie Yang, Xiangjian He, Wenjing Jia, Qingnian Zhang, and Haibo Liu. 2021. Nighttime image dehazing based on Retinex and dark channel prior using Taylor series expansion. Computer Vision and Image Understanding 202 (2021), 103086.
|
| 585 |
+
[74] Carlo Tomasi and Roberto Manduchi. 1998. Bilateral filtering for gray and color images. In Sixth international conference on computer vision (IEEE Cat. No. 98CH36271). IEEE, 839-846.
|
| 586 |
+
[75] Jiadong Wang, Xinyuan Qian, Malu Zhang, Robby T Tan, and Haizhou Li. 2023. Seeing What You Said: Talking Face Generation Guided by a Lip Reading Expert. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 14653-14662.
|
| 587 |
+
[76] Ming Wang, Xianda Guo, Beibei Lin, Tian Yang, Zheng Zhu, Lincheng Li, Shunli Zhang, and Xin Yu. 2023. DyGait: Exploiting Dynamic Representations for High-performance Gait Recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). 13424-13433.
|
| 588 |
+
[77] Wenhui Wang, Anna Wang, and Chen Liu. 2022. Variational Single Nighttime Image Haze Removal With a Gray Haze-Line Prior. IEEE Transactions on Image Processing 31 (2022), 1349-1363.
|
| 589 |
+
[78] Yufei Wang, Renjie Wan, Wenhan Yang, Haoliang Li, Lap-Pui Chau, and Alex Kot. 2022. Low-light image enhancement with normalizing flow. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 36. 2604-2612.
|
| 590 |
+
[79] Yufei Wang, Yi Yu, Wenhan Yang, Langting Guo, Lap-Pui Chau, Alex C Kot, and Bihan Wen. 2023. ExposureDiffusion: Learning to Expose for Low-light Image Enhancement. arXiv preprint arXiv:2307.07710 (2023).
|
| 591 |
+
[80] Haiyan Wu, Yanyun Qu, Shaohui Lin, Jian Zhou, Ruizhi Qiao, Zhizhong Zhang, Yuan Xie, and Lizhuang Ma. 2021. Contrastive learning for compact single image dehazing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10551-10560.
|
| 592 |
+
[81] Ruiqi Wu, Zhengpeng Duan, Chunle Guo, Zhi Chai, and Chongyi Li. 2023. RIDCP: Revitalizing Real Image Dehazing via High-Quality Codebook Priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition.
|
| 593 |
+
[82] Wanyu Wu, Wei Wang, Zheng Wang, Kui Jiang, and Xin Xu. 2023. From Generation to Suppression: Towards Effective Irregular Glow Removal for Nighttime Visibility Enhancement. arXiv:2307.16783 [cs.CV]
|
| 594 |
+
[83] Boxue Xiao, Zhuoran Zheng, Yunliang Zhuang, Chen Lyu, and Xiuyi Jia. 2022. Single UHD image dehazing via interpretable pyramid network. Available at SSRN 4134196 (2022).
|
| 595 |
+
[84] Zeyu Xiao, Jiawang Bai, Zhihe Lu, and Zhiwei Xiong. 2023. A dive into sam prior in image restoration. arXiv preprint arXiv:2305.13620 (2023).
|
| 596 |
+
[85] Zeyu Xiao, Xueyang Fu, Jie Huang, Zhen Cheng, and Zhiwei Xiong. 2021. Space-time distillation for video super-resolution. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2113-2122.
|
| 597 |
+
[86] Zeyu Xiao, Zhiwei Xiong, Xueyang Fu, Dong Liu, and Zheng-Jun Zha. 2020. Space-time video super-resolution using temporal profiles. In Proceedings of the 28th ACM International Conference on Multimedia. 664-672.
|
| 598 |
+
[87] Wending Yan, Robby T Tan, and Dengxin Dai. 2020. Nighttime defogging using high-low frequency decomposition and grayscale-color networks. In European Conference on Computer Vision. Springer, 473-488.
|
| 599 |
+
[88] Minmin Yang, Jianchang Liu, and Zhengguo Li. 2018. Superpixel-based single nighttime image haze removal. IEEE transactions on multimedia 20, 11 (2018), 3008-3018.
|
| 600 |
+
[89] Yang Yang, Chaoyue Wang, Risheng Liu, Lin Zhang, Xiaojie Guo, and Dacheng Tao. 2022. Self-Augmented Unpaired Image Dehazing via Density and Depth Decomposition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2037-2046.
|
| 601 |
+
[90] Tian Ye, Sixiang Chen, Yun Liu, Yi Ye, Jinbin Bai, and Erkang Chen. 2022. Towards real-time high-definition image snow removal: Efficient pyramid network with asymmetrical encoder-decoder architecture. In Proceedings of the Asian Conference on Computer Vision. 366-381.
|
| 602 |
+
[91] Tian Ye, Sixiang Chen, Yun Liu, Yi Ye, Erkang Chen, and Yuche Li. 2022. Underwater light field retention: Neural rendering for underwater imaging. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 488-497.
|
| 603 |
+
[92] Tian Ye, Yunchen Zhang, Mingchao Jiang, Liang Chen, Yun Liu, Sixiang Chen, and Erkang Chen. 2022. Perceiving and modeling density for image dehazing. In European Conference on Computer Vision. Springer, 130-145.
|
| 604 |
+
[93] Hu Yu, Naishan Zheng, Man Zhou, Jie Huang, Zeyu Xiao, and Feng Zhao. 2022. Frequency and spatial dual guidance for image dehazing. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XIX. Springer, 181-198.
|
| 605 |
+
[94] Teng Yu, Kang Song, Pu Miao, Guowei Yang, Huan Yang, and Chenglizhao Chen. 2019. Nighttime single image dehazing via pixel-wise alpha blending. IEEE Access 7 (2019), 114619-114630.
|
| 606 |
+
|
| 607 |
+
[95] He Zhang and Vishal M Patel. 2018. Densely connected pyramid dehazing network. In Proceedings of the IEEE conference on computer vision and pattern recognition. 3194-3203.
|
| 608 |
+
[96] Jing Zhang, Yang Cao, Shuai Fang, Yu Kang, and Chang Wen Chen. 2017. Fast haze removal for nighttime image using maximum reflectance prior. In Proceedings of the IEEE conference on computer vision and pattern recognition. 7418-7426.
|
| 609 |
+
[97] Jing Zhang, Yang Cao, and Zengfu Wang. 2014. Nighttime haze removal based on a new imaging model. IEEE, 4557-4561.
|
| 610 |
+
[98] Jing Zhang, Yang Cao, Zheng-Jun Zha, and Dacheng Tao. 2020. Nighttime de-hazing with a synthetic benchmark. In Proceedings of the 28th ACM International
|
| 611 |
+
|
| 612 |
+
Conference on Multimedia. 2355-2363.
|
| 613 |
+
[99] Xin Zhang and Ying-Cong Chen. 2023. Adaptive domain generalization via online disagreement minimization. IEEE Transactions on Image Processing (2023).
|
| 614 |
+
[100] Shiyu Zhao, Lin Zhang, Ying Shen, and Yicong Zhou. 2021. RefineDNet: A weakly supervised refinement framework for single image dehazing. IEEE Transactions on Image Processing 30 (2021), 3391-3404.
|
| 615 |
+
[101] Zhuoran Zheng, Wenqi Ren, Xiaochun Cao, Xiaobin Hu, Tao Wang, Fenglong Song, and Xiuyi Jia. 2021. Ultra-high-definition image dehazing via multi-guided bilateral learning. In 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE, 16180-16189.
|
| 616 |
+
[102] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. 2017. Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks. In Computer Vision (ICCV), 2017 IEEE International Conference on.
|
| 617 |
+
|
| 618 |
+
Table 4: Comparison with the dataset of [98]; for PSNR and SSIM, higher values indicate better performance. We tested our method and Yan [87] on a GTX 3090 GPU using an image resolution of $512 \times 512$ to measure the runtime. The other numbers in the table are taken from [98].
|
| 619 |
+
|
| 620 |
+
<table><tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td rowspan="2">Venue</td><td colspan="2">NHR</td><td colspan="2">NHM</td><td colspan="2">NHC</td><td rowspan="2">Parameters</td><td rowspan="2">Time (s)</td></tr><tr><td>PSNR ↑</td><td>SSIM ↑</td><td>PSNR ↑</td><td>SSIM ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td rowspan="6">Opti.</td><td>Zhang NDIM [97]</td><td>ICIP'14</td><td>14.31</td><td>0.53</td><td>14.58</td><td>0.56</td><td>11.12</td><td>0.29</td><td>-</td><td>5.63</td></tr><tr><td>Li GS [45]</td><td>ICCV'15</td><td>17.32</td><td>0.63</td><td>16.84</td><td>0.69</td><td>18.84</td><td>0.55</td><td>-</td><td>22.52</td></tr><tr><td>Zhang FAST-MRP [96]</td><td>CVPR'17</td><td>16.95</td><td>0.67</td><td>13.85</td><td>0.61</td><td>19.17</td><td>0.58</td><td>-</td><td>0.236</td></tr><tr><td>Zhang MRP [96]</td><td>CVPR'17</td><td>19.93</td><td>0.78</td><td>17.74</td><td>0.71</td><td>23.02</td><td>0.69</td><td>-</td><td>1.769</td></tr><tr><td>Zhang OSFD [98]</td><td>MM'20</td><td>21.32</td><td>0.80</td><td>19.75</td><td>0.76</td><td>23.10</td><td>0.74</td><td>-</td><td>0.576</td></tr><tr><td>Zhang NDNET [98]</td><td>MM'20</td><td>28.74</td><td>0.95</td><td>21.55</td><td>0.91</td><td>26.12</td><td>0.85</td><td>-</td><td>0.0074</td></tr><tr><td>Learning</td><td>Yan [87]</td><td>ECCV'20</td><td>21.05</td><td>0.62</td><td>17.54</td><td>0.45</td><td>15.06</td><td>0.46</td><td>50M</td><td>0.97</td></tr><tr><td>Learning</td><td>Ours</td><td>MM'23</td><td>26.56</td><td>0.89</td><td>33.76</td><td>0.92</td><td>38.86</td><td>0.97</td><td>21M</td><td>1.20</td></tr></table>
|
| 621 |
+
|
| 622 |
+

|
| 623 |
+
|
| 624 |
+

|
| 625 |
+
|
| 626 |
+

|
| 627 |
+
|
| 628 |
+

|
| 629 |
+
|
| 630 |
+

|
| 631 |
+
Figure 12: With APSF, we can render glow images $I_{g}$ (bottom) on night clean images $I_{c}$ (top).
|
| 632 |
+
|
| 633 |
+

|
| 634 |
+
|
| 635 |
+

|
| 636 |
+
|
| 637 |
+

|
| 638 |
+
|
| 639 |
+

|
| 640 |
+
|
| 641 |
+

|
| 642 |
+
|
| 643 |
+

|
| 644 |
+
|
| 645 |
+

|
| 646 |
+
|
| 647 |
+

|
| 648 |
+
Figure 13: With APSF, we can render glow images $I_{g}$ (bottom) on night clean images $I_{c}$ (top).
|
| 649 |
+
|
| 650 |
+

|
| 651 |
+
|
| 652 |
+

|
| 653 |
+
|
| 654 |
+

|
| 655 |
+
|
| 656 |
+

|
| 657 |
+
|
| 658 |
+

|
| 659 |
+
|
| 660 |
+

|
| 661 |
+
|
| 662 |
+

|
| 663 |
+
|
| 664 |
+

|
| 665 |
+
Figure 14: With APSF, we can render glow images $I_{g}$ (bottom) on night clean images $I_{c}$ (top).
|
| 666 |
+
|
| 667 |
+

|
| 668 |
+
|
| 669 |
+

|
| 670 |
+
|
| 671 |
+

|
| 672 |
+
|
| 673 |
+

|
| 674 |
+
|
| 675 |
+

|
| 676 |
+
|
| 677 |
+

|
| 678 |
+
|
| 679 |
+

|
| 680 |
+
|
| 681 |
+

|
| 682 |
+
Figure 15: With APSF, we can render glow images $I_{g}$ (bottom) on night clean images $I_{c}$ (top).
|
| 683 |
+
|
| 684 |
+

|
| 685 |
+
|
| 686 |
+

|
| 687 |
+
|
| 688 |
+

|
data/2023/2308_01xxx/2308.01738/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0cbca206dec3d536ca0dd9ca8332688ad9908117fdfa7bb1a8bf4c52c742135d
|
| 3 |
+
size 1706153
|
data/2023/2308_01xxx/2308.01738/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_content_list.json
ADDED
|
@@ -0,0 +1,681 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "NulnsSeg: A Fully Annotated Dataset for Nuclei Instance Segmentation in H&E-Stained Histological Images",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
88,
|
| 8 |
+
74,
|
| 9 |
+
885,
|
| 10 |
+
167
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Amirreza Mahbod $^{1,2,*}$ , Christine Polak $^{2}$ , Katharina Feldmann $^{2}$ , Rumsha Khan $^{2}$ , Katharina Gelles $^{2}$ , Georg Dorffner $^{3}$ , Ramona Woitek $^{1}$ , Sepideh Hatamikia $^{1,4}$ , and Isabella Ellinger $^{2}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
86,
|
| 19 |
+
179,
|
| 20 |
+
906,
|
| 21 |
+
219
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "list",
|
| 27 |
+
"sub_type": "text",
|
| 28 |
+
"list_items": [
|
| 29 |
+
"<sup>1</sup>Research Center for Medical Image Analysis and Artificial Intelligence, Department of Medicine, Danube Private University, Krems an der Donau, 3500, Austria",
|
| 30 |
+
"$^{2}$ Institute for Pathophysiology and Allergy Research, Medical University of Vienna, Vienna, 1090, Austria",
|
| 31 |
+
"$^{3}$ Institute of Artificial Intelligence, Medical University of Vienna, Vienna, 1090, Austria",
|
| 32 |
+
"$^{4}$ Austrian Center for Medical Innovation and Technology, Wiener Neustadt, 2700, Austria",
|
| 33 |
+
"* corresponding author(s): Amirreza Mahbod (amirreza.mahbod@dp-uni.ac.at)"
|
| 34 |
+
],
|
| 35 |
+
"bbox": [
|
| 36 |
+
84,
|
| 37 |
+
233,
|
| 38 |
+
903,
|
| 39 |
+
325
|
| 40 |
+
],
|
| 41 |
+
"page_idx": 0
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"type": "text",
|
| 45 |
+
"text": "ABSTRACT",
|
| 46 |
+
"text_level": 1,
|
| 47 |
+
"bbox": [
|
| 48 |
+
86,
|
| 49 |
+
349,
|
| 50 |
+
200,
|
| 51 |
+
366
|
| 52 |
+
],
|
| 53 |
+
"page_idx": 0
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
"type": "text",
|
| 57 |
+
"text": "In computational pathology, automatic nuclei instance segmentation plays an essential role in whole slide image analysis. While many computerized approaches have been proposed for this task, supervised deep learning (DL) methods have shown superior segmentation performances compared to classical machine learning and image processing techniques. However, these models need fully annotated datasets for training which is challenging to acquire, especially in the medical domain. In this work, we release one of the biggest fully manually annotated datasets of nuclei in Hematoxylin and Eosin (H&E)-stained histological images, called NulnsSeg. This dataset contains 665 image patches with more than 30,000 manually segmented nuclei from 31 human and mouse organs. Moreover, for the first time, we provide additional ambiguous area masks for the entire dataset. These vague areas represent the parts of the images where precise and deterministic manual annotations are impossible, even for human experts. The dataset and detailed step-by-step instructions to generate related segmentation masks are publicly available at https://www.kaggle.com/datasets/ipateam/nuinsseg and https://github.com/masih4/NuInsSeg, respectively.",
|
| 58 |
+
"bbox": [
|
| 59 |
+
89,
|
| 60 |
+
386,
|
| 61 |
+
906,
|
| 62 |
+
541
|
| 63 |
+
],
|
| 64 |
+
"page_idx": 0
|
| 65 |
+
},
|
| 66 |
+
{
|
| 67 |
+
"type": "text",
|
| 68 |
+
"text": "Background & Summary",
|
| 69 |
+
"text_level": 1,
|
| 70 |
+
"bbox": [
|
| 71 |
+
86,
|
| 72 |
+
574,
|
| 73 |
+
321,
|
| 74 |
+
593
|
| 75 |
+
],
|
| 76 |
+
"page_idx": 0
|
| 77 |
+
},
|
| 78 |
+
{
|
| 79 |
+
"type": "text",
|
| 80 |
+
"text": "With the advent of brightfield and fluorescent digital scanners that produce and store whole slide images (WSIs) in digital form, there is a growing trend to exploit computerized methods for semi or fully-automatic WSI analysis<sup>1</sup>. In digital pathology and biomedical image analysis, nuclei segmentation plays a fundamental role in image interpretation<sup>2</sup>. Specific nuclei characteristics such as nuclei density or nucleus-to-cytoplasm ratio can be used for cell and tissue identification or for diagnostic purposes such as cancer grading<sup>2-4</sup>. Nuclei instance segmentation masks enable the extraction of valuable statistics for each nucleus<sup>5</sup>. While experts can manually segment nuclei, this is a tedious and complex procedure as thousands of instances can appear in a small patch of a WSI<sup>4,6</sup>. It is also worth mentioning that due to various artifacts such as folded tissues, out-of-focus scanning, considerable variations of nuclei staining intensities within a single image, and the complex nature of some histological samples (e.g., high density of nuclei), accurate and deterministic manual annotation is not always possible, even for human experts. The inter- and intraobserver variability reported in previous studies showing a low level of agreement in the annotation of cell nuclei by medical experts confirms this general problem<sup>5,7</sup>.",
|
| 81 |
+
"bbox": [
|
| 82 |
+
84,
|
| 83 |
+
599,
|
| 84 |
+
911,
|
| 85 |
+
767
|
| 86 |
+
],
|
| 87 |
+
"page_idx": 0
|
| 88 |
+
},
|
| 89 |
+
{
|
| 90 |
+
"type": "text",
|
| 91 |
+
"text": "In recent years, many semi- and fully-automatic computer-based methods have been proposed to perform nuclei instance segmentation automatically and more efficiently. A wide range of approaches from classical image processing to advanced machine learning methods have been proposed for this task $^{4,7}$ . Up to this point, supervised deep learning (DL) methods such as Mask R-CNN and its variants $^{8,9}$ , distance-based methods $^{10,11}$ and multi encoder-decoder approaches $^{6,12,13}$ have shown the best instance segmentation performances. However, to train these models, fully annotated datasets are required which is difficult to acquire in the medical domain $^{4,5,14}$ .",
|
| 92 |
+
"bbox": [
|
| 93 |
+
84,
|
| 94 |
+
768,
|
| 95 |
+
910,
|
| 96 |
+
859
|
| 97 |
+
],
|
| 98 |
+
"page_idx": 0
|
| 99 |
+
},
|
| 100 |
+
{
|
| 101 |
+
"type": "text",
|
| 102 |
+
"text": "A number of fully annotated nuclei instance segmentation datasets are available. These datasets were introduced for various types of staining such as Hematoxylin and Eosin (H&E), immunohistochemical and immunofluorescence stainings[4,15-17]. The most common staining type in routine pathology is H&E-staining. Therefore, most introduced datasets were based on this staining method. Although these datasets are valuable contributions to the research field and help researchers to develop better",
|
| 103 |
+
"bbox": [
|
| 104 |
+
84,
|
| 105 |
+
861,
|
| 106 |
+
911,
|
| 107 |
+
925
|
| 108 |
+
],
|
| 109 |
+
"page_idx": 0
|
| 110 |
+
},
|
| 111 |
+
{
|
| 112 |
+
"type": "aside_text",
|
| 113 |
+
"text": "arXiv:2308.01760v1 [eess.IV] 3 Aug 2023",
|
| 114 |
+
"bbox": [
|
| 115 |
+
22,
|
| 116 |
+
263,
|
| 117 |
+
60,
|
| 118 |
+
715
|
| 119 |
+
],
|
| 120 |
+
"page_idx": 0
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"type": "table",
|
| 124 |
+
"img_path": "images/8bab4dff9486507268b218cd80d8bd2e51453bb251fb3a3698117e2c34ec9241.jpg",
|
| 125 |
+
"table_caption": [
|
| 126 |
+
"Table 1. Publicly available H&E-stained nuclei segmentation datasets. In the table, TCGA refers to The Cancer Genome Atlas, UHCW refers to University Hospitals Coventry and Warwickshire, and MUV refers to Medical University of Vienna. The last row of the table represents the NuInsSeg dataset introduced in this work."
|
| 127 |
+
],
|
| 128 |
+
"table_footnote": [],
|
| 129 |
+
"table_body": "<table><tr><td>dataset</td><td>vague mask</td><td># image tiles</td><td># nuclei</td><td>magnification</td><td># organs</td><td>tile size (pixels)</td><td>source</td></tr><tr><td>Kumar et al.4</td><td>X</td><td>30</td><td>21,623</td><td>40×</td><td>7</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSeg7</td><td>X</td><td>44</td><td>28,846</td><td>40×</td><td>9</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSAC17</td><td>partial</td><td>209</td><td>31,411</td><td>40×</td><td>4</td><td>81 × 113 to 1422 × 2162</td><td>TCGA</td></tr><tr><td>CoNSEP6</td><td>X</td><td>41</td><td>24,319</td><td>40×</td><td>1</td><td>1000 × 1000</td><td>UHCW</td></tr><tr><td>CPM-1524</td><td>X</td><td>15</td><td>2,905</td><td>40×, 20×</td><td>2</td><td>400 × 400, 600 × 1000</td><td>TCGA</td></tr><tr><td>CPM-1724</td><td>X</td><td>32</td><td>7,570</td><td>40×, 20×</td><td>4</td><td>500 × 500 to 600 × 600</td><td>TCGA</td></tr><tr><td>TNBC10</td><td>X</td><td>50</td><td>4,022</td><td>40×</td><td>1</td><td>512 × 512</td><td>Curie Inst.</td></tr><tr><td>CRCHisto25</td><td>X</td><td>100</td><td>29,756</td><td>20×</td><td>1</td><td>500 × 500</td><td>UHCW</td></tr><tr><td>Janowczyk26</td><td>X</td><td>143</td><td>12,000</td><td>40×</td><td>1</td><td>2000 × 2000</td><td>n/a</td></tr><tr><td>Crowdsource27</td><td>X</td><td>64</td><td>2,532</td><td>40×</td><td>1</td><td>400 × 400</td><td>TCGA</td></tr><tr><td>CryoNuSeg5</td><td>X</td><td>30</td><td>7,596</td><td>40×</td><td>10</td><td>512 × 512</td><td>TCGA</td></tr><tr><td>NuInsSeg</td><td>✓</td><td>665</td><td>30.698</td><td>40×</td><td>31</td><td>512 × 512</td><td>MUV</td></tr></table>",
|
| 130 |
+
"bbox": [
|
| 131 |
+
86,
|
| 132 |
+
135,
|
| 133 |
+
918,
|
| 134 |
+
357
|
| 135 |
+
],
|
| 136 |
+
"page_idx": 1
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"type": "text",
|
| 140 |
+
"text": "segmentation models, providing more annotated datasets from different organs and centers to cover more data variability is still of high importance. Table 1 shows the most prominent fully manually annotated H&E-stained nuclei segmentation datasets that have been actively used by the research community in the past few years. Besides these datasets, some semi-automatically generated datasets such as PanNuke $^{18}$ , Lizard $^{19}$ and Hou et al. dataset $^{20}$ have also been introduced in the past. To generate these datasets, various approaches, such as using trained backbone models or point annotation, were exploited $^{21-23}$ . However, training models based on semi-automatically generated datasets may introduce a hidden bias towards the reference model instead of learning the true human expert style for nuclei instance segmentation.",
|
| 141 |
+
"bbox": [
|
| 142 |
+
86,
|
| 143 |
+
380,
|
| 144 |
+
911,
|
| 145 |
+
487
|
| 146 |
+
],
|
| 147 |
+
"page_idx": 1
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"type": "text",
|
| 151 |
+
"text": "In this work, we introduce NuInsSeg, one of the most extensive publicly available datasets for nuclei segmentation in H&E-stained histological images. The primary statistic of this dataset is presented in the last row of Table 1. Our dataset can be used alone to develop, test, and evaluate machine learning-based algorithms for nuclei instance segmentation or can be used as an independent test set to estimate the generalization capability of the already developed nuclei instance segmentation methods.",
|
| 152 |
+
"bbox": [
|
| 153 |
+
86,
|
| 154 |
+
486,
|
| 155 |
+
911,
|
| 156 |
+
547
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 1
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "text",
|
| 162 |
+
"text": "Methods",
|
| 163 |
+
"text_level": 1,
|
| 164 |
+
"bbox": [
|
| 165 |
+
86,
|
| 166 |
+
564,
|
| 167 |
+
174,
|
| 168 |
+
580
|
| 169 |
+
],
|
| 170 |
+
"page_idx": 1
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"type": "text",
|
| 174 |
+
"text": "Sample preparation",
|
| 175 |
+
"text_level": 1,
|
| 176 |
+
"bbox": [
|
| 177 |
+
86,
|
| 178 |
+
589,
|
| 179 |
+
246,
|
| 180 |
+
604
|
| 181 |
+
],
|
| 182 |
+
"page_idx": 1
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"type": "text",
|
| 186 |
+
"text": "The NuInsSeg dataset contains fully annotated brightfield images for nuclei instance segmentation. The H&E-stained sections of 23 different human tissues were provided by Associate Professor Adolf Ellinger, PhD from the specimen collection of the Department of Cell Biology and Ultrastructural Research, Center for Anatomy and Cell Biology, Medical University of Vienna. We only obtained the stained tissue sections, not the original tissues. These images were only used for teaching purposes for a long time where no ethic votum applied. Some of the human tissues were formaldehyde-fixed, embedded in celloidin and sectioned at $15\\approx 20\\mu m$ (jejunum, kidney, liver, oesophagus, palatine tonsil, pancreas, placenta, salivary gland, spleen, tongue). The other human tissues were formaldehyde-fixed and paraffin-embedded (FFPE) and sectioned at $4\\approx 5\\mu m$ (cerebellum, cerebrum, colon, epiglottis, lung, melanoma, muscle, peritoneum, stomach (cardia), stomach (pylorus), testis, umbilical cord, and urinary bladder). Mouse tissue samples from bone (femur), fat (subscapularis), heart, kidney, liver, muscle (tibialis anterior muscle), spleen, and thymus were obtained from 8-week-old male C57BL/6J mice28. $4\\mu m$ sections of the FFPE tissue samples were stained with H&E (ROTH, Austria) and coverslipped with Entellan (Merck, Germany).",
|
| 187 |
+
"bbox": [
|
| 188 |
+
86,
|
| 189 |
+
604,
|
| 190 |
+
911,
|
| 191 |
+
773
|
| 192 |
+
],
|
| 193 |
+
"page_idx": 1
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"type": "text",
|
| 197 |
+
"text": "Sample acquisition",
|
| 198 |
+
"text_level": 1,
|
| 199 |
+
"bbox": [
|
| 200 |
+
86,
|
| 201 |
+
786,
|
| 202 |
+
241,
|
| 203 |
+
801
|
| 204 |
+
],
|
| 205 |
+
"page_idx": 1
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"type": "text",
|
| 209 |
+
"text": "WSIs were generated with a TissueFAXS (TissueGnostics, Austria) scanning system composed of an Axio Imager Z1 (Zeiss, Oberkochen, Germany), equipped with a Plan-Neofluar $40 \\times 0.75$ objective ( $40 \\times$ air) in combination with the TissueFAXS Image Acquisition and Management Software (Version 6.0, TissueGnostics, Austria). Images were acquired at 8-bit resolution using a colour camera (Baumer HXG40c).",
|
| 210 |
+
"bbox": [
|
| 211 |
+
86,
|
| 212 |
+
801,
|
| 213 |
+
911,
|
| 214 |
+
863
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 1
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "text",
|
| 220 |
+
"text": "Field of view and patch selection",
|
| 221 |
+
"text_level": 1,
|
| 222 |
+
"bbox": [
|
| 223 |
+
86,
|
| 224 |
+
876,
|
| 225 |
+
346,
|
| 226 |
+
891
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "The scanning system stores individual $2048 \\times 2048$ Field of Views (FOV) with their respective locations in order to be able to combine them into a WSI. Instead of using WSIs, we utilized the FOVs to generate the dataset. A senior cell biologist selected",
|
| 233 |
+
"bbox": [
|
| 234 |
+
86,
|
| 235 |
+
891,
|
| 236 |
+
911,
|
| 237 |
+
925
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "page_number",
|
| 243 |
+
"text": "2/7",
|
| 244 |
+
"bbox": [
|
| 245 |
+
883,
|
| 246 |
+
946,
|
| 247 |
+
906,
|
| 248 |
+
958
|
| 249 |
+
],
|
| 250 |
+
"page_idx": 1
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"type": "text",
|
| 254 |
+
"text": "the most representative FOVs for each human and mouse WSI. From each FOV, a $512 \\times 512$ pixel image was extracted by central cropping. These images were saved in lossless Portable Network Graphics (PNG) format. In total, 665 raw image patches were created to build the NuInsSeg dataset.",
|
| 255 |
+
"bbox": [
|
| 256 |
+
86,
|
| 257 |
+
80,
|
| 258 |
+
911,
|
| 259 |
+
127
|
| 260 |
+
],
|
| 261 |
+
"page_idx": 2
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"type": "text",
|
| 265 |
+
"text": "Generation of ground truth, auxiliary, and ambiguous area segmentation masks",
|
| 266 |
+
"text_level": 1,
|
| 267 |
+
"bbox": [
|
| 268 |
+
86,
|
| 269 |
+
138,
|
| 270 |
+
709,
|
| 271 |
+
154
|
| 272 |
+
],
|
| 273 |
+
"page_idx": 2
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"type": "text",
|
| 277 |
+
"text": "We used ImageJ $^{28}$ (version 1.53, National Institutes of Health, USA) to generate the ground truth segmentation masks. We followed the same procedure suggested in $^{5}$ to label nuclei. We used the region of interest (ROI) manager tool (available on the Analysis tab) and the freehand option to delineate the nuclei borders. We manually draw the nuclei border for each instance until all nuclei were segmented for a given image patch. Although some semi-automatic tools such as AnnotatorJ with U-Net backbone $^{29}$ could be used to speed up the annotation, we stuck to fully manual segmentation to prevent any hidden bias toward the semi-autonomic annotation method. The delineated ROIs were saved as a zip file, and the Matlab software (version 2020a) was then used to create binary and labeled segmentation images (as PNG files). Besides the original raw image patches, binary and labeled segmentation masks, we also publish a number of auxiliary segmentation masks that can be useful for developing computer-based segmentation models. Auxiliary segmentation masks, including border-removed binary masks, elucidation distance maps of nuclei, weighted binary masks (where higher weights are assigned in the borders of touching objects), are published along with our dataset. The developed codes to generate these masks are available on the published GitHub repository. Moreover, we annotated the ambiguous areas in all images of the dataset for the first time. Indicating ambiguous regions was partially provided in the test set of the MoNuSAC challenge $^{30}$ , but in this work, we provide it for the entire dataset. We used an identical procedure and software to create the ambiguous segmentation masks. These vague areas consist of image parts with very complex appearances where the accurate and reliable manual annotation is impossible. This is potentially helpful for in-depth analysis and evaluation of any automatic model for nuclei instance segmentation. Manual segmentation of nuclei and ambiguous areas detection were performed by three students with a background in cell biology. The annotations were then controlled by a senior cell biologist and corrected when necessary. Some example images, along with related segmentation and vague masks, are shown in Figure 1.",
|
| 278 |
+
"bbox": [
|
| 279 |
+
84,
|
| 280 |
+
155,
|
| 281 |
+
911,
|
| 282 |
+
444
|
| 283 |
+
],
|
| 284 |
+
"page_idx": 2
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"type": "image",
|
| 288 |
+
"img_path": "images/783c9f17fbb00f13f61b3b7ef44421167305e6cdb3aaed8dabb6c8301a41c08e.jpg",
|
| 289 |
+
"image_caption": [
|
| 290 |
+
"Figure 1. Example images and manual segmentation masks of three human organs from the NuInsSeg dataset. The first three columns show the original images, the labeled and the binary mask, respectively. The represented images in the fourth to sixth columns show auxiliary segmentation masks that can be beneficial for the development of segmentation algorithms. The last column shows the vague areas where accurate and deterministic manual segmentation is impossible. Some images do not contain ambiguous regions, such as the represented spleen image in the last row."
|
| 291 |
+
],
|
| 292 |
+
"image_footnote": [],
|
| 293 |
+
"bbox": [
|
| 294 |
+
96,
|
| 295 |
+
458,
|
| 296 |
+
908,
|
| 297 |
+
750
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 2
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "text",
|
| 303 |
+
"text": "Data Records",
|
| 304 |
+
"text_level": 1,
|
| 305 |
+
"bbox": [
|
| 306 |
+
86,
|
| 307 |
+
869,
|
| 308 |
+
218,
|
| 309 |
+
883
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 2
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "The NuInsSeg dataset is publicly available on a published page on the Kaggle platform (https://www.kaggle.com/datasets/ipateam/nuinsseg). The related code to generate the binary, labeled, and auxiliary segmentation masks from",
|
| 316 |
+
"bbox": [
|
| 317 |
+
86,
|
| 318 |
+
892,
|
| 319 |
+
911,
|
| 320 |
+
922
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 2
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "page_number",
|
| 326 |
+
"text": "3/7",
|
| 327 |
+
"bbox": [
|
| 328 |
+
883,
|
| 329 |
+
946,
|
| 330 |
+
906,
|
| 331 |
+
957
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 2
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "table",
|
| 337 |
+
"img_path": "images/cafd95bf0030aa8658a8813cc88459be9cd536e9c3614d8da9a0e42b096109db.jpg",
|
| 338 |
+
"table_caption": [
|
| 339 |
+
"Table 2. Details of the NuInsSeg dataset per human and mouse organ"
|
| 340 |
+
],
|
| 341 |
+
"table_footnote": [],
|
| 342 |
+
"table_body": "<table><tr><td>Organ</td><td>Type</td><td># Images</td><td># Nuclei</td><td>Avg. #Nuclei per image</td></tr><tr><td>Cerebellum</td><td>human</td><td>12</td><td>549</td><td>45.8</td></tr><tr><td>Cerebrum</td><td>human</td><td>12</td><td>146</td><td>12.2</td></tr><tr><td>Colon</td><td>human</td><td>12</td><td>349</td><td>29.1</td></tr><tr><td>Epiglottis</td><td>human</td><td>11</td><td>228</td><td>20.7</td></tr><tr><td>Jejunum</td><td>human</td><td>10</td><td>874</td><td>87.4</td></tr><tr><td>Kidney</td><td>human</td><td>11</td><td>1222</td><td>111.1</td></tr><tr><td>Liver</td><td>human</td><td>40</td><td>1370</td><td>34.3</td></tr><tr><td>Lung</td><td>human</td><td>11</td><td>318</td><td>28.9</td></tr><tr><td>Melanoma</td><td>human</td><td>12</td><td>533</td><td>44.4</td></tr><tr><td>Muscle</td><td>human</td><td>9</td><td>127</td><td>14.1</td></tr><tr><td>Oesophagus</td><td>human</td><td>47</td><td>2046</td><td>43.5</td></tr><tr><td>Palatine tonsil</td><td>human</td><td>12</td><td>1045</td><td>87.1</td></tr><tr><td>Pancreas</td><td>human</td><td>44</td><td>2178</td><td>49.5</td></tr><tr><td>Peritoneum</td><td>human</td><td>12</td><td>468</td><td>39.0</td></tr><tr><td>Placenta</td><td>human</td><td>40</td><td>1966</td><td>49.2</td></tr><tr><td>Salivary gland</td><td>human</td><td>44</td><td>3129</td><td>71.1</td></tr><tr><td>Spleen</td><td>human</td><td>34</td><td>3286</td><td>96.7</td></tr><tr><td>Stomach (cardia)</td><td>human</td><td>12</td><td>671</td><td>55.9</td></tr><tr><td>Stomach (pylorus)</td><td>human</td><td>12</td><td>441</td><td>36.8</td></tr><tr><td>Testis</td><td>human</td><td>12</td><td>380</td><td>31.7</td></tr><tr><td>Tongue</td><td>human</td><td>40</td><td>1415</td><td>35.4</td></tr><tr><td>Umbilical cord</td><td>human</td><td>11</td><td>106</td><td>9.6</td></tr><tr><td>Urinary bladder</td><td>human</td><td>12</td><td>400</td><td>33.3</td></tr><tr><td>Bone (femur)</td><td>mouse</td><td>6</td><td>757</td><td>126.2</td></tr><tr><td>Fat (subscapularis)</td><td>mouse</td><td>42</td><td>549</td><td>13.1</td></tr><tr><td>Heart</td><td>mouse</td><td>28</td><td>738</td><td>26.4</td></tr><tr><td>Kidney</td><td>mouse</td><td>40</td><td>1597</td><td>39.9</td></tr><tr><td>Liver</td><td>mouse</td><td>36</td><td>646</td><td>17.9</td></tr><tr><td>Muscle (tibialis anterior muscle)</td><td>mouse</td><td>28</td><td>165</td><td>5.9</td></tr><tr><td>Spleen</td><td>mouse</td><td>7</td><td>1657</td><td>236.7</td></tr><tr><td>Thymus</td><td>mouse</td><td>6</td><td>1342</td><td>223.7</td></tr><tr><td>All</td><td>human</td><td>472</td><td>23247</td><td>49.3</td></tr><tr><td>All</td><td>mouse</td><td>193</td><td>7451</td><td>38.6</td></tr><tr><td>All</td><td>human + mouse</td><td>665</td><td>30698</td><td>46.2</td></tr></table>",
|
| 343 |
+
"bbox": [
|
| 344 |
+
88,
|
| 345 |
+
104,
|
| 346 |
+
730,
|
| 347 |
+
657
|
| 348 |
+
],
|
| 349 |
+
"page_idx": 3
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"type": "text",
|
| 353 |
+
"text": "the ImageJ ROI files is also available on the NuInsSeg published GitHub repository https://github.com/masih4/NuInsSeg. This dataset contains 665 image patches with 30,698 segmented nuclei from 31 human and mouse organs. The organ-specific details of the generated dataset are shown in Table 2. As shown in the table, the nuclei density in some tissues/organs (e.g., mouse spleen) is much higher in comparison to other tissues/organs (e.g., mouse muscle).",
|
| 354 |
+
"bbox": [
|
| 355 |
+
86,
|
| 356 |
+
678,
|
| 357 |
+
911,
|
| 358 |
+
741
|
| 359 |
+
],
|
| 360 |
+
"page_idx": 3
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"type": "text",
|
| 364 |
+
"text": "Technical Validation",
|
| 365 |
+
"text_level": 1,
|
| 366 |
+
"bbox": [
|
| 367 |
+
86,
|
| 368 |
+
755,
|
| 369 |
+
282,
|
| 370 |
+
771
|
| 371 |
+
],
|
| 372 |
+
"page_idx": 3
|
| 373 |
+
},
|
| 374 |
+
{
|
| 375 |
+
"type": "text",
|
| 376 |
+
"text": "To create a baseline segmentation benchmark, we randomly split the dataset into five folds with an equal number of images per fold (i.e., 133 images per fold). We used the Scikit-learn Python package to create the folds with a fixed random state to reproduce the results (splitting code is available on the Kaggle and Github pages). Based on the created folds, we developed a number of DL-based segmentation models and evaluated their performance based on five-fold cross-validation. To facilitate to use of our dataset and developing segmentation models, we published our codes for two standard segmentation models, namely shallow U-Net and deep U-Net models<sup>31</sup> on the Kaggle platform<sup>1</sup>. The model architectures of the shallow U-Net and deep U-Net are very similar to the original U-Net model but we added drop out layers between all convolutional layers in both encoder and decoder parts. Four and five convolutional blocks were used in the encoder and decoder parts of the shallow U-Net",
|
| 377 |
+
"bbox": [
|
| 378 |
+
86,
|
| 379 |
+
777,
|
| 380 |
+
911,
|
| 381 |
+
902
|
| 382 |
+
],
|
| 383 |
+
"page_idx": 3
|
| 384 |
+
},
|
| 385 |
+
{
|
| 386 |
+
"type": "page_footnote",
|
| 387 |
+
"text": "1https://www.kaggle.com/datasets/ipateam/nuinsseg/code?datasetId=1911713",
|
| 388 |
+
"bbox": [
|
| 389 |
+
104,
|
| 390 |
+
907,
|
| 391 |
+
671,
|
| 392 |
+
924
|
| 393 |
+
],
|
| 394 |
+
"page_idx": 3
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
"type": "page_number",
|
| 398 |
+
"text": "4/7",
|
| 399 |
+
"bbox": [
|
| 400 |
+
883,
|
| 401 |
+
946,
|
| 402 |
+
908,
|
| 403 |
+
958
|
| 404 |
+
],
|
| 405 |
+
"page_idx": 3
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"type": "table",
|
| 409 |
+
"img_path": "images/0a02ec9a65264eeda3f715f75af763be3311cf68d9f6b264b7e24a30edf3ce0c.jpg",
|
| 410 |
+
"table_caption": [
|
| 411 |
+
"Table 3. NuInsSeg segmentation benchmark results based on five-fold cross-validation"
|
| 412 |
+
],
|
| 413 |
+
"table_footnote": [],
|
| 414 |
+
"table_body": "<table><tr><td>Model</td><td>Reference</td><td># Parameters</td><td>Avg.Dice (%)</td><td>Avg. AJI (%)</td><td>Avg. PQ (%)</td></tr><tr><td>Shallow U-Net</td><td>31</td><td>1.9 million</td><td>78.8</td><td>50.5</td><td>42.7</td></tr><tr><td>Deep U-Net</td><td>31</td><td>7.7 million</td><td>79.7</td><td>49.4</td><td>40.4</td></tr><tr><td>Attention U-Net</td><td>32</td><td>2.3 million</td><td>80.5</td><td>45.7</td><td>36.4</td></tr><tr><td>Residual attention U-Net</td><td>32,33</td><td>2.4 million</td><td>81.4</td><td>46.2</td><td>36.9</td></tr><tr><td>Two-stage U-Net</td><td>34</td><td>3.9 million</td><td>76.6</td><td>52.8</td><td>47.2</td></tr><tr><td>Dual decoder U-Net</td><td>13</td><td>3.5 million</td><td>79.4</td><td>55.9</td><td>51.3</td></tr></table>",
|
| 415 |
+
"bbox": [
|
| 416 |
+
88,
|
| 417 |
+
104,
|
| 418 |
+
787,
|
| 419 |
+
218
|
| 420 |
+
],
|
| 421 |
+
"page_idx": 4
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "text",
|
| 425 |
+
"text": "and deep U-Net, respectively. The model architecture of these two models is publicly available at our published kernels on our NuInsSeg page on the Kaggle platform. Besides these two models, we also evaluated the performance of the attention U-Net $^{32}$ , residual attention U-Net $^{32,33}$ , two-stage U-Net $^{34}$ , and the dual decoder U-Net $^{13}$ models. The architectural details of these models were published in the respective articles. We performed an identical five-fold cross-validation scheme in all experiments to compare the results. For evaluation, we utilized similarity Dice score, aggregate Jaccard index (AJI), and panoptic quality (PQ) scores as suggested in former studies $^{5,6,35}$ . The segmentation performance of the aforementioned models is reported in Table 3. As the results show, residual attention U-Net delivers the best overall Dice score between these models, but dual decoder U-Net provides the best average AJI and PQ scores. Interestingly, the dual decoder model achieved the best overall PQ score in the MoNuSAC post challenge leaderborad $^{17,36}$ , and it also achieved the best instance-based segmentation scores for the NuInsSeg dataset. It should be noted that these results can be potentially improved by using well-known strategies such as assembling $^{37}$ , stain augmentation $^{38}$ or test time augmentation $^{39}$ but achieving the best segmentation scores is out of the focus of this study. Instead, these results could be used as baseline segmentation scores for comparison to other segmentation models in the future, given that the same five-fold cross-validation scheme is used.",
|
| 426 |
+
"bbox": [
|
| 427 |
+
86,
|
| 428 |
+
243,
|
| 429 |
+
911,
|
| 430 |
+
441
|
| 431 |
+
],
|
| 432 |
+
"page_idx": 4
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"text": "Usage Notes",
|
| 437 |
+
"text_level": 1,
|
| 438 |
+
"bbox": [
|
| 439 |
+
86,
|
| 440 |
+
460,
|
| 441 |
+
212,
|
| 442 |
+
478
|
| 443 |
+
],
|
| 444 |
+
"page_idx": 4
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"type": "text",
|
| 448 |
+
"text": "Our dataset, including raw image patches, binary and labeled segmentation masks, and other auxiliary segmentation masks, is publicly available on the published NuInsSeg page on the Kaggle platform. Step-by-step instructions to perform manual annotations and related codes to generate the main and auxiliary segmentation masks are available at our published Github repository. We also provide three kernels on the Kaggle platform to facilitate using our dataset. One kernel is devoted to explanatory data analysis (EDA), where interested researchers can visualize and explore different statistics of the NuInsSeg dataset. The other two kernels consist of related codes to perform five-fold cross-validation based on two DL-based models, namely shallow U-Net and deep U-Net, as described in the previous section. Different Python packages were used in the coding of these kernels. To report statistics and visualize data in the EDA kernel, we mainly used Pandas (version 1.3.5) and Matplotlib (version 3.5.1) Python packages. For the DL-based model development, we mainly used Tensorflow (version 2.6.2), Keras (version 2.6.0) frameworks, and finally, for performing cross-validation, pre-and post-processing, and augmentation, Scikit-learn (version 0.23.2), Scikit-image (version 0.19.1) and Albumentation (version 1.1.0) were exploited, respectively.",
|
| 449 |
+
"bbox": [
|
| 450 |
+
86,
|
| 451 |
+
484,
|
| 452 |
+
911,
|
| 453 |
+
651
|
| 454 |
+
],
|
| 455 |
+
"page_idx": 4
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"type": "text",
|
| 459 |
+
"text": "We explicitly published our dataset on the Kaggle platform, where limited free computational resources are available. Therefore, interested researchers can directly access our dataset and develop ML- or DL-based algorithms to perform nuclei instance segmentation on the NuInsSeg dataset. However, there is no limitation to downloading and saving the dataset on local systems and performing analysis using local or other cloud-based computational resources.",
|
| 460 |
+
"bbox": [
|
| 461 |
+
86,
|
| 462 |
+
651,
|
| 463 |
+
911,
|
| 464 |
+
710
|
| 465 |
+
],
|
| 466 |
+
"page_idx": 4
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"type": "text",
|
| 470 |
+
"text": "It is worth mentioning that the NuInsSeg dataset can be used alone to train, validate, and test any segmentation algorithm, or it can be used as an independent test set to measure the generalization capability of already developed segmentation models.",
|
| 471 |
+
"bbox": [
|
| 472 |
+
86,
|
| 473 |
+
712,
|
| 474 |
+
911,
|
| 475 |
+
743
|
| 476 |
+
],
|
| 477 |
+
"page_idx": 4
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"type": "text",
|
| 481 |
+
"text": "Code availability",
|
| 482 |
+
"text_level": 1,
|
| 483 |
+
"bbox": [
|
| 484 |
+
86,
|
| 485 |
+
762,
|
| 486 |
+
246,
|
| 487 |
+
780
|
| 488 |
+
],
|
| 489 |
+
"page_idx": 4
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"text": "The dataset and required code to generate the dataset are publicly available on Kaggle (https://www.kaggle.com/datasets/ipateam/nuinsseg) and GitHub (https://github.com/masih4/NuInsSeg), respectively.",
|
| 494 |
+
"bbox": [
|
| 495 |
+
86,
|
| 496 |
+
787,
|
| 497 |
+
911,
|
| 498 |
+
818
|
| 499 |
+
],
|
| 500 |
+
"page_idx": 4
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"text": "Acknowledgements",
|
| 505 |
+
"text_level": 1,
|
| 506 |
+
"bbox": [
|
| 507 |
+
86,
|
| 508 |
+
837,
|
| 509 |
+
277,
|
| 510 |
+
854
|
| 511 |
+
],
|
| 512 |
+
"page_idx": 4
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"type": "text",
|
| 516 |
+
"text": "This work was supported by the Austrian Research Promotion Agency (FFG), No.872636. We would like to thank NVIDIA for their generous GPU donation and the TissueGnostics support team (https://www.tissuegnostics.com/) for their valuable advice to generate the NuInsSeg dataset. Moreover, we would like to thank Adolf Ellinger (MedUni Vienna) for providing the human tissue sections and Peter Pietschmann (MedUni Vienna) who provided the mouse samples.",
|
| 517 |
+
"bbox": [
|
| 518 |
+
86,
|
| 519 |
+
861,
|
| 520 |
+
911,
|
| 521 |
+
924
|
| 522 |
+
],
|
| 523 |
+
"page_idx": 4
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"type": "page_number",
|
| 527 |
+
"text": "5/7",
|
| 528 |
+
"bbox": [
|
| 529 |
+
883,
|
| 530 |
+
946,
|
| 531 |
+
906,
|
| 532 |
+
957
|
| 533 |
+
],
|
| 534 |
+
"page_idx": 4
|
| 535 |
+
},
|
| 536 |
+
{
|
| 537 |
+
"type": "text",
|
| 538 |
+
"text": "Author contributions statement",
|
| 539 |
+
"text_level": 1,
|
| 540 |
+
"bbox": [
|
| 541 |
+
86,
|
| 542 |
+
78,
|
| 543 |
+
383,
|
| 544 |
+
95
|
| 545 |
+
],
|
| 546 |
+
"page_idx": 5
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "text",
|
| 550 |
+
"text": "A.M. and I.E. conceptualized the paper idea, K.G. prepared the H&E-stained mouse sections and scanned all tissue sections, A.M., C.L., R.K., K.F., and I.E. performed annotations and controlled the segmentation masks, I.E. obtained funding, A.M conducted the experiments and reported the results, and G.D., S.H., R.W., and I.E. supervised the entire work. All authors reviewed the manuscript.",
|
| 551 |
+
"bbox": [
|
| 552 |
+
86,
|
| 553 |
+
102,
|
| 554 |
+
911,
|
| 555 |
+
164
|
| 556 |
+
],
|
| 557 |
+
"page_idx": 5
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "text",
|
| 561 |
+
"text": "Competing interests",
|
| 562 |
+
"text_level": 1,
|
| 563 |
+
"bbox": [
|
| 564 |
+
86,
|
| 565 |
+
178,
|
| 566 |
+
282,
|
| 567 |
+
196
|
| 568 |
+
],
|
| 569 |
+
"page_idx": 5
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"type": "text",
|
| 573 |
+
"text": "The authors declare no competing interests.",
|
| 574 |
+
"bbox": [
|
| 575 |
+
86,
|
| 576 |
+
200,
|
| 577 |
+
375,
|
| 578 |
+
217
|
| 579 |
+
],
|
| 580 |
+
"page_idx": 5
|
| 581 |
+
},
|
| 582 |
+
{
|
| 583 |
+
"type": "text",
|
| 584 |
+
"text": "References",
|
| 585 |
+
"text_level": 1,
|
| 586 |
+
"bbox": [
|
| 587 |
+
89,
|
| 588 |
+
232,
|
| 589 |
+
197,
|
| 590 |
+
248
|
| 591 |
+
],
|
| 592 |
+
"page_idx": 5
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"type": "list",
|
| 596 |
+
"sub_type": "ref_text",
|
| 597 |
+
"list_items": [
|
| 598 |
+
"1. Cui, M. & Zhang, D. Y. Artificial intelligence and computational pathology. Lab. Investig. 101, 412-422, 10.1038/s41374-020-00514-0 (2021).",
|
| 599 |
+
"2. Skinner, B. M. & Johnson, E. E. Nuclear morphologies: their diversity and functional relevance. Chromosoma 126, 195-212, 10.1007/s00412-016-0614-5 (2017).",
|
| 600 |
+
"3. Chan, J. K. C. The wonderful colors of the hematoxylin-eosin stain in diagnostic surgical pathology. Int. J. Surg. Pathol. 22, 12-32, 10.1177/1066896913517939 (2014).",
|
| 601 |
+
"4. Kumar, N. et al. A dataset and a technique for generalized nuclear segmentation for computational pathology. IEEE Transactions on Med. Imaging 36, 1550-1560, 10.1109/TMI.2017.2677499 (2017).",
|
| 602 |
+
"5. Mahmod, A. et al. CryoNuSeg: A dataset for nuclei instance segmentation of cryosectioned H&E-stained histological images. Comput. Biol. Medicine 132, 104349, 10.1016/j.compbiomed.2021.104349 (2021).",
|
| 603 |
+
"6. Graham, S. et al. Hover-Net: Simultaneous segmentation and classification of nuclei in multi-tissue histology images. Med. Image Analysis 58, 101563, 10.1016/j.media.2019.101563 (2019).",
|
| 604 |
+
"7. Kumar, N. et al. A multi-organ nucleus segmentation challenge. IEEE Transactions on Med. Imaging 39, 1380–1391, 10.1109/TMI.2019.2947628 (2020).",
|
| 605 |
+
"8. He, K., Gkioxari, G., Dollár, P. & Girshick, R. Mask R-CNN. In International Conference on Computer Vision, 2980-2988 (IEEE, 2017).",
|
| 606 |
+
"9. Bancher, B., Mahbod, A., Ellinger, I., Ecker, R. & Dorffner, G. Improving Mask R-CNN for nuclei instance segmentation in hematoxylin & eosin-stained histological images. In MICCAI Workshop on Computational Pathology, vol. 156, 20-35 (2021).",
|
| 607 |
+
"10. Naylor, P., Laé, M., Reyal, F. & Walter, T. Segmentation of nuclei in histopathology images by deep regression of the distance map. IEEE Transactions on Med. Imaging 38, 448-459, 10.1109/TMI.2018.2865709 (2019).",
|
| 608 |
+
"11. Naylor, P., Laé, M., Reyal, F. & Walter, T. Nuclei segmentation in histopathology images using deep neural networks. In IEEE International Symposium on Biomedical Imaging, 933-936, 10.1109/ISBI.2017.7950669 (2017).",
|
| 609 |
+
"12. Zhao, B. et al. Triple u-net: Hematoxylin-aware nuclei segmentation with progressive dense feature aggregation. Med. Image Analysis 65, 101786, 10.1016/j.media.2020.101786 (2020).",
|
| 610 |
+
"13. Mahmod, A. et al. A dual decoder U-Net-based model for nuclei instance segmentation in hematoxylin and eosin-stained histological images. Front. Medicine 9, 10.3389/fmed.2022.978146 (2022).",
|
| 611 |
+
"14. Mahmood, F., Chen, R. & Durr, N. J. Unsupervised reverse domain adaptation for synthetic medical images via adversarial training. IEEE Transactions on Med. Imaging 37, 2572-2581, 10.1109/TMI.2018.2842767 (2018).",
|
| 612 |
+
"15. Kromp, F. et al. An annotated fluorescence image dataset for training nuclear segmentation methods. Sci. Data 7, 1-8, https://doi.org/10.1038/s41597-020-00608-w (2020).",
|
| 613 |
+
"16. Mahmod, A. et al. Investigating the impact of the bit depth of fluorescence-stained images on the performance of deep learning-based nuclei instance segmentation. Diagnostics 11, 10.3390/diagnostics11060967 (2021).",
|
| 614 |
+
"17. Verma, R. et al. MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge. IEEE Transactions on Med. Imaging 1-1, 10.1109/TMI.2021.3085712 (2021).",
|
| 615 |
+
"18. Gamper, J., Alemi Koohbanani, N., Benet, K., Khuram, A. & Rajpoot, N. PanNuke: An open pan-cancer histology dataset for nuclei instance segmentation and classification. In Reyes-Aldasoro, C. C., Janowczyk, A., Veta, M., Bankhead, P. & Sirinukunwattana, K. (eds.) Digital Pathology, 11-19, 10.1007/978-3-030-23937-4_2 (Springer International Publishing, Cham, 2019)."
|
| 616 |
+
],
|
| 617 |
+
"bbox": [
|
| 618 |
+
89,
|
| 619 |
+
255,
|
| 620 |
+
910,
|
| 621 |
+
922
|
| 622 |
+
],
|
| 623 |
+
"page_idx": 5
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "page_number",
|
| 627 |
+
"text": "6/7",
|
| 628 |
+
"bbox": [
|
| 629 |
+
883,
|
| 630 |
+
946,
|
| 631 |
+
906,
|
| 632 |
+
957
|
| 633 |
+
],
|
| 634 |
+
"page_idx": 5
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "list",
|
| 638 |
+
"sub_type": "ref_text",
|
| 639 |
+
"list_items": [
|
| 640 |
+
"19. Graham, S. et al. Lizard: A large-scale dataset for colonic nuclear instance segmentation and classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, 684-693 (2021).",
|
| 641 |
+
"20. Hou, L. et al. Dataset of segmented nuclei in hematoxylin and eosin stained histopathology images of ten cancer types. Sci. data 7, 1-12, 10.1038/s41597-020-0528-1 (2020).",
|
| 642 |
+
"21. Graham, S. et al. CoNIC challenge: Pushing the frontiers of nuclear detection, segmentation, classification and counting. arXiv preprint arXiv:2303.06274 (2023).",
|
| 643 |
+
"22. Lin, Y. et al. Label propagation for annotation-efficient nuclei segmentation from pathology images. arXiv preprint arXiv:2202.08195 (2022).",
|
| 644 |
+
"23. Alemi Koohbanani, N., Jahanifar, M., Zamani Tajadin, N. & Rajpoot, N. NuClick: A deep learning framework for interactive segmentation of microscopic images. Med. Image Analysis 65, 101771, 10.1016/j.media.2020.101771 (2020).",
|
| 645 |
+
"24. Vu, Q. D. et al. Methods for segmentation and classification of digital microscopy tissue images. Front. Bioeng. Biotechnol. 7, 53, 10.3389/fbioe.2019.00053 (2019).",
|
| 646 |
+
"25. Sirin, K. et al. Locality sensitive deep learning for detection and classification of nuclei in routine colon cancer histology images. IEEE Transaction on Med. Imaging 35, 1196-1206, 10.1109/TMI.2016.2525803 (2016).",
|
| 647 |
+
"26. Janowczyk, A. & Madabhushi, A. Deep learning for digital pathology image analysis: A comprehensive tutorial with selected use cases. J. Pathol. Informatics 7, 29, 10.4103/2153-3539.186902 (2016).",
|
| 648 |
+
"27. Irshad, H. et al. Crowdsourcing image annotation for nucleus detection and segmentation in computational pathology: evaluating experts, automated methods, and the crowd. In Pacific symposium on biocomputing Co-chairs, 294-305, 10.1142/9789814644730_0029 (2014).",
|
| 649 |
+
"28. Schindelin, J. et al. Fiji: an open-source platform for biological-image analysis. Nat. Methods 9, 676 (2012).",
|
| 650 |
+
"29. Hollandi, R., Diosdi, A., Hollandi, G., Moshkov, N. & Horvath, P. AnnotatorJ: an imagej plugin to ease hand annotation of cellular compartments. Mol. Biol. Cell 31, 2179-2186, 10.1091/mbc.E20-02-0156 (2020).",
|
| 651 |
+
"30. Verma, R. et al. Author's reply to \"MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge\". IEEE Transactions on Med. Imaging 41, 1000–1003, 10.1109/TMI.2022.3157048 (2022).",
|
| 652 |
+
"31. Ronneberger, O., Fischer, P. & Brox, T. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, 234-241, 10.1007/978-3-319-24574-4_28 (2015).",
|
| 653 |
+
"32. Oktay, O. et al. Attention U-Net: Learning where to look for the pancreas. arXiv preprint arXiv:1804.03999 (2018).",
|
| 654 |
+
"33. He, K., Zhang, X., Ren, S. & Sun, J. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition, 770-778, 10.1109/CVPR.2016.90 (2016).",
|
| 655 |
+
"34. Mahmod, A. et al. A two-stage U-Net algorithm for segmentation of nuclei in H&E-stained tissues. In European Congress on Digital Pathology, 75–82, 10.1007/978-3-030-23937-4_9 (2019).",
|
| 656 |
+
"35. Kirillov, A., He, K., Girshick, R., Rother, C. & Dollar, P. Panoptic segmentation. In Conference on Computer Vision and Pattern Recognition, 9404-9413 (2019).",
|
| 657 |
+
"36. Foucart, A., Debeir, O. & Decaestecker, C. Comments on \"MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge\". IEEE Transactions on Med. Imaging 41, 997-999, 10.1109/TMI.2022.3156023 (2022).",
|
| 658 |
+
"37. Mahmod, A., Schaefer, G., Ecker, R. & Ellinger, I. Pollen grain microscopic image classification using an ensemble of fine-tuned deep convolutional neural networks. In International Conference on Pattern Recognition, 344-356, 10.1007/978-3-030-68763-2_26 (Springer, 2021).",
|
| 659 |
+
"38. Li, F., Hu, Z., Chen, W. & Kak, A. A laplacian pyramid based generative H&E stain augmentation network. arXiv preprint arXiv:2305.14301 (2023).",
|
| 660 |
+
"39. Wang, C. et al. FUSeg: The foot ulcer segmentation challenge. arXiv preprint arXiv:2201.00414 (2022)."
|
| 661 |
+
],
|
| 662 |
+
"bbox": [
|
| 663 |
+
86,
|
| 664 |
+
80,
|
| 665 |
+
911,
|
| 666 |
+
816
|
| 667 |
+
],
|
| 668 |
+
"page_idx": 6
|
| 669 |
+
},
|
| 670 |
+
{
|
| 671 |
+
"type": "page_number",
|
| 672 |
+
"text": "7/7",
|
| 673 |
+
"bbox": [
|
| 674 |
+
883,
|
| 675 |
+
946,
|
| 676 |
+
906,
|
| 677 |
+
957
|
| 678 |
+
],
|
| 679 |
+
"page_idx": 6
|
| 680 |
+
}
|
| 681 |
+
]
|
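The content_list entries above share a small, flat schema: a JSON array of blocks, each carrying a "type", a "page_idx", a pixel "bbox", and then either a "text" field (with "text_level": 1 marking headings), a "list_items" array, or an HTML "table_body". As a rough, non-authoritative sketch of how such a file could be read back (the path is a placeholder for one of the *_content_list.json files in this batch, and the heading/list rendering is illustrative rather than part of MinerU itself):

```python
# Sketch only: reconstruct per-page text from a MinerU content_list.json.
# Keys ("type", "text", "text_level", "page_idx", "list_items", "table_body")
# are taken from the entries shown in the diff above; the path is a placeholder.
import json
from collections import defaultdict

path = "path/to/<uuid>_content_list.json"  # placeholder, not a real file name

with open(path, encoding="utf-8") as f:
    blocks = json.load(f)  # flat list of layout blocks

pages = defaultdict(list)
for block in blocks:
    page = block.get("page_idx", 0)
    if block["type"] == "text":
        # "text_level": 1 marks section headings in these files
        prefix = "# " if block.get("text_level") == 1 else ""
        pages[page].append(prefix + block["text"])
    elif block["type"] == "list":
        pages[page].extend("- " + item for item in block.get("list_items", []))
    elif block["type"] == "table":
        pages[page].append(block.get("table_body", ""))  # stored as an HTML string
    # "image", "aside_text", "page_number", "page_footnote" blocks are skipped here

for page in sorted(pages):
    print(f"--- page {page} ---")
    print("\n".join(pages[page]))
```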
data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_model.json
ADDED
|
@@ -0,0 +1,1138 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.265,
|
| 8 |
+
0.061,
|
| 9 |
+
0.717
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2308.01760v1 [eess.IV] 3 Aug 2023"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.089,
|
| 18 |
+
0.075,
|
| 19 |
+
0.887,
|
| 20 |
+
0.169
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "NulnsSeg: A Fully Annotated Dataset for Nuclei Instance Segmentation in H&E-Stained Histological Images"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.087,
|
| 29 |
+
0.18,
|
| 30 |
+
0.908,
|
| 31 |
+
0.22
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Amirreza Mahbod\\(^{1,2,*}\\), Christine Polak\\(^{2}\\), Katharina Feldmann\\(^{2}\\), Rumsha Khan\\(^{2}\\), Katharina Gelles\\(^{2}\\), Georg Dorffner\\(^{3}\\), Ramona Woitek\\(^{1}\\), Sepideh Hatamikia\\(^{1,4}\\), and Isabella Ellinger\\(^{2}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.086,
|
| 40 |
+
0.234,
|
| 41 |
+
0.905,
|
| 42 |
+
0.265
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "<sup>1</sup>Research Center for Medical Image Analysis and Artificial Intelligence, Department of Medicine, Danube Private University, Krems an der Donau, 3500, Austria"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.088,
|
| 51 |
+
0.266,
|
| 52 |
+
0.842,
|
| 53 |
+
0.281
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "\\(^{2}\\)Institute for Pathophysiology and Allergy Research, Medical University of Vienna, Vienna, 1090, Austria"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.089,
|
| 62 |
+
0.281,
|
| 63 |
+
0.699,
|
| 64 |
+
0.296
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "\\(^{3}\\)Institute of Artificial Intelligence, Medical University of Vienna, Vienna, 1090, Austria"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.089,
|
| 73 |
+
0.296,
|
| 74 |
+
0.727,
|
| 75 |
+
0.311
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "\\(^{4}\\)Austrian Center for Medical Innovation and Technology, Wiener Neustadt, 2700, Austria"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.089,
|
| 84 |
+
0.311,
|
| 85 |
+
0.701,
|
| 86 |
+
0.326
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "* corresponding author(s): Amirreza Mahbod (amirreza.mahbod@dp-uni.ac.at)"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "list",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.086,
|
| 95 |
+
0.234,
|
| 96 |
+
0.905,
|
| 97 |
+
0.326
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": null
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "title",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.088,
|
| 106 |
+
0.35,
|
| 107 |
+
0.201,
|
| 108 |
+
0.367
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "ABSTRACT"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.09,
|
| 117 |
+
0.387,
|
| 118 |
+
0.908,
|
| 119 |
+
0.542
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "In computational pathology, automatic nuclei instance segmentation plays an essential role in whole slide image analysis. While many computerized approaches have been proposed for this task, supervised deep learning (DL) methods have shown superior segmentation performances compared to classical machine learning and image processing techniques. However, these models need fully annotated datasets for training which is challenging to acquire, especially in the medical domain. In this work, we release one of the biggest fully manually annotated datasets of nuclei in Hematoxylin and Eosin (H&E)-stained histological images, called NulnsSeg. This dataset contains 665 image patches with more than 30,000 manually segmented nuclei from 31 human and mouse organs. Moreover, for the first time, we provide additional ambiguous area masks for the entire dataset. These vague areas represent the parts of the images where precise and deterministic manual annotations are impossible, even for human experts. The dataset and detailed step-by-step instructions to generate related segmentation masks are publicly available at https://www.kaggle.com/datasets/ipateam/nuinsseg and https://github.com/masih4/NuInsSeg, respectively."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "title",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.088,
|
| 128 |
+
0.575,
|
| 129 |
+
0.322,
|
| 130 |
+
0.594
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Background & Summary"
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.085,
|
| 139 |
+
0.601,
|
| 140 |
+
0.912,
|
| 141 |
+
0.768
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "With the advent of brightfield and fluorescent digital scanners that produce and store whole slide images (WSIs) in digital form, there is a growing trend to exploit computerized methods for semi or fully-automatic WSI analysis<sup>1</sup>. In digital pathology and biomedical image analysis, nuclei segmentation plays a fundamental role in image interpretation<sup>2</sup>. Specific nuclei characteristics such as nuclei density or nucleus-to-cytoplasm ratio can be used for cell and tissue identification or for diagnostic purposes such as cancer grading<sup>2-4</sup>. Nuclei instance segmentation masks enable the extraction of valuable statistics for each nucleus<sup>5</sup>. While experts can manually segment nuclei, this is a tedious and complex procedure as thousands of instances can appear in a small patch of a WSI<sup>4,6</sup>. It is also worth mentioning that due to various artifacts such as folded tissues, out-of-focus scanning, considerable variations of nuclei staining intensities within a single image, and the complex nature of some histological samples (e.g., high density of nuclei), accurate and deterministic manual annotation is not always possible, even for human experts. The inter- and intraobserver variability reported in previous studies showing a low level of agreement in the annotation of cell nuclei by medical experts confirms this general problem<sup>5,7</sup>."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.086,
|
| 150 |
+
0.77,
|
| 151 |
+
0.911,
|
| 152 |
+
0.861
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "In recent years, many semi- and fully-automatic computer-based methods have been proposed to perform nuclei instance segmentation automatically and more efficiently. A wide range of approaches from classical image processing to advanced machine learning methods have been proposed for this task\\(^{4,7}\\). Up to this point, supervised deep learning (DL) methods such as Mask R-CNN and its variants\\(^{8,9}\\), distance-based methods\\(^{10,11}\\) and multi encoder-decoder approaches\\(^{6,12,13}\\) have shown the best instance segmentation performances. However, to train these models, fully annotated datasets are required which is difficult to acquire in the medical domain\\(^{4,5,14}\\)."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.086,
|
| 161 |
+
0.862,
|
| 162 |
+
0.912,
|
| 163 |
+
0.926
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "A number of fully annotated nuclei instance segmentation datasets are available. These datasets were introduced for various types of staining such as Hematoxylin and Eosin (H&E), immunohistochemical and immunofluorescence stainings[4,15-17]. The most common staining type in routine pathology is H&E-staining. Therefore, most introduced datasets were based on this staining method. Although these datasets are valuable contributions to the research field and help researchers to develop better"
|
| 167 |
+
}
|
| 168 |
+
],
|
| 169 |
+
[
|
| 170 |
+
{
|
| 171 |
+
"type": "table_caption",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.087,
|
| 174 |
+
0.079,
|
| 175 |
+
0.912,
|
| 176 |
+
0.126
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "Table 1. Publicly available H&E-stained nuclei segmentation datasets. In the table, TCGA refers to The Cancer Genome Atlas, UHCW refers to University Hospitals Coventry and Warwickshire, and MUV refers to Medical University of Vienna. The last row of the table represents the NuInsSeg dataset introduced in this work."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "table",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.088,
|
| 185 |
+
0.136,
|
| 186 |
+
0.919,
|
| 187 |
+
0.358
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "<table><tr><td>dataset</td><td>vague mask</td><td># image tiles</td><td># nuclei</td><td>magnification</td><td># organs</td><td>tile size (pixels)</td><td>source</td></tr><tr><td>Kumar et al.4</td><td>X</td><td>30</td><td>21,623</td><td>40×</td><td>7</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSeg7</td><td>X</td><td>44</td><td>28,846</td><td>40×</td><td>9</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSAC17</td><td>partial</td><td>209</td><td>31,411</td><td>40×</td><td>4</td><td>81 × 113 to 1422 × 2162</td><td>TCGA</td></tr><tr><td>CoNSEP6</td><td>X</td><td>41</td><td>24,319</td><td>40×</td><td>1</td><td>1000 × 1000</td><td>UHCW</td></tr><tr><td>CPM-1524</td><td>X</td><td>15</td><td>2,905</td><td>40×, 20×</td><td>2</td><td>400 × 400, 600 × 1000</td><td>TCGA</td></tr><tr><td>CPM-1724</td><td>X</td><td>32</td><td>7,570</td><td>40×, 20×</td><td>4</td><td>500 × 500 to 600 × 600</td><td>TCGA</td></tr><tr><td>TNBC10</td><td>X</td><td>50</td><td>4,022</td><td>40×</td><td>1</td><td>512 × 512</td><td>Curie Inst.</td></tr><tr><td>CRCHisto25</td><td>X</td><td>100</td><td>29,756</td><td>20×</td><td>1</td><td>500 × 500</td><td>UHCW</td></tr><tr><td>Janowczyk26</td><td>X</td><td>143</td><td>12,000</td><td>40×</td><td>1</td><td>2000 × 2000</td><td>n/a</td></tr><tr><td>Crowdsource27</td><td>X</td><td>64</td><td>2,532</td><td>40×</td><td>1</td><td>400 × 400</td><td>TCGA</td></tr><tr><td>CryoNuSeg5</td><td>X</td><td>30</td><td>7,596</td><td>40×</td><td>10</td><td>512 × 512</td><td>TCGA</td></tr><tr><td>NuInsSeg</td><td>✓</td><td>665</td><td>30.698</td><td>40×</td><td>31</td><td>512 × 512</td><td>MUV</td></tr></table>"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.087,
|
| 196 |
+
0.381,
|
| 197 |
+
0.912,
|
| 198 |
+
0.488
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "segmentation models, providing more annotated datasets from different organs and centers to cover more data variability is still of high importance. Table 1 shows the most prominent fully manually annotated H&E-stained nuclei segmentation datasets that have been actively used by the research community in the past few years. Besides these datasets, some semi-automatically generated datasets such as PanNuke\\(^{18}\\), Lizard\\(^{19}\\) and Hou et al. dataset\\(^{20}\\) have also been introduced in the past. To generate these datasets, various approaches, such as using trained backbone models or point annotation, were exploited\\(^{21-23}\\). However, training models based on semi-automatically generated datasets may introduce a hidden bias towards the reference model instead of learning the true human expert style for nuclei instance segmentation."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.087,
|
| 207 |
+
0.487,
|
| 208 |
+
0.913,
|
| 209 |
+
0.549
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "In this work, we introduce NuInsSeg, one of the most extensive publicly available datasets for nuclei segmentation in H&E-stained histological images. The primary statistic of this dataset is presented in the last row of Table 1. Our dataset can be used alone to develop, test, and evaluate machine learning-based algorithms for nuclei instance segmentation or can be used as an independent test set to estimate the generalization capability of the already developed nuclei instance segmentation methods."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "title",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.088,
|
| 218 |
+
0.565,
|
| 219 |
+
0.176,
|
| 220 |
+
0.582
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "Methods"
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "title",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.088,
|
| 229 |
+
0.59,
|
| 230 |
+
0.248,
|
| 231 |
+
0.606
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "Sample preparation"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.087,
|
| 240 |
+
0.606,
|
| 241 |
+
0.912,
|
| 242 |
+
0.775
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "The NuInsSeg dataset contains fully annotated brightfield images for nuclei instance segmentation. The H&E-stained sections of 23 different human tissues were provided by Associate Professor Adolf Ellinger, PhD from the specimen collection of the Department of Cell Biology and Ultrastructural Research, Center for Anatomy and Cell Biology, Medical University of Vienna. We only obtained the stained tissue sections, not the original tissues. These images were only used for teaching purposes for a long time where no ethic votum applied. Some of the human tissues were formaldehyde-fixed, embedded in celloidin and sectioned at \\(15\\approx 20\\mu m\\) (jejunum, kidney, liver, oesophagus, palatine tonsil, pancreas, placenta, salivary gland, spleen, tongue). The other human tissues were formaldehyde-fixed and paraffin-embedded (FFPE) and sectioned at \\(4\\approx 5\\mu m\\) (cerebellum, cerebrum, colon, epiglottis, lung, melanoma, muscle, peritoneum, stomach (cardia), stomach (pylorus), testis, umbilical cord, and urinary bladder). Mouse tissue samples from bone (femur), fat (subscapularis), heart, kidney, liver, muscle (tibialis anterior muscle), spleen, and thymus were obtained from 8-week-old male C57BL/6J mice28. \\(4\\mu m\\) sections of the FFPE tissue samples were stained with H&E (ROTH, Austria) and coverslipped with Entellan (Merck, Germany)."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "title",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.088,
|
| 251 |
+
0.787,
|
| 252 |
+
0.242,
|
| 253 |
+
0.802
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "Sample acquisition"
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.087,
|
| 262 |
+
0.802,
|
| 263 |
+
0.912,
|
| 264 |
+
0.864
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "WSIs were generated with a TissueFAXS (TissueGnostics, Austria) scanning system composed of an Axio Imager Z1 (Zeiss, Oberkochen, Germany), equipped with a Plan-Neofluar \\(40 \\times 0.75\\) objective (\\(40 \\times\\) air) in combination with the TissueFAXS Image Acquisition and Management Software (Version 6.0, TissueGnostics, Austria). Images were acquired at 8-bit resolution using a colour camera (Baumer HXG40c)."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "title",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.088,
|
| 273 |
+
0.877,
|
| 274 |
+
0.348,
|
| 275 |
+
0.892
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "Field of view and patch selection"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.087,
|
| 284 |
+
0.892,
|
| 285 |
+
0.912,
|
| 286 |
+
0.926
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "The scanning system stores individual \\(2048 \\times 2048\\) Field of Views (FOV) with their respective locations in order to be able to combine them into a WSI. Instead of using WSIs, we utilized the FOVs to generate the dataset. A senior cell biologist selected"
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "page_number",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.884,
|
| 295 |
+
0.947,
|
| 296 |
+
0.908,
|
| 297 |
+
0.959
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "2/7"
|
| 301 |
+
}
|
| 302 |
+
],
|
| 303 |
+
[
|
| 304 |
+
{
|
| 305 |
+
"type": "text",
|
| 306 |
+
"bbox": [
|
| 307 |
+
0.087,
|
| 308 |
+
0.081,
|
| 309 |
+
0.912,
|
| 310 |
+
0.128
|
| 311 |
+
],
|
| 312 |
+
"angle": 0,
|
| 313 |
+
"content": "the most representative FOVs for each human and mouse WSI. From each FOV, a \\(512 \\times 512\\) pixel image was extracted by central cropping. These images were saved in lossless Portable Network Graphics (PNG) format. In total, 665 raw image patches were created to build the NuInsSeg dataset."
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"type": "title",
|
| 317 |
+
"bbox": [
|
| 318 |
+
0.087,
|
| 319 |
+
0.14,
|
| 320 |
+
0.71,
|
| 321 |
+
0.155
|
| 322 |
+
],
|
| 323 |
+
"angle": 0,
|
| 324 |
+
"content": "Generation of ground truth, auxiliary, and ambiguous area segmentation masks"
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "text",
|
| 328 |
+
"bbox": [
|
| 329 |
+
0.086,
|
| 330 |
+
0.156,
|
| 331 |
+
0.912,
|
| 332 |
+
0.445
|
| 333 |
+
],
|
| 334 |
+
"angle": 0,
|
| 335 |
+
"content": "We used ImageJ\\(^{28}\\) (version 1.53, National Institutes of Health, USA) to generate the ground truth segmentation masks. We followed the same procedure suggested in \\(^{5}\\) to label nuclei. We used the region of interest (ROI) manager tool (available on the Analysis tab) and the freehand option to delineate the nuclei borders. We manually draw the nuclei border for each instance until all nuclei were segmented for a given image patch. Although some semi-automatic tools such as AnnotatorJ with U-Net backbone\\(^{29}\\) could be used to speed up the annotation, we stuck to fully manual segmentation to prevent any hidden bias toward the semi-autonomic annotation method. The delineated ROIs were saved as a zip file, and the Matlab software (version 2020a) was then used to create binary and labeled segmentation images (as PNG files). Besides the original raw image patches, binary and labeled segmentation masks, we also publish a number of auxiliary segmentation masks that can be useful for developing computer-based segmentation models. Auxiliary segmentation masks, including border-removed binary masks, elucidation distance maps of nuclei, weighted binary masks (where higher weights are assigned in the borders of touching objects), are published along with our dataset. The developed codes to generate these masks are available on the published GitHub repository. Moreover, we annotated the ambiguous areas in all images of the dataset for the first time. Indicating ambiguous regions was partially provided in the test set of the MoNuSAC challenge\\(^{30}\\), but in this work, we provide it for the entire dataset. We used an identical procedure and software to create the ambiguous segmentation masks. These vague areas consist of image parts with very complex appearances where the accurate and reliable manual annotation is impossible. This is potentially helpful for in-depth analysis and evaluation of any automatic model for nuclei instance segmentation. Manual segmentation of nuclei and ambiguous areas detection were performed by three students with a background in cell biology. The annotations were then controlled by a senior cell biologist and corrected when necessary. Some example images, along with related segmentation and vague masks, are shown in Figure 1."
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "image",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.097,
|
| 341 |
+
0.459,
|
| 342 |
+
0.91,
|
| 343 |
+
0.751
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": null
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "image_caption",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.087,
|
| 352 |
+
0.761,
|
| 353 |
+
0.912,
|
| 354 |
+
0.837
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "Figure 1. Example images and manual segmentation masks of three human organs from the NuInsSeg dataset. The first three columns show the original images, the labeled and the binary mask, respectively. The represented images in the fourth to sixth columns show auxiliary segmentation masks that can be beneficial for the development of segmentation algorithms. The last column shows the vague areas where accurate and deterministic manual segmentation is impossible. Some images do not contain ambiguous regions, such as the represented spleen image in the last row."
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "title",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.088,
|
| 363 |
+
0.87,
|
| 364 |
+
0.22,
|
| 365 |
+
0.885
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "Data Records"
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "text",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.087,
|
| 374 |
+
0.893,
|
| 375 |
+
0.912,
|
| 376 |
+
0.924
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "The NuInsSeg dataset is publicly available on a published page on the Kaggle platform (https://www.kaggle.com/datasets/ipateam/nuinsseg). The related code to generate the binary, labeled, and auxiliary segmentation masks from"
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "page_number",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.884,
|
| 385 |
+
0.947,
|
| 386 |
+
0.908,
|
| 387 |
+
0.958
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "3/7"
|
| 391 |
+
}
|
| 392 |
+
],
|
| 393 |
+
[
|
| 394 |
+
{
|
| 395 |
+
"type": "table_caption",
|
| 396 |
+
"bbox": [
|
| 397 |
+
0.267,
|
| 398 |
+
0.079,
|
| 399 |
+
0.731,
|
| 400 |
+
0.096
|
| 401 |
+
],
|
| 402 |
+
"angle": 0,
|
| 403 |
+
"content": "Table 2. Details of the NuInsSeg dataset per human and mouse organ"
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"type": "table",
|
| 407 |
+
"bbox": [
|
| 408 |
+
0.089,
|
| 409 |
+
0.106,
|
| 410 |
+
0.732,
|
| 411 |
+
0.658
|
| 412 |
+
],
|
| 413 |
+
"angle": 0,
|
| 414 |
+
"content": "<table><tr><td>Organ</td><td>Type</td><td># Images</td><td># Nuclei</td><td>Avg. #Nuclei per image</td></tr><tr><td>Cerebellum</td><td>human</td><td>12</td><td>549</td><td>45.8</td></tr><tr><td>Cerebrum</td><td>human</td><td>12</td><td>146</td><td>12.2</td></tr><tr><td>Colon</td><td>human</td><td>12</td><td>349</td><td>29.1</td></tr><tr><td>Epiglottis</td><td>human</td><td>11</td><td>228</td><td>20.7</td></tr><tr><td>Jejunum</td><td>human</td><td>10</td><td>874</td><td>87.4</td></tr><tr><td>Kidney</td><td>human</td><td>11</td><td>1222</td><td>111.1</td></tr><tr><td>Liver</td><td>human</td><td>40</td><td>1370</td><td>34.3</td></tr><tr><td>Lung</td><td>human</td><td>11</td><td>318</td><td>28.9</td></tr><tr><td>Melanoma</td><td>human</td><td>12</td><td>533</td><td>44.4</td></tr><tr><td>Muscle</td><td>human</td><td>9</td><td>127</td><td>14.1</td></tr><tr><td>Oesophagus</td><td>human</td><td>47</td><td>2046</td><td>43.5</td></tr><tr><td>Palatine tonsil</td><td>human</td><td>12</td><td>1045</td><td>87.1</td></tr><tr><td>Pancreas</td><td>human</td><td>44</td><td>2178</td><td>49.5</td></tr><tr><td>Peritoneum</td><td>human</td><td>12</td><td>468</td><td>39.0</td></tr><tr><td>Placenta</td><td>human</td><td>40</td><td>1966</td><td>49.2</td></tr><tr><td>Salivary gland</td><td>human</td><td>44</td><td>3129</td><td>71.1</td></tr><tr><td>Spleen</td><td>human</td><td>34</td><td>3286</td><td>96.7</td></tr><tr><td>Stomach (cardia)</td><td>human</td><td>12</td><td>671</td><td>55.9</td></tr><tr><td>Stomach (pylorus)</td><td>human</td><td>12</td><td>441</td><td>36.8</td></tr><tr><td>Testis</td><td>human</td><td>12</td><td>380</td><td>31.7</td></tr><tr><td>Tongue</td><td>human</td><td>40</td><td>1415</td><td>35.4</td></tr><tr><td>Umbilical cord</td><td>human</td><td>11</td><td>106</td><td>9.6</td></tr><tr><td>Urinary bladder</td><td>human</td><td>12</td><td>400</td><td>33.3</td></tr><tr><td>Bone (femur)</td><td>mouse</td><td>6</td><td>757</td><td>126.2</td></tr><tr><td>Fat (subscapularis)</td><td>mouse</td><td>42</td><td>549</td><td>13.1</td></tr><tr><td>Heart</td><td>mouse</td><td>28</td><td>738</td><td>26.4</td></tr><tr><td>Kidney</td><td>mouse</td><td>40</td><td>1597</td><td>39.9</td></tr><tr><td>Liver</td><td>mouse</td><td>36</td><td>646</td><td>17.9</td></tr><tr><td>Muscle (tibialis anterior muscle)</td><td>mouse</td><td>28</td><td>165</td><td>5.9</td></tr><tr><td>Spleen</td><td>mouse</td><td>7</td><td>1657</td><td>236.7</td></tr><tr><td>Thymus</td><td>mouse</td><td>6</td><td>1342</td><td>223.7</td></tr><tr><td>All</td><td>human</td><td>472</td><td>23247</td><td>49.3</td></tr><tr><td>All</td><td>mouse</td><td>193</td><td>7451</td><td>38.6</td></tr><tr><td>All</td><td>human + mouse</td><td>665</td><td>30698</td><td>46.2</td></tr></table>"
|
| 415 |
+
},
|
| 416 |
+
{
|
| 417 |
+
"type": "text",
|
| 418 |
+
"bbox": [
|
| 419 |
+
0.087,
|
| 420 |
+
0.679,
|
| 421 |
+
0.912,
|
| 422 |
+
0.742
|
| 423 |
+
],
|
| 424 |
+
"angle": 0,
|
| 425 |
+
"content": "the ImageJ ROI files is also available on the NuInsSeg published GitHub repository https://github.com/masih4/NuInsSeg. This dataset contains 665 image patches with 30,698 segmented nuclei from 31 human and mouse organs. The organ-specific details of the generated dataset are shown in Table 2. As shown in the table, the nuclei density in some tissues/organs (e.g., mouse spleen) is much higher in comparison to other tissues/organs (e.g., mouse muscle)."
|
| 426 |
+
},
|
| 427 |
+
{
|
| 428 |
+
"type": "title",
|
| 429 |
+
"bbox": [
|
| 430 |
+
0.088,
|
| 431 |
+
0.756,
|
| 432 |
+
0.283,
|
| 433 |
+
0.772
|
| 434 |
+
],
|
| 435 |
+
"angle": 0,
|
| 436 |
+
"content": "Technical Validation"
|
| 437 |
+
},
|
| 438 |
+
{
|
| 439 |
+
"type": "text",
|
| 440 |
+
"bbox": [
|
| 441 |
+
0.087,
|
| 442 |
+
0.779,
|
| 443 |
+
0.912,
|
| 444 |
+
0.903
|
| 445 |
+
],
|
| 446 |
+
"angle": 0,
|
| 447 |
+
"content": "To create a baseline segmentation benchmark, we randomly split the dataset into five folds with an equal number of images per fold (i.e., 133 images per fold). We used the Scikit-learn Python package to create the folds with a fixed random state to reproduce the results (splitting code is available on the Kaggle and Github pages). Based on the created folds, we developed a number of DL-based segmentation models and evaluated their performance based on five-fold cross-validation. To facilitate to use of our dataset and developing segmentation models, we published our codes for two standard segmentation models, namely shallow U-Net and deep U-Net models<sup>31</sup> on the Kaggle platform<sup>1</sup>. The model architectures of the shallow U-Net and deep U-Net are very similar to the original U-Net model but we added drop out layers between all convolutional layers in both encoder and decoder parts. Four and five convolutional blocks were used in the encoder and decoder parts of the shallow U-Net"
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "page_footnote",
|
| 451 |
+
"bbox": [
|
| 452 |
+
0.106,
|
| 453 |
+
0.909,
|
| 454 |
+
0.673,
|
| 455 |
+
0.925
|
| 456 |
+
],
|
| 457 |
+
"angle": 0,
|
| 458 |
+
"content": "1https://www.kaggle.com/datasets/ipateam/nuinsseg/code?datasetId=1911713"
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "page_number",
|
| 462 |
+
"bbox": [
|
| 463 |
+
0.885,
|
| 464 |
+
0.947,
|
| 465 |
+
0.91,
|
| 466 |
+
0.959
|
| 467 |
+
],
|
| 468 |
+
"angle": 0,
|
| 469 |
+
"content": "4/7"
|
| 470 |
+
}
|
| 471 |
+
],
|
| 472 |
+
[
|
| 473 |
+
{
|
| 474 |
+
"type": "table_caption",
|
| 475 |
+
"bbox": [
|
| 476 |
+
0.209,
|
| 477 |
+
0.079,
|
| 478 |
+
0.788,
|
| 479 |
+
0.094
|
| 480 |
+
],
|
| 481 |
+
"angle": 0,
|
| 482 |
+
"content": "Table 3. NuInsSeg segmentation benchmark results based on five-fold cross-validation"
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "table",
|
| 486 |
+
"bbox": [
|
| 487 |
+
0.089,
|
| 488 |
+
0.105,
|
| 489 |
+
0.789,
|
| 490 |
+
0.219
|
| 491 |
+
],
|
| 492 |
+
"angle": 0,
|
| 493 |
+
"content": "<table><tr><td>Model</td><td>Reference</td><td># Parameters</td><td>Avg.Dice (%)</td><td>Avg. AJI (%)</td><td>Avg. PQ (%)</td></tr><tr><td>Shallow U-Net</td><td>31</td><td>1.9 million</td><td>78.8</td><td>50.5</td><td>42.7</td></tr><tr><td>Deep U-Net</td><td>31</td><td>7.7 million</td><td>79.7</td><td>49.4</td><td>40.4</td></tr><tr><td>Attention U-Net</td><td>32</td><td>2.3 million</td><td>80.5</td><td>45.7</td><td>36.4</td></tr><tr><td>Residual attention U-Net</td><td>32,33</td><td>2.4 million</td><td>81.4</td><td>46.2</td><td>36.9</td></tr><tr><td>Two-stage U-Net</td><td>34</td><td>3.9 million</td><td>76.6</td><td>52.8</td><td>47.2</td></tr><tr><td>Dual decoder U-Net</td><td>13</td><td>3.5 million</td><td>79.4</td><td>55.9</td><td>51.3</td></tr></table>"
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"type": "text",
|
| 497 |
+
"bbox": [
|
| 498 |
+
0.087,
|
| 499 |
+
0.244,
|
| 500 |
+
0.912,
|
| 501 |
+
0.442
|
| 502 |
+
],
|
| 503 |
+
"angle": 0,
|
| 504 |
+
"content": "and deep U-Net, respectively. The model architecture of these two models is publicly available at our published kernels on our NuInsSeg page on the Kaggle platform. Besides these two models, we also evaluated the performance of the attention U-Net\\(^{32}\\), residual attention U-Net\\(^{32,33}\\), two-stage U-Net\\(^{34}\\), and the dual decoder U-Net\\(^{13}\\) models. The architectural details of these models were published in the respective articles. We performed an identical five-fold cross-validation scheme in all experiments to compare the results. For evaluation, we utilized similarity Dice score, aggregate Jaccard index (AJI), and panoptic quality (PQ) scores as suggested in former studies\\(^{5,6,35}\\). The segmentation performance of the aforementioned models is reported in Table 3. As the results show, residual attention U-Net delivers the best overall Dice score between these models, but dual decoder U-Net provides the best average AJI and PQ scores. Interestingly, the dual decoder model achieved the best overall PQ score in the MoNuSAC post challenge leaderborad\\(^{17,36}\\), and it also achieved the best instance-based segmentation scores for the NuInsSeg dataset. It should be noted that these results can be potentially improved by using well-known strategies such as assembling\\(^{37}\\), stain augmentation\\(^{38}\\) or test time augmentation\\(^{39}\\) but achieving the best segmentation scores is out of the focus of this study. Instead, these results could be used as baseline segmentation scores for comparison to other segmentation models in the future, given that the same five-fold cross-validation scheme is used."
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"type": "title",
|
| 508 |
+
"bbox": [
|
| 509 |
+
0.088,
|
| 510 |
+
0.461,
|
| 511 |
+
0.214,
|
| 512 |
+
0.479
|
| 513 |
+
],
|
| 514 |
+
"angle": 0,
|
| 515 |
+
"content": "Usage Notes"
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "text",
|
| 519 |
+
"bbox": [
|
| 520 |
+
0.087,
|
| 521 |
+
0.485,
|
| 522 |
+
0.912,
|
| 523 |
+
0.652
|
| 524 |
+
],
|
| 525 |
+
"angle": 0,
|
| 526 |
+
"content": "Our dataset, including raw image patches, binary and labeled segmentation masks, and other auxiliary segmentation masks, is publicly available on the published NuInsSeg page on the Kaggle platform. Step-by-step instructions to perform manual annotations and related codes to generate the main and auxiliary segmentation masks are available at our published Github repository. We also provide three kernels on the Kaggle platform to facilitate using our dataset. One kernel is devoted to explanatory data analysis (EDA), where interested researchers can visualize and explore different statistics of the NuInsSeg dataset. The other two kernels consist of related codes to perform five-fold cross-validation based on two DL-based models, namely shallow U-Net and deep U-Net, as described in the previous section. Different Python packages were used in the coding of these kernels. To report statistics and visualize data in the EDA kernel, we mainly used Pandas (version 1.3.5) and Matplotlib (version 3.5.1) Python packages. For the DL-based model development, we mainly used Tensorflow (version 2.6.2), Keras (version 2.6.0) frameworks, and finally, for performing cross-validation, pre-and post-processing, and augmentation, Scikit-learn (version 0.23.2), Scikit-image (version 0.19.1) and Albumentation (version 1.1.0) were exploited, respectively."
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"bbox": [
|
| 531 |
+
0.087,
|
| 532 |
+
0.652,
|
| 533 |
+
0.912,
|
| 534 |
+
0.712
|
| 535 |
+
],
|
| 536 |
+
"angle": 0,
|
| 537 |
+
"content": "We explicitly published our dataset on the Kaggle platform, where limited free computational resources are available. Therefore, interested researchers can directly access our dataset and develop ML- or DL-based algorithms to perform nuclei instance segmentation on the NuInsSeg dataset. However, there is no limitation to downloading and saving the dataset on local systems and performing analysis using local or other cloud-based computational resources."
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "text",
|
| 541 |
+
"bbox": [
|
| 542 |
+
0.087,
|
| 543 |
+
0.713,
|
| 544 |
+
0.912,
|
| 545 |
+
0.744
|
| 546 |
+
],
|
| 547 |
+
"angle": 0,
|
| 548 |
+
"content": "It is worth mentioning that the NuInsSeg dataset can be used alone to train, validate, and test any segmentation algorithm, or it can be used as an independent test set to measure the generalization capability of already developed segmentation models."
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "title",
|
| 552 |
+
"bbox": [
|
| 553 |
+
0.088,
|
| 554 |
+
0.763,
|
| 555 |
+
0.248,
|
| 556 |
+
0.781
|
| 557 |
+
],
|
| 558 |
+
"angle": 0,
|
| 559 |
+
"content": "Code availability"
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"type": "text",
|
| 563 |
+
"bbox": [
|
| 564 |
+
0.087,
|
| 565 |
+
0.788,
|
| 566 |
+
0.912,
|
| 567 |
+
0.819
|
| 568 |
+
],
|
| 569 |
+
"angle": 0,
|
| 570 |
+
"content": "The dataset and required code to generate the dataset are publicly available on Kaggle (https://www.kaggle.com/datasets/ipateam/nuinsseg) and GitHub (https://github.com/masih4/NuInsSeg), respectively."
|
| 571 |
+
},
|
| 572 |
+
{
|
| 573 |
+
"type": "title",
|
| 574 |
+
"bbox": [
|
| 575 |
+
0.088,
|
| 576 |
+
0.838,
|
| 577 |
+
0.279,
|
| 578 |
+
0.856
|
| 579 |
+
],
|
| 580 |
+
"angle": 0,
|
| 581 |
+
"content": "Acknowledgements"
|
| 582 |
+
},
|
| 583 |
+
{
|
| 584 |
+
"type": "text",
|
| 585 |
+
"bbox": [
|
| 586 |
+
0.087,
|
| 587 |
+
0.862,
|
| 588 |
+
0.912,
|
| 589 |
+
0.925
|
| 590 |
+
],
|
| 591 |
+
"angle": 0,
|
| 592 |
+
"content": "This work was supported by the Austrian Research Promotion Agency (FFG), No.872636. We would like to thank NVIDIA for their generous GPU donation and the TissueGnostics support team (https://www.tissuegnostics.com/) for their valuable advice to generate the NuInsSeg dataset. Moreover, we would like to thank Adolf Ellinger (MedUni Vienna) for providing the human tissue sections and Peter Pietschmann (MedUni Vienna) who provided the mouse samples."
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"type": "page_number",
|
| 596 |
+
"bbox": [
|
| 597 |
+
0.884,
|
| 598 |
+
0.947,
|
| 599 |
+
0.908,
|
| 600 |
+
0.958
|
| 601 |
+
],
|
| 602 |
+
"angle": 0,
|
| 603 |
+
"content": "5/7"
|
| 604 |
+
}
|
| 605 |
+
],
|
| 606 |
+
[
|
| 607 |
+
{
|
| 608 |
+
"type": "title",
|
| 609 |
+
"bbox": [
|
| 610 |
+
0.088,
|
| 611 |
+
0.079,
|
| 612 |
+
0.385,
|
| 613 |
+
0.096
|
| 614 |
+
],
|
| 615 |
+
"angle": 0,
|
| 616 |
+
"content": "Author contributions statement"
|
| 617 |
+
},
|
| 618 |
+
{
|
| 619 |
+
"type": "text",
|
| 620 |
+
"bbox": [
|
| 621 |
+
0.087,
|
| 622 |
+
0.103,
|
| 623 |
+
0.912,
|
| 624 |
+
0.165
|
| 625 |
+
],
|
| 626 |
+
"angle": 0,
|
| 627 |
+
"content": "A.M. and I.E. conceptualized the paper idea, K.G. prepared the H&E-stained mouse sections and scanned all tissue sections, A.M., C.L., R.K., K.F., and I.E. performed annotations and controlled the segmentation masks, I.E. obtained funding, A.M conducted the experiments and reported the results, and G.D., S.H., R.W., and I.E. supervised the entire work. All authors reviewed the manuscript."
|
| 628 |
+
},
|
| 629 |
+
{
|
| 630 |
+
"type": "title",
|
| 631 |
+
"bbox": [
|
| 632 |
+
0.088,
|
| 633 |
+
0.179,
|
| 634 |
+
0.283,
|
| 635 |
+
0.197
|
| 636 |
+
],
|
| 637 |
+
"angle": 0,
|
| 638 |
+
"content": "Competing interests"
|
| 639 |
+
},
|
| 640 |
+
{
|
| 641 |
+
"type": "text",
|
| 642 |
+
"bbox": [
|
| 643 |
+
0.088,
|
| 644 |
+
0.202,
|
| 645 |
+
0.376,
|
| 646 |
+
0.218
|
| 647 |
+
],
|
| 648 |
+
"angle": 0,
|
| 649 |
+
"content": "The authors declare no competing interests."
|
| 650 |
+
},
|
| 651 |
+
{
|
| 652 |
+
"type": "title",
|
| 653 |
+
"bbox": [
|
| 654 |
+
0.09,
|
| 655 |
+
0.233,
|
| 656 |
+
0.199,
|
| 657 |
+
0.249
|
| 658 |
+
],
|
| 659 |
+
"angle": 0,
|
| 660 |
+
"content": "References"
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"type": "ref_text",
|
| 664 |
+
"bbox": [
|
| 665 |
+
0.097,
|
| 666 |
+
0.256,
|
| 667 |
+
0.91,
|
| 668 |
+
0.288
|
| 669 |
+
],
|
| 670 |
+
"angle": 0,
|
| 671 |
+
"content": "1. Cui, M. & Zhang, D. Y. Artificial intelligence and computational pathology. Lab. Investig. 101, 412-422, 10.1038/s41374-020-00514-0 (2021)."
|
| 672 |
+
},
|
| 673 |
+
{
|
| 674 |
+
"type": "ref_text",
|
| 675 |
+
"bbox": [
|
| 676 |
+
0.096,
|
| 677 |
+
0.291,
|
| 678 |
+
0.911,
|
| 679 |
+
0.323
|
| 680 |
+
],
|
| 681 |
+
"angle": 0,
|
| 682 |
+
"content": "2. Skinner, B. M. & Johnson, E. E. Nuclear morphologies: their diversity and functional relevance. Chromosoma 126, 195-212, 10.1007/s00412-016-0614-5 (2017)."
|
| 683 |
+
},
|
| 684 |
+
{
|
| 685 |
+
"type": "ref_text",
|
| 686 |
+
"bbox": [
|
| 687 |
+
0.096,
|
| 688 |
+
0.326,
|
| 689 |
+
0.911,
|
| 690 |
+
0.357
|
| 691 |
+
],
|
| 692 |
+
"angle": 0,
|
| 693 |
+
"content": "3. Chan, J. K. C. The wonderful colors of the hematoxylin-eosin stain in diagnostic surgical pathology. Int. J. Surg. Pathol. 22, 12-32, 10.1177/1066896913517939 (2014)."
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"type": "ref_text",
|
| 697 |
+
"bbox": [
|
| 698 |
+
0.096,
|
| 699 |
+
0.36,
|
| 700 |
+
0.911,
|
| 701 |
+
0.393
|
| 702 |
+
],
|
| 703 |
+
"angle": 0,
|
| 704 |
+
"content": "4. Kumar, N. et al. A dataset and a technique for generalized nuclear segmentation for computational pathology. IEEE Transactions on Med. Imaging 36, 1550-1560, 10.1109/TMI.2017.2677499 (2017)."
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"type": "ref_text",
|
| 708 |
+
"bbox": [
|
| 709 |
+
0.096,
|
| 710 |
+
0.395,
|
| 711 |
+
0.911,
|
| 712 |
+
0.427
|
| 713 |
+
],
|
| 714 |
+
"angle": 0,
|
| 715 |
+
"content": "5. Mahmod, A. et al. CryoNuSeg: A dataset for nuclei instance segmentation of cryosectioned H&E-stained histological images. Comput. Biol. Medicine 132, 104349, 10.1016/j.compbiomed.2021.104349 (2021)."
|
| 716 |
+
},
|
| 717 |
+
{
|
| 718 |
+
"type": "ref_text",
|
| 719 |
+
"bbox": [
|
| 720 |
+
0.096,
|
| 721 |
+
0.43,
|
| 722 |
+
0.911,
|
| 723 |
+
0.462
|
| 724 |
+
],
|
| 725 |
+
"angle": 0,
|
| 726 |
+
"content": "6. Graham, S. et al. Hover-Net: Simultaneous segmentation and classification of nuclei in multi-tissue histology images. Med. Image Analysis 58, 101563, 10.1016/j.media.2019.101563 (2019)."
|
| 727 |
+
},
|
| 728 |
+
{
|
| 729 |
+
"type": "ref_text",
|
| 730 |
+
"bbox": [
|
| 731 |
+
0.096,
|
| 732 |
+
0.464,
|
| 733 |
+
0.911,
|
| 734 |
+
0.496
|
| 735 |
+
],
|
| 736 |
+
"angle": 0,
|
| 737 |
+
"content": "7. Kumar, N. et al. A multi-organ nucleus segmentation challenge. IEEE Transactions on Med. Imaging 39, 1380–1391, 10.1109/TMI.2019.2947628 (2020)."
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"type": "ref_text",
|
| 741 |
+
"bbox": [
|
| 742 |
+
0.096,
|
| 743 |
+
0.499,
|
| 744 |
+
0.911,
|
| 745 |
+
0.531
|
| 746 |
+
],
|
| 747 |
+
"angle": 0,
|
| 748 |
+
"content": "8. He, K., Gkioxari, G., Dollár, P. & Girshick, R. Mask R-CNN. In International Conference on Computer Vision, 2980-2988 (IEEE, 2017)."
|
| 749 |
+
},
|
| 750 |
+
{
|
| 751 |
+
"type": "ref_text",
|
| 752 |
+
"bbox": [
|
| 753 |
+
0.096,
|
| 754 |
+
0.534,
|
| 755 |
+
0.911,
|
| 756 |
+
0.58
|
| 757 |
+
],
|
| 758 |
+
"angle": 0,
|
| 759 |
+
"content": "9. Bancher, B., Mahbod, A., Ellinger, I., Ecker, R. & Dorffner, G. Improving Mask R-CNN for nuclei instance segmentation in hematoxylin & eosin-stained histological images. In MICCAI Workshop on Computational Pathology, vol. 156, 20-35 (2021)."
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "ref_text",
|
| 763 |
+
"bbox": [
|
| 764 |
+
0.09,
|
| 765 |
+
0.584,
|
| 766 |
+
0.911,
|
| 767 |
+
0.616
|
| 768 |
+
],
|
| 769 |
+
"angle": 0,
|
| 770 |
+
"content": "10. Naylor, P., Laé, M., Reyal, F. & Walter, T. Segmentation of nuclei in histopathology images by deep regression of the distance map. IEEE Transactions on Med. Imaging 38, 448-459, 10.1109/TMI.2018.2865709 (2019)."
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "ref_text",
|
| 774 |
+
"bbox": [
|
| 775 |
+
0.09,
|
| 776 |
+
0.618,
|
| 777 |
+
0.911,
|
| 778 |
+
0.651
|
| 779 |
+
],
|
| 780 |
+
"angle": 0,
|
| 781 |
+
"content": "11. Naylor, P., Laé, M., Reyal, F. & Walter, T. Nuclei segmentation in histopathology images using deep neural networks. In IEEE International Symposium on Biomedical Imaging, 933-936, 10.1109/ISBI.2017.7950669 (2017)."
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"type": "ref_text",
|
| 785 |
+
"bbox": [
|
| 786 |
+
0.09,
|
| 787 |
+
0.654,
|
| 788 |
+
0.911,
|
| 789 |
+
0.685
|
| 790 |
+
],
|
| 791 |
+
"angle": 0,
|
| 792 |
+
"content": "12. Zhao, B. et al. Triple u-net: Hematoxylin-aware nuclei segmentation with progressive dense feature aggregation. Med. Image Analysis 65, 101786, 10.1016/j.media.2020.101786 (2020)."
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "ref_text",
|
| 796 |
+
"bbox": [
|
| 797 |
+
0.09,
|
| 798 |
+
0.688,
|
| 799 |
+
0.911,
|
| 800 |
+
0.719
|
| 801 |
+
],
|
| 802 |
+
"angle": 0,
|
| 803 |
+
"content": "13. Mahmod, A. et al. A dual decoder U-Net-based model for nuclei instance segmentation in hematoxylin and eosin-stained histological images. Front. Medicine 9, 10.3389/fmed.2022.978146 (2022)."
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "ref_text",
|
| 807 |
+
"bbox": [
|
| 808 |
+
0.09,
|
| 809 |
+
0.723,
|
| 810 |
+
0.911,
|
| 811 |
+
0.755
|
| 812 |
+
],
|
| 813 |
+
"angle": 0,
|
| 814 |
+
"content": "14. Mahmood, F., Chen, R. & Durr, N. J. Unsupervised reverse domain adaptation for synthetic medical images via adversarial training. IEEE Transactions on Med. Imaging 37, 2572-2581, 10.1109/TMI.2018.2842767 (2018)."
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "ref_text",
|
| 818 |
+
"bbox": [
|
| 819 |
+
0.09,
|
| 820 |
+
0.758,
|
| 821 |
+
0.911,
|
| 822 |
+
0.789
|
| 823 |
+
],
|
| 824 |
+
"angle": 0,
|
| 825 |
+
"content": "15. Kromp, F. et al. An annotated fluorescence image dataset for training nuclear segmentation methods. Sci. Data 7, 1-8, https://doi.org/10.1038/s41597-020-00608-w (2020)."
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "ref_text",
|
| 829 |
+
"bbox": [
|
| 830 |
+
0.09,
|
| 831 |
+
0.792,
|
| 832 |
+
0.911,
|
| 833 |
+
0.824
|
| 834 |
+
],
|
| 835 |
+
"angle": 0,
|
| 836 |
+
"content": "16. Mahmod, A. et al. Investigating the impact of the bit depth of fluorescence-stained images on the performance of deep learning-based nuclei instance segmentation. Diagnostics 11, 10.3390/diagnostics11060967 (2021)."
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "ref_text",
|
| 840 |
+
"bbox": [
|
| 841 |
+
0.09,
|
| 842 |
+
0.827,
|
| 843 |
+
0.911,
|
| 844 |
+
0.859
|
| 845 |
+
],
|
| 846 |
+
"angle": 0,
|
| 847 |
+
"content": "17. Verma, R. et al. MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge. IEEE Transactions on Med. Imaging 1-1, 10.1109/TMI.2021.3085712 (2021)."
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "ref_text",
|
| 851 |
+
"bbox": [
|
| 852 |
+
0.09,
|
| 853 |
+
0.862,
|
| 854 |
+
0.911,
|
| 855 |
+
0.923
|
| 856 |
+
],
|
| 857 |
+
"angle": 0,
|
| 858 |
+
"content": "18. Gamper, J., Alemi Koohbanani, N., Benet, K., Khuram, A. & Rajpoot, N. PanNuke: An open pan-cancer histology dataset for nuclei instance segmentation and classification. In Reyes-Aldasoro, C. C., Janowczyk, A., Veta, M., Bankhead, P. & Sirinukunwattana, K. (eds.) Digital Pathology, 11-19, 10.1007/978-3-030-23937-4_2 (Springer International Publishing, Cham, 2019)."
|
| 859 |
+
},
|
| 860 |
+
{
|
| 861 |
+
"type": "list",
|
| 862 |
+
"bbox": [
|
| 863 |
+
0.09,
|
| 864 |
+
0.256,
|
| 865 |
+
0.911,
|
| 866 |
+
0.923
|
| 867 |
+
],
|
| 868 |
+
"angle": 0,
|
| 869 |
+
"content": null
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "page_number",
|
| 873 |
+
"bbox": [
|
| 874 |
+
0.884,
|
| 875 |
+
0.947,
|
| 876 |
+
0.908,
|
| 877 |
+
0.958
|
| 878 |
+
],
|
| 879 |
+
"angle": 0,
|
| 880 |
+
"content": "6/7"
|
| 881 |
+
}
|
| 882 |
+
],
|
| 883 |
+
[
|
| 884 |
+
{
|
| 885 |
+
"type": "ref_text",
|
| 886 |
+
"bbox": [
|
| 887 |
+
0.088,
|
| 888 |
+
0.081,
|
| 889 |
+
0.909,
|
| 890 |
+
0.113
|
| 891 |
+
],
|
| 892 |
+
"angle": 0,
|
| 893 |
+
"content": "19. Graham, S. et al. Lizard: A large-scale dataset for colonic nuclear instance segmentation and classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, 684-693 (2021)."
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "ref_text",
|
| 897 |
+
"bbox": [
|
| 898 |
+
0.088,
|
| 899 |
+
0.116,
|
| 900 |
+
0.912,
|
| 901 |
+
0.147
|
| 902 |
+
],
|
| 903 |
+
"angle": 0,
|
| 904 |
+
"content": "20. Hou, L. et al. Dataset of segmented nuclei in hematoxylin and eosin stained histopathology images of ten cancer types. Sci. data 7, 1-12, 10.1038/s41597-020-0528-1 (2020)."
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "ref_text",
|
| 908 |
+
"bbox": [
|
| 909 |
+
0.089,
|
| 910 |
+
0.151,
|
| 911 |
+
0.911,
|
| 912 |
+
0.182
|
| 913 |
+
],
|
| 914 |
+
"angle": 0,
|
| 915 |
+
"content": "21. Graham, S. et al. CoNIC challenge: Pushing the frontiers of nuclear detection, segmentation, classification and counting. arXiv preprint arXiv:2303.06274 (2023)."
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "ref_text",
|
| 919 |
+
"bbox": [
|
| 920 |
+
0.09,
|
| 921 |
+
0.187,
|
| 922 |
+
0.91,
|
| 923 |
+
0.217
|
| 924 |
+
],
|
| 925 |
+
"angle": 0,
|
| 926 |
+
"content": "22. Lin, Y. et al. Label propagation for annotation-efficient nuclei segmentation from pathology images. arXiv preprint arXiv:2202.08195 (2022)."
|
| 927 |
+
},
|
| 928 |
+
{
|
| 929 |
+
"type": "ref_text",
|
| 930 |
+
"bbox": [
|
| 931 |
+
0.091,
|
| 932 |
+
0.222,
|
| 933 |
+
0.912,
|
| 934 |
+
0.254
|
| 935 |
+
],
|
| 936 |
+
"angle": 0,
|
| 937 |
+
"content": "23. Alemi Koohbanani, N., Jahanifar, M., Zamani Tajadin, N. & Rajpoot, N. NuClick: A deep learning framework for interactive segmentation of microscopic images. Med. Image Analysis 65, 101771, 10.1016/j.media.2020.101771 (2020)."
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "ref_text",
|
| 941 |
+
"bbox": [
|
| 942 |
+
0.09,
|
| 943 |
+
0.257,
|
| 944 |
+
0.911,
|
| 945 |
+
0.288
|
| 946 |
+
],
|
| 947 |
+
"angle": 0,
|
| 948 |
+
"content": "24. Vu, Q. D. et al. Methods for segmentation and classification of digital microscopy tissue images. Front. Bioeng. Biotechnol. 7, 53, 10.3389/fbioe.2019.00053 (2019)."
|
| 949 |
+
},
|
| 950 |
+
{
|
| 951 |
+
"type": "ref_text",
|
| 952 |
+
"bbox": [
|
| 953 |
+
0.09,
|
| 954 |
+
0.293,
|
| 955 |
+
0.91,
|
| 956 |
+
0.323
|
| 957 |
+
],
|
| 958 |
+
"angle": 0,
|
| 959 |
+
"content": "25. Sirin, K. et al. Locality sensitive deep learning for detection and classification of nuclei in routine colon cancer histology images. IEEE Transaction on Med. Imaging 35, 1196-1206, 10.1109/TMI.2016.2525803 (2016)."
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "ref_text",
|
| 963 |
+
"bbox": [
|
| 964 |
+
0.09,
|
| 965 |
+
0.327,
|
| 966 |
+
0.908,
|
| 967 |
+
0.358
|
| 968 |
+
],
|
| 969 |
+
"angle": 0,
|
| 970 |
+
"content": "26. Janowczyk, A. & Madabhushi, A. Deep learning for digital pathology image analysis: A comprehensive tutorial with selected use cases. J. Pathol. Informatics 7, 29, 10.4103/2153-3539.186902 (2016)."
|
| 971 |
+
},
|
| 972 |
+
{
|
| 973 |
+
"type": "ref_text",
|
| 974 |
+
"bbox": [
|
| 975 |
+
0.09,
|
| 976 |
+
0.363,
|
| 977 |
+
0.91,
|
| 978 |
+
0.408
|
| 979 |
+
],
|
| 980 |
+
"angle": 0,
|
| 981 |
+
"content": "27. Irshad, H. et al. Crowdsourcing image annotation for nucleus detection and segmentation in computational pathology: evaluating experts, automated methods, and the crowd. In Pacific symposium on biocomputing Co-chairs, 294-305, 10.1142/9789814644730_0029 (2014)."
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "ref_text",
|
| 985 |
+
"bbox": [
|
| 986 |
+
0.09,
|
| 987 |
+
0.413,
|
| 988 |
+
0.834,
|
| 989 |
+
0.429
|
| 990 |
+
],
|
| 991 |
+
"angle": 0,
|
| 992 |
+
"content": "28. Schindelin, J. et al. Fiji: an open-source platform for biological-image analysis. Nat. Methods 9, 676 (2012)."
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "ref_text",
|
| 996 |
+
"bbox": [
|
| 997 |
+
0.09,
|
| 998 |
+
0.434,
|
| 999 |
+
0.91,
|
| 1000 |
+
0.464
|
| 1001 |
+
],
|
| 1002 |
+
"angle": 0,
|
| 1003 |
+
"content": "29. Hollandi, R., Diosdi, A., Hollandi, G., Moshkov, N. & Horvath, P. AnnotatorJ: an imagej plugin to ease hand annotation of cellular compartments. Mol. Biol. Cell 31, 2179-2186, 10.1091/mbc.E20-02-0156 (2020)."
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "ref_text",
|
| 1007 |
+
"bbox": [
|
| 1008 |
+
0.09,
|
| 1009 |
+
0.469,
|
| 1010 |
+
0.91,
|
| 1011 |
+
0.499
|
| 1012 |
+
],
|
| 1013 |
+
"angle": 0,
|
| 1014 |
+
"content": "30. Verma, R. et al. Author's reply to \"MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge\". IEEE Transactions on Med. Imaging 41, 1000–1003, 10.1109/TMI.2022.3157048 (2022)."
|
| 1015 |
+
},
|
| 1016 |
+
{
|
| 1017 |
+
"type": "ref_text",
|
| 1018 |
+
"bbox": [
|
| 1019 |
+
0.089,
|
| 1020 |
+
0.504,
|
| 1021 |
+
0.91,
|
| 1022 |
+
0.549
|
| 1023 |
+
],
|
| 1024 |
+
"angle": 0,
|
| 1025 |
+
"content": "31. Ronneberger, O., Fischer, P. & Brox, T. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, 234-241, 10.1007/978-3-319-24574-4_28 (2015)."
|
| 1026 |
+
},
|
| 1027 |
+
{
|
| 1028 |
+
"type": "ref_text",
|
| 1029 |
+
"bbox": [
|
| 1030 |
+
0.09,
|
| 1031 |
+
0.554,
|
| 1032 |
+
0.881,
|
| 1033 |
+
0.57
|
| 1034 |
+
],
|
| 1035 |
+
"angle": 0,
|
| 1036 |
+
"content": "32. Oktay, O. et al. Attention U-Net: Learning where to look for the pancreas. arXiv preprint arXiv:1804.03999 (2018)."
|
| 1037 |
+
},
|
| 1038 |
+
{
|
| 1039 |
+
"type": "ref_text",
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
0.09,
|
| 1042 |
+
0.574,
|
| 1043 |
+
0.909,
|
| 1044 |
+
0.605
|
| 1045 |
+
],
|
| 1046 |
+
"angle": 0,
|
| 1047 |
+
"content": "33. He, K., Zhang, X., Ren, S. & Sun, J. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition, 770-778, 10.1109/CVPR.2016.90 (2016)."
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"type": "ref_text",
|
| 1051 |
+
"bbox": [
|
| 1052 |
+
0.09,
|
| 1053 |
+
0.61,
|
| 1054 |
+
0.909,
|
| 1055 |
+
0.64
|
| 1056 |
+
],
|
| 1057 |
+
"angle": 0,
|
| 1058 |
+
"content": "34. Mahmod, A. et al. A two-stage U-Net algorithm for segmentation of nuclei in H&E-stained tissues. In European Congress on Digital Pathology, 75–82, 10.1007/978-3-030-23937-4_9 (2019)."
|
| 1059 |
+
},
|
| 1060 |
+
{
|
| 1061 |
+
"type": "ref_text",
|
| 1062 |
+
"bbox": [
|
| 1063 |
+
0.09,
|
| 1064 |
+
0.645,
|
| 1065 |
+
0.91,
|
| 1066 |
+
0.675
|
| 1067 |
+
],
|
| 1068 |
+
"angle": 0,
|
| 1069 |
+
"content": "35. Kirillov, A., He, K., Girshick, R., Rother, C. & Dollar, P. Panoptic segmentation. In Conference on Computer Vision and Pattern Recognition, 9404-9413 (2019)."
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"type": "ref_text",
|
| 1073 |
+
"bbox": [
|
| 1074 |
+
0.09,
|
| 1075 |
+
0.68,
|
| 1076 |
+
0.909,
|
| 1077 |
+
0.711
|
| 1078 |
+
],
|
| 1079 |
+
"angle": 0,
|
| 1080 |
+
"content": "36. Foucart, A., Debeir, O. & Decaestecker, C. Comments on \"MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge\". IEEE Transactions on Med. Imaging 41, 997-999, 10.1109/TMI.2022.3156023 (2022)."
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"type": "ref_text",
|
| 1084 |
+
"bbox": [
|
| 1085 |
+
0.09,
|
| 1086 |
+
0.716,
|
| 1087 |
+
0.91,
|
| 1088 |
+
0.761
|
| 1089 |
+
],
|
| 1090 |
+
"angle": 0,
|
| 1091 |
+
"content": "37. Mahmod, A., Schaefer, G., Ecker, R. & Ellinger, I. Pollen grain microscopic image classification using an ensemble of fine-tuned deep convolutional neural networks. In International Conference on Pattern Recognition, 344-356, 10.1007/978-3-030-68763-2_26 (Springer, 2021)."
|
| 1092 |
+
},
|
| 1093 |
+
{
|
| 1094 |
+
"type": "ref_text",
|
| 1095 |
+
"bbox": [
|
| 1096 |
+
0.09,
|
| 1097 |
+
0.766,
|
| 1098 |
+
0.909,
|
| 1099 |
+
0.796
|
| 1100 |
+
],
|
| 1101 |
+
"angle": 0,
|
| 1102 |
+
"content": "38. Li, F., Hu, Z., Chen, W. & Kak, A. A laplacian pyramid based generative H&E stain augmentation network. arXiv preprint arXiv:2305.14301 (2023)."
|
| 1103 |
+
},
|
| 1104 |
+
{
|
| 1105 |
+
"type": "ref_text",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
0.09,
|
| 1108 |
+
0.801,
|
| 1109 |
+
0.807,
|
| 1110 |
+
0.817
|
| 1111 |
+
],
|
| 1112 |
+
"angle": 0,
|
| 1113 |
+
"content": "39. Wang, C. et al. FUSeg: The foot ulcer segmentation challenge. arXiv preprint arXiv:2201.00414 (2022)."
|
| 1114 |
+
},
|
| 1115 |
+
{
|
| 1116 |
+
"type": "list",
|
| 1117 |
+
"bbox": [
|
| 1118 |
+
0.088,
|
| 1119 |
+
0.081,
|
| 1120 |
+
0.912,
|
| 1121 |
+
0.817
|
| 1122 |
+
],
|
| 1123 |
+
"angle": 0,
|
| 1124 |
+
"content": null
|
| 1125 |
+
},
|
| 1126 |
+
{
|
| 1127 |
+
"type": "page_number",
|
| 1128 |
+
"bbox": [
|
| 1129 |
+
0.884,
|
| 1130 |
+
0.947,
|
| 1131 |
+
0.908,
|
| 1132 |
+
0.958
|
| 1133 |
+
],
|
| 1134 |
+
"angle": 0,
|
| 1135 |
+
"content": "7/7"
|
| 1136 |
+
}
|
| 1137 |
+
]
|
| 1138 |
+
]
|
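The layout file added above stores each page as a list of blocks, where every block has a `type`, a `bbox` (four floats that appear to be page-relative, normalized coordinates), an `angle`, and a `content` string (or null for images and other non-text blocks). Below is a minimal Python sketch for reading files with this structure; it is not part of the MinerU tooling itself, and the file path is only a placeholder.

```python
import json

def iter_blocks(layout_path):
    """Yield (page_index, block) pairs from a per-paper layout JSON file.

    Assumes the structure shown above: a top-level list of pages, where each
    page is a list of dicts with "type", "bbox" ([x0, y0, x1, y1], apparently
    normalized to the page size), "angle", and "content" (a string, or None
    for blocks such as images that carry no text here).
    """
    with open(layout_path, "r", encoding="utf-8") as f:
        pages = json.load(f)
    for page_index, blocks in enumerate(pages):
        for block in blocks:
            yield page_index, block

def extract_text(layout_path, wanted=("title", "text", "ref_text")):
    """Concatenate the textual content of selected block types in reading order."""
    parts = []
    for _, block in iter_blocks(layout_path):
        if block.get("type") in wanted and block.get("content"):
            parts.append(block["content"])
    return "\n\n".join(parts)

if __name__ == "__main__":
    # "layout.json" is a placeholder for one of the layout files added in this batch.
    print(extract_text("layout.json")[:500])
```

If the `bbox` values are indeed normalized, multiplying them by the rendered page width and height should recover pixel coordinates.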
data/2023/2308_01xxx/2308.01760/9e41b7c3-99e0-4489-a75e-5349b467d780_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:92c26526274676930967898bc3d07f0df9d6322c3cba670d0a711a7f4abb2d20
|
| 3 |
+
size 1231460
|
data/2023/2308_01xxx/2308.01760/full.md
ADDED
|
@@ -0,0 +1,139 @@
|
| 1 |
+
# NuInsSeg: A Fully Annotated Dataset for Nuclei Instance Segmentation in H&E-Stained Histological Images
|
| 2 |
+
|
| 3 |
+
Amirreza Mahbod $^{1,2,*}$ , Christine Polak $^{2}$ , Katharina Feldmann $^{2}$ , Rumsha Khan $^{2}$ , Katharina Gelles $^{2}$ , Georg Dorffner $^{3}$ , Ramona Woitek $^{1}$ , Sepideh Hatamikia $^{1,4}$ , and Isabella Ellinger $^{2}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>Research Center for Medical Image Analysis and Artificial Intelligence, Department of Medicine, Danube Private University, Krems an der Donau, 3500, Austria
|
| 6 |
+
$^{2}$ Institute for Pathophysiology and Allergy Research, Medical University of Vienna, Vienna, 1090, Austria
|
| 7 |
+
$^{3}$ Institute of Artificial Intelligence, Medical University of Vienna, Vienna, 1090, Austria
|
| 8 |
+
$^{4}$ Austrian Center for Medical Innovation and Technology, Wiener Neustadt, 2700, Austria
|
| 9 |
+
* corresponding author(s): Amirreza Mahbod (amirreza.mahbod@dp-uni.ac.at)
|
| 10 |
+
|
| 11 |
+
# ABSTRACT
|
| 12 |
+
|
| 13 |
+
In computational pathology, automatic nuclei instance segmentation plays an essential role in whole slide image analysis. While many computerized approaches have been proposed for this task, supervised deep learning (DL) methods have shown superior segmentation performance compared to classical machine learning and image processing techniques. However, these models need fully annotated datasets for training, which are challenging to acquire, especially in the medical domain. In this work, we release one of the biggest fully manually annotated datasets of nuclei in Hematoxylin and Eosin (H&E)-stained histological images, called NuInsSeg. This dataset contains 665 image patches with more than 30,000 manually segmented nuclei from 31 human and mouse organs. Moreover, for the first time, we provide additional ambiguous area masks for the entire dataset. These vague areas represent the parts of the images where precise and deterministic manual annotations are impossible, even for human experts. The dataset and detailed step-by-step instructions to generate related segmentation masks are publicly available at https://www.kaggle.com/datasets/ipateam/nuinsseg and https://github.com/masih4/NuInsSeg, respectively.
|
| 14 |
+
|
| 15 |
+
# Background & Summary
|
| 16 |
+
|
| 17 |
+
With the advent of brightfield and fluorescent digital scanners that produce and store whole slide images (WSIs) in digital form, there is a growing trend to exploit computerized methods for semi or fully-automatic WSI analysis<sup>1</sup>. In digital pathology and biomedical image analysis, nuclei segmentation plays a fundamental role in image interpretation<sup>2</sup>. Specific nuclei characteristics such as nuclei density or nucleus-to-cytoplasm ratio can be used for cell and tissue identification or for diagnostic purposes such as cancer grading<sup>2-4</sup>. Nuclei instance segmentation masks enable the extraction of valuable statistics for each nucleus<sup>5</sup>. While experts can manually segment nuclei, this is a tedious and complex procedure as thousands of instances can appear in a small patch of a WSI<sup>4,6</sup>. It is also worth mentioning that due to various artifacts such as folded tissues, out-of-focus scanning, considerable variations of nuclei staining intensities within a single image, and the complex nature of some histological samples (e.g., high density of nuclei), accurate and deterministic manual annotation is not always possible, even for human experts. The inter- and intraobserver variability reported in previous studies showing a low level of agreement in the annotation of cell nuclei by medical experts confirms this general problem<sup>5,7</sup>.
|
| 18 |
+
|
| 19 |
+
In recent years, many semi- and fully-automatic computer-based methods have been proposed to perform nuclei instance segmentation automatically and more efficiently. A wide range of approaches from classical image processing to advanced machine learning methods have been proposed for this task $^{4,7}$ . Up to this point, supervised deep learning (DL) methods such as Mask R-CNN and its variants $^{8,9}$ , distance-based methods $^{10,11}$ and multi encoder-decoder approaches $^{6,12,13}$ have shown the best instance segmentation performance. However, to train these models, fully annotated datasets are required, which are difficult to acquire in the medical domain $^{4,5,14}$ .
|
| 20 |
+
|
| 21 |
+
A number of fully annotated nuclei instance segmentation datasets are available. These datasets were introduced for various types of staining such as Hematoxylin and Eosin (H&E), immunohistochemical and immunofluorescence stainings $^{4,15-17}$ . The most common staining type in routine pathology is H&E-staining. Therefore, most introduced datasets were based on this staining method. Although these datasets are valuable contributions to the research field and help researchers to develop better
|
| 22 |
+
|
| 23 |
+
Table 1. Publicly available H&E-stained nuclei segmentation datasets. In the table, TCGA refers to The Cancer Genome Atlas, UHCW refers to University Hospitals Coventry and Warwickshire, and MUV refers to Medical University of Vienna. The last row of the table represents the NuInsSeg dataset introduced in this work.
|
| 24 |
+
|
| 25 |
+
<table><tr><td>dataset</td><td>vague mask</td><td># image tiles</td><td># nuclei</td><td>magnification</td><td># organs</td><td>tile size (pixels)</td><td>source</td></tr><tr><td>Kumar et al.<sup>4</sup></td><td>X</td><td>30</td><td>21,623</td><td>40×</td><td>7</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSeg<sup>7</sup></td><td>X</td><td>44</td><td>28,846</td><td>40×</td><td>9</td><td>1000 × 1000</td><td>TCGA</td></tr><tr><td>MoNuSAC<sup>17</sup></td><td>partial</td><td>209</td><td>31,411</td><td>40×</td><td>4</td><td>81 × 113 to 1422 × 2162</td><td>TCGA</td></tr><tr><td>CoNSeP<sup>6</sup></td><td>X</td><td>41</td><td>24,319</td><td>40×</td><td>1</td><td>1000 × 1000</td><td>UHCW</td></tr><tr><td>CPM-15<sup>24</sup></td><td>X</td><td>15</td><td>2,905</td><td>40×, 20×</td><td>2</td><td>400 × 400, 600 × 1000</td><td>TCGA</td></tr><tr><td>CPM-17<sup>24</sup></td><td>X</td><td>32</td><td>7,570</td><td>40×, 20×</td><td>4</td><td>500 × 500 to 600 × 600</td><td>TCGA</td></tr><tr><td>TNBC<sup>10</sup></td><td>X</td><td>50</td><td>4,022</td><td>40×</td><td>1</td><td>512 × 512</td><td>Curie Inst.</td></tr><tr><td>CRCHisto<sup>25</sup></td><td>X</td><td>100</td><td>29,756</td><td>20×</td><td>1</td><td>500 × 500</td><td>UHCW</td></tr><tr><td>Janowczyk<sup>26</sup></td><td>X</td><td>143</td><td>12,000</td><td>40×</td><td>1</td><td>2000 × 2000</td><td>n/a</td></tr><tr><td>Crowdsource<sup>27</sup></td><td>X</td><td>64</td><td>2,532</td><td>40×</td><td>1</td><td>400 × 400</td><td>TCGA</td></tr><tr><td>CryoNuSeg<sup>5</sup></td><td>X</td><td>30</td><td>7,596</td><td>40×</td><td>10</td><td>512 × 512</td><td>TCGA</td></tr><tr><td>NuInsSeg</td><td>✓</td><td>665</td><td>30,698</td><td>40×</td><td>31</td><td>512 × 512</td><td>MUV</td></tr></table>
|
| 26 |
+
|
| 27 |
+
segmentation models, providing more annotated datasets from different organs and centers to cover more data variability is still of high importance. Table 1 shows the most prominent fully manually annotated H&E-stained nuclei segmentation datasets that have been actively used by the research community in the past few years. Besides these datasets, some semi-automatically generated datasets such as PanNuke $^{18}$ , Lizard $^{19}$ and Hou et al. dataset $^{20}$ have also been introduced in the past. To generate these datasets, various approaches, such as using trained backbone models or point annotation, were exploited $^{21-23}$ . However, training models based on semi-automatically generated datasets may introduce a hidden bias towards the reference model instead of learning the true human expert style for nuclei instance segmentation.
|
| 28 |
+
|
| 29 |
+
In this work, we introduce NuInsSeg, one of the most extensive publicly available datasets for nuclei segmentation in H&E-stained histological images. The primary statistic of this dataset is presented in the last row of Table 1. Our dataset can be used alone to develop, test, and evaluate machine learning-based algorithms for nuclei instance segmentation or can be used as an independent test set to estimate the generalization capability of the already developed nuclei instance segmentation methods.
|
| 30 |
+
|
| 31 |
+
# Methods
|
| 32 |
+
|
| 33 |
+
# Sample preparation
|
| 34 |
+
|
| 35 |
+
The NuInsSeg dataset contains fully annotated brightfield images for nuclei instance segmentation. The H&E-stained sections of 23 different human tissues were provided by Associate Professor Adolf Ellinger, PhD, from the specimen collection of the Department of Cell Biology and Ultrastructural Research, Center for Anatomy and Cell Biology, Medical University of Vienna. We only obtained the stained tissue sections, not the original tissues. These sections had been used solely for teaching purposes for a long time, for which no ethics vote was required. Some of the human tissues were formaldehyde-fixed, embedded in celloidin and sectioned at $15 - 20\mu m$ (jejunum, kidney, liver, oesophagus, palatine tonsil, pancreas, placenta, salivary gland, spleen, tongue). The other human tissues were formaldehyde-fixed and paraffin-embedded (FFPE) and sectioned at $4 - 5\mu m$ (cerebellum, cerebrum, colon, epiglottis, lung, melanoma, muscle, peritoneum, stomach (cardia), stomach (pylorus), testis, umbilical cord, and urinary bladder). Mouse tissue samples from bone (femur), fat (subscapularis), heart, kidney, liver, muscle (tibialis anterior muscle), spleen, and thymus were obtained from 8-week-old male C57BL/6J mice$^{28}$. $4\mu m$ sections of the FFPE tissue samples were stained with H&E (ROTH, Austria) and coverslipped with Entellan (Merck, Germany).
|
| 36 |
+
|
| 37 |
+
# Sample acquisition
|
| 38 |
+
|
| 39 |
+
WSIs were generated with a TissueFAXS (TissueGnostics, Austria) scanning system composed of an Axio Imager Z1 (Zeiss, Oberkochen, Germany), equipped with a Plan-Neofluar $40 \times 0.75$ objective ( $40 \times$ air) in combination with the TissueFAXS Image Acquisition and Management Software (Version 6.0, TissueGnostics, Austria). Images were acquired at 8-bit resolution using a colour camera (Baumer HXG40c).
|
| 40 |
+
|
| 41 |
+
# Field of view and patch selection
|
| 42 |
+
|
| 43 |
+
The scanning system stores individual $2048 \times 2048$ fields of view (FOVs) together with their respective locations so that they can be combined into a WSI. Instead of using the WSIs, we utilized the FOVs to generate the dataset. A senior cell biologist selected
|
| 44 |
+
|
| 45 |
+
the most representative FOVs for each human and mouse WSI. From each FOV, a $512 \times 512$ pixel image was extracted by central cropping. These images were saved in lossless Portable Network Graphics (PNG) format. In total, 665 raw image patches were created to build the NuInsSeg dataset.
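The cropping step can be illustrated with the short Python sketch below; it is only a minimal example of central cropping and lossless PNG export, not the original acquisition code, and the file paths are hypothetical.

```python
# Illustrative central-crop step: extract a 512 x 512 patch from the centre of a
# 2048 x 2048 FOV and save it as lossless PNG (paths are hypothetical examples).
from PIL import Image

def central_crop(fov_path: str, out_path: str, size: int = 512) -> None:
    fov = Image.open(fov_path)              # e.g. a 2048 x 2048 FOV image
    w, h = fov.size
    left, top = (w - size) // 2, (h - size) // 2
    patch = fov.crop((left, top, left + size, top + size))
    patch.save(out_path, format="PNG")      # PNG is lossless
```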
|
| 46 |
+
|
| 47 |
+
# Generation of ground truth, auxiliary, and ambiguous area segmentation masks
|
| 48 |
+
|
| 49 |
+
We used ImageJ $^{28}$ (version 1.53, National Institutes of Health, USA) to generate the ground truth segmentation masks, following the procedure suggested in $^{5}$ to label nuclei. We used the region of interest (ROI) manager tool (available in the Analyze menu) and the freehand option to delineate the nuclei borders. We manually drew the border of each nucleus until all nuclei were segmented for a given image patch. Although semi-automatic tools such as AnnotatorJ with a U-Net backbone $^{29}$ could have been used to speed up the annotation, we stuck to fully manual segmentation to prevent any hidden bias toward a semi-automatic annotation method. The delineated ROIs were saved as a zip file, and Matlab (version 2020a) was then used to create binary and labeled segmentation images (as PNG files). Besides the original raw image patches and the binary and labeled segmentation masks, we also publish a number of auxiliary segmentation masks that can be useful for developing computer-based segmentation models. These auxiliary masks, including border-removed binary masks, Euclidean distance maps of nuclei, and weighted binary masks (where higher weights are assigned to the borders of touching objects), are published along with our dataset. The code developed to generate these masks is available in the published GitHub repository. Moreover, we annotated the ambiguous areas in all images of the dataset for the first time. Annotation of ambiguous regions was partially provided in the test set of the MoNuSAC challenge $^{30}$, but in this work, we provide it for the entire dataset. We used an identical procedure and software to create the ambiguous segmentation masks. These vague areas consist of image parts with very complex appearances where accurate and reliable manual annotation is impossible. This is potentially helpful for in-depth analysis and evaluation of any automatic model for nuclei instance segmentation. Manual segmentation of nuclei and detection of ambiguous areas were performed by three students with a background in cell biology. The annotations were then checked by a senior cell biologist and corrected when necessary. Some example images, along with the related segmentation and vague masks, are shown in Figure 1.
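As an illustration of how such auxiliary masks can be derived from a labeled instance mask, the following Python sketch uses SciPy and scikit-image; it is a minimal re-implementation for clarity, not the code published in the GitHub repository, and the border-weighting parameters are assumed values in the spirit of the U-Net weight map.

```python
# Illustrative derivation of auxiliary masks from a labeled instance mask using
# SciPy/scikit-image (a minimal sketch, not the code published in the repository).
import numpy as np
from scipy import ndimage
from skimage.segmentation import find_boundaries

def auxiliary_masks(labeled_mask: np.ndarray, border_sigma: float = 5.0, w0: float = 10.0):
    """labeled_mask: 2D array with 0 = background and 1..N = nucleus instances."""
    binary = labeled_mask > 0

    # Border-removed binary mask: erase instance boundary pixels so that touching
    # nuclei become separated objects.
    borders = find_boundaries(labeled_mask, mode="inner")
    border_removed = binary & ~borders

    # Euclidean distance map of nuclei (distance to the nearest background pixel).
    distance_map = ndimage.distance_transform_edt(binary)

    # Weighted mask: weights grow near object borders (a simplified variant of the
    # U-Net border-weighting scheme; border_sigma and w0 are assumed values).
    dist_to_border = ndimage.distance_transform_edt(~borders)
    weight_map = 1.0 + w0 * np.exp(-(dist_to_border ** 2) / (2.0 * border_sigma ** 2))

    return border_removed.astype(np.uint8), distance_map, weight_map
```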
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
Figure 1. Example images and manual segmentation masks of three human organs from the NuInsSeg dataset. The first three columns show the original image, the labeled mask, and the binary mask, respectively. The fourth to sixth columns show auxiliary segmentation masks that can be beneficial for the development of segmentation algorithms. The last column shows the vague areas where accurate and deterministic manual segmentation is impossible. Some images do not contain ambiguous regions, such as the spleen image in the last row.
|
| 53 |
+
|
| 54 |
+
# Data Records
|
| 55 |
+
|
| 56 |
+
The NuInsSeg dataset is publicly available on a published page on the Kaggle platform (https://www.kaggle.com/datasets/ipateam/nuinsseg). The related code to generate the binary, labeled, and auxiliary segmentation masks from
|
| 57 |
+
|
| 58 |
+
Table 2. Details of the NuInsSeg dataset per human and mouse organ
|
| 59 |
+
|
| 60 |
+
<table><tr><td>Organ</td><td>Type</td><td># Images</td><td># Nuclei</td><td>Avg. #Nuclei per image</td></tr><tr><td>Cerebellum</td><td>human</td><td>12</td><td>549</td><td>45.8</td></tr><tr><td>Cerebrum</td><td>human</td><td>12</td><td>146</td><td>12.2</td></tr><tr><td>Colon</td><td>human</td><td>12</td><td>349</td><td>29.1</td></tr><tr><td>Epiglottis</td><td>human</td><td>11</td><td>228</td><td>20.7</td></tr><tr><td>Jejunum</td><td>human</td><td>10</td><td>874</td><td>87.4</td></tr><tr><td>Kidney</td><td>human</td><td>11</td><td>1222</td><td>111.1</td></tr><tr><td>Liver</td><td>human</td><td>40</td><td>1370</td><td>34.3</td></tr><tr><td>Lung</td><td>human</td><td>11</td><td>318</td><td>28.9</td></tr><tr><td>Melanoma</td><td>human</td><td>12</td><td>533</td><td>44.4</td></tr><tr><td>Muscle</td><td>human</td><td>9</td><td>127</td><td>14.1</td></tr><tr><td>Oesophagus</td><td>human</td><td>47</td><td>2046</td><td>43.5</td></tr><tr><td>Palatine tonsil</td><td>human</td><td>12</td><td>1045</td><td>87.1</td></tr><tr><td>Pancreas</td><td>human</td><td>44</td><td>2178</td><td>49.5</td></tr><tr><td>Peritoneum</td><td>human</td><td>12</td><td>468</td><td>39.0</td></tr><tr><td>Placenta</td><td>human</td><td>40</td><td>1966</td><td>49.2</td></tr><tr><td>Salivary gland</td><td>human</td><td>44</td><td>3129</td><td>71.1</td></tr><tr><td>Spleen</td><td>human</td><td>34</td><td>3286</td><td>96.7</td></tr><tr><td>Stomach (cardia)</td><td>human</td><td>12</td><td>671</td><td>55.9</td></tr><tr><td>Stomach (pylorus)</td><td>human</td><td>12</td><td>441</td><td>36.8</td></tr><tr><td>Testis</td><td>human</td><td>12</td><td>380</td><td>31.7</td></tr><tr><td>Tongue</td><td>human</td><td>40</td><td>1415</td><td>35.4</td></tr><tr><td>Umbilical cord</td><td>human</td><td>11</td><td>106</td><td>9.6</td></tr><tr><td>Urinary bladder</td><td>human</td><td>12</td><td>400</td><td>33.3</td></tr><tr><td>Bone (femur)</td><td>mouse</td><td>6</td><td>757</td><td>126.2</td></tr><tr><td>Fat (subscapularis)</td><td>mouse</td><td>42</td><td>549</td><td>13.1</td></tr><tr><td>Heart</td><td>mouse</td><td>28</td><td>738</td><td>26.4</td></tr><tr><td>Kidney</td><td>mouse</td><td>40</td><td>1597</td><td>39.9</td></tr><tr><td>Liver</td><td>mouse</td><td>36</td><td>646</td><td>17.9</td></tr><tr><td>Muscle (tibialis anterior muscle)</td><td>mouse</td><td>28</td><td>165</td><td>5.9</td></tr><tr><td>Spleen</td><td>mouse</td><td>7</td><td>1657</td><td>236.7</td></tr><tr><td>Thymus</td><td>mouse</td><td>6</td><td>1342</td><td>223.7</td></tr><tr><td>All</td><td>human</td><td>472</td><td>23247</td><td>49.3</td></tr><tr><td>All</td><td>mouse</td><td>193</td><td>7451</td><td>38.6</td></tr><tr><td>All</td><td>human + mouse</td><td>665</td><td>30698</td><td>46.2</td></tr></table>
|
| 61 |
+
|
| 62 |
+
the ImageJ ROI files is also available in the published NuInsSeg GitHub repository: https://github.com/masih4/NuInsSeg. The dataset contains 665 image patches with 30,698 segmented nuclei from 31 human and mouse organs. The organ-specific details of the generated dataset are shown in Table 2. As the table shows, the nuclei density in some tissues/organs (e.g., mouse spleen) is much higher than in others (e.g., mouse muscle).
|
| 63 |
+
|
| 64 |
+
# Technical Validation
|
| 65 |
+
|
| 66 |
+
To create a baseline segmentation benchmark, we randomly split the dataset into five folds with an equal number of images per fold (i.e., 133 images per fold). We used the Scikit-learn Python package to create the folds with a fixed random state so that the results can be reproduced (the splitting code is available on the Kaggle and GitHub pages). Based on the created folds, we developed a number of DL-based segmentation models and evaluated their performance with five-fold cross-validation. To facilitate the use of our dataset and the development of segmentation models, we published our code for two standard segmentation models, namely the shallow U-Net and deep U-Net$^{31}$, on the Kaggle platform. The architectures of the shallow U-Net and deep U-Net are very similar to the original U-Net model, but we added dropout layers between all convolutional layers in both the encoder and decoder parts. Four and five convolutional blocks were used in the encoder and decoder parts of the shallow U-Net
|
| 67 |
+
|
| 68 |
+
Table 3. NuInsSeg segmentation benchmark results based on five-fold cross-validation
|
| 69 |
+
|
| 70 |
+
<table><tr><td>Model</td><td>Reference</td><td># Parameters</td><td>Avg.Dice (%)</td><td>Avg. AJI (%)</td><td>Avg. PQ (%)</td></tr><tr><td>Shallow U-Net</td><td>31</td><td>1.9 million</td><td>78.8</td><td>50.5</td><td>42.7</td></tr><tr><td>Deep U-Net</td><td>31</td><td>7.7 million</td><td>79.7</td><td>49.4</td><td>40.4</td></tr><tr><td>Attention U-Net</td><td>32</td><td>2.3 million</td><td>80.5</td><td>45.7</td><td>36.4</td></tr><tr><td>Residual attention U-Net</td><td>32,33</td><td>2.4 million</td><td>81.4</td><td>46.2</td><td>36.9</td></tr><tr><td>Two-stage U-Net</td><td>34</td><td>3.9 million</td><td>76.6</td><td>52.8</td><td>47.2</td></tr><tr><td>Dual decoder U-Net</td><td>13</td><td>3.5 million</td><td>79.4</td><td>55.9</td><td>51.3</td></tr></table>
|
| 71 |
+
|
| 72 |
+
and deep U-Net, respectively. The architectures of these two models are publicly available in our published kernels on the NuInsSeg page on the Kaggle platform. Besides these two models, we also evaluated the performance of the attention U-Net $^{32}$, residual attention U-Net $^{32,33}$, two-stage U-Net $^{34}$, and dual decoder U-Net $^{13}$ models. The architectural details of these models are given in the respective articles. We performed an identical five-fold cross-validation scheme in all experiments so that the results are comparable. For evaluation, we utilized the Dice similarity score, aggregated Jaccard index (AJI), and panoptic quality (PQ) scores, as suggested in former studies $^{5,6,35}$. The segmentation performance of the aforementioned models is reported in Table 3. As the results show, the residual attention U-Net delivers the best overall Dice score among these models, while the dual decoder U-Net provides the best average AJI and PQ scores. Interestingly, the dual decoder model achieved the best overall PQ score in the MoNuSAC post-challenge leaderboard $^{17,36}$, and it also achieved the best instance-based segmentation scores on the NuInsSeg dataset. It should be noted that these results can potentially be improved by using well-known strategies such as ensembling $^{37}$, stain augmentation $^{38}$ or test-time augmentation $^{39}$, but achieving the best segmentation scores is beyond the focus of this study. Instead, these results can serve as baseline segmentation scores for comparison with other segmentation models in the future, provided that the same five-fold cross-validation scheme is used.
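The benchmarking protocol described above can be sketched as follows; the random seed and file layout are assumptions made for illustration, and the Dice function is a plain re-implementation rather than the exact code of the published Kaggle kernels (AJI and PQ are instance-level metrics and follow the cited definitions).

```python
# Illustrative five-fold protocol and semantic-level Dice score (assumed seed and
# file layout; not the exact code of the published Kaggle kernels).
from pathlib import Path
import numpy as np
from sklearn.model_selection import KFold

image_paths = sorted(Path("NuInsSeg").rglob("*.png"))        # hypothetical layout
kfold = KFold(n_splits=5, shuffle=True, random_state=42)      # 42 is an assumed seed

def dice_score(pred: np.ndarray, gt: np.ndarray, eps: float = 1e-7) -> float:
    """Dice similarity on binary masks of identical shape."""
    pred, gt = pred.astype(bool), gt.astype(bool)
    intersection = np.logical_and(pred, gt).sum()
    return (2.0 * intersection + eps) / (pred.sum() + gt.sum() + eps)

for fold_idx, (train_idx, val_idx) in enumerate(kfold.split(image_paths)):
    # train a model on image_paths[train_idx], then evaluate Dice/AJI/PQ on val_idx
    print(f"fold {fold_idx}: {len(train_idx)} train / {len(val_idx)} val images")
```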
|
| 73 |
+
|
| 74 |
+
# Usage Notes
|
| 75 |
+
|
| 76 |
+
Our dataset, including raw image patches, binary and labeled segmentation masks, and other auxiliary segmentation masks, is publicly available on the published NuInsSeg page on the Kaggle platform. Step-by-step instructions to perform the manual annotations and the related code to generate the main and auxiliary segmentation masks are available in our published GitHub repository. We also provide three kernels on the Kaggle platform to facilitate using our dataset. One kernel is devoted to exploratory data analysis (EDA), where interested researchers can visualize and explore different statistics of the NuInsSeg dataset. The other two kernels contain the code to perform five-fold cross-validation with two DL-based models, namely the shallow U-Net and deep U-Net, as described in the previous section. Different Python packages were used in these kernels. To report statistics and visualize data in the EDA kernel, we mainly used the Pandas (version 1.3.5) and Matplotlib (version 3.5.1) Python packages. For the DL-based model development, we mainly used the TensorFlow (version 2.6.2) and Keras (version 2.6.0) frameworks, and for cross-validation, pre- and post-processing, and augmentation, Scikit-learn (version 0.23.2), Scikit-image (version 0.19.1) and Albumentations (version 1.1.0) were used, respectively.
|
| 77 |
+
|
| 78 |
+
We deliberately published our dataset on the Kaggle platform, where limited free computational resources are available. Therefore, interested researchers can directly access our dataset and develop ML- or DL-based algorithms to perform nuclei instance segmentation on the NuInsSeg dataset. However, there is no limitation on downloading the dataset to local systems and performing the analysis using local or other cloud-based computational resources.
|
| 79 |
+
|
| 80 |
+
It is worth mentioning that the NuInsSeg dataset can be used alone to train, validate, and test any segmentation algorithm, or it can be used as an independent test set to measure the generalization capability of already developed segmentation models.
|
| 81 |
+
|
| 82 |
+
# Code availability
|
| 83 |
+
|
| 84 |
+
The dataset and required code to generate the dataset are publicly available on Kaggle (https://www.kaggle.com/datasets/ipateam/nuinsseg) and GitHub (https://github.com/masih4/NuInsSeg), respectively.
|
| 85 |
+
|
| 86 |
+
# Acknowledgements
|
| 87 |
+
|
| 88 |
+
This work was supported by the Austrian Research Promotion Agency (FFG), No. 872636. We would like to thank NVIDIA for their generous GPU donation and the TissueGnostics support team (https://www.tissuegnostics.com/) for their valuable advice on generating the NuInsSeg dataset. Moreover, we would like to thank Adolf Ellinger (MedUni Vienna) for providing the human tissue sections and Peter Pietschmann (MedUni Vienna), who provided the mouse samples.
|
| 89 |
+
|
| 90 |
+
# Author contributions statement
|
| 91 |
+
|
| 92 |
+
A.M. and I.E. conceptualized the paper idea, K.G. prepared the H&E-stained mouse sections and scanned all tissue sections, A.M., C.L., R.K., K.F., and I.E. performed annotations and controlled the segmentation masks, I.E. obtained funding, A.M. conducted the experiments and reported the results, and G.D., S.H., R.W., and I.E. supervised the entire work. All authors reviewed the manuscript.
|
| 93 |
+
|
| 94 |
+
# Competing interests
|
| 95 |
+
|
| 96 |
+
The authors declare no competing interests.
|
| 97 |
+
|
| 98 |
+
# References
|
| 99 |
+
|
| 100 |
+
1. Cui, M. & Zhang, D. Y. Artificial intelligence and computational pathology. Lab. Investig. 101, 412-422, 10.1038/s41374-020-00514-0 (2021).
|
| 101 |
+
2. Skinner, B. M. & Johnson, E. E. Nuclear morphologies: their diversity and functional relevance. Chromosoma 126, 195-212, 10.1007/s00412-016-0614-5 (2017).
|
| 102 |
+
3. Chan, J. K. C. The wonderful colors of the hematoxylin-eosin stain in diagnostic surgical pathology. Int. J. Surg. Pathol. 22, 12-32, 10.1177/1066896913517939 (2014).
|
| 103 |
+
4. Kumar, N. et al. A dataset and a technique for generalized nuclear segmentation for computational pathology. IEEE Transactions on Med. Imaging 36, 1550-1560, 10.1109/TMI.2017.2677499 (2017).
|
| 104 |
+
5. Mahbod, A. et al. CryoNuSeg: A dataset for nuclei instance segmentation of cryosectioned H&E-stained histological images. Comput. Biol. Medicine 132, 104349, 10.1016/j.compbiomed.2021.104349 (2021).
|
| 105 |
+
6. Graham, S. et al. Hover-Net: Simultaneous segmentation and classification of nuclei in multi-tissue histology images. Med. Image Analysis 58, 101563, 10.1016/j.media.2019.101563 (2019).
|
| 106 |
+
7. Kumar, N. et al. A multi-organ nucleus segmentation challenge. IEEE Transactions on Med. Imaging 39, 1380–1391, 10.1109/TMI.2019.2947628 (2020).
|
| 107 |
+
8. He, K., Gkioxari, G., Dollár, P. & Girshick, R. Mask R-CNN. In International Conference on Computer Vision, 2980-2988 (IEEE, 2017).
|
| 108 |
+
9. Bancher, B., Mahbod, A., Ellinger, I., Ecker, R. & Dorffner, G. Improving Mask R-CNN for nuclei instance segmentation in hematoxylin & eosin-stained histological images. In MICCAI Workshop on Computational Pathology, vol. 156, 20-35 (2021).
|
| 109 |
+
10. Naylor, P., Laé, M., Reyal, F. & Walter, T. Segmentation of nuclei in histopathology images by deep regression of the distance map. IEEE Transactions on Med. Imaging 38, 448-459, 10.1109/TMI.2018.2865709 (2019).
|
| 110 |
+
11. Naylor, P., Laé, M., Reyal, F. & Walter, T. Nuclei segmentation in histopathology images using deep neural networks. In IEEE International Symposium on Biomedical Imaging, 933-936, 10.1109/ISBI.2017.7950669 (2017).
|
| 111 |
+
12. Zhao, B. et al. Triple u-net: Hematoxylin-aware nuclei segmentation with progressive dense feature aggregation. Med. Image Analysis 65, 101786, 10.1016/j.media.2020.101786 (2020).
|
| 112 |
+
13. Mahbod, A. et al. A dual decoder U-Net-based model for nuclei instance segmentation in hematoxylin and eosin-stained histological images. Front. Medicine 9, 10.3389/fmed.2022.978146 (2022).
|
| 113 |
+
14. Mahmood, F., Chen, R. & Durr, N. J. Unsupervised reverse domain adaptation for synthetic medical images via adversarial training. IEEE Transactions on Med. Imaging 37, 2572-2581, 10.1109/TMI.2018.2842767 (2018).
|
| 114 |
+
15. Kromp, F. et al. An annotated fluorescence image dataset for training nuclear segmentation methods. Sci. Data 7, 1-8, https://doi.org/10.1038/s41597-020-00608-w (2020).
|
| 115 |
+
16. Mahbod, A. et al. Investigating the impact of the bit depth of fluorescence-stained images on the performance of deep learning-based nuclei instance segmentation. Diagnostics 11, 10.3390/diagnostics11060967 (2021).
|
| 116 |
+
17. Verma, R. et al. MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge. IEEE Transactions on Med. Imaging 1-1, 10.1109/TMI.2021.3085712 (2021).
|
| 117 |
+
18. Gamper, J., Alemi Koohbanani, N., Benet, K., Khuram, A. & Rajpoot, N. PanNuke: An open pan-cancer histology dataset for nuclei instance segmentation and classification. In Reyes-Aldasoro, C. C., Janowczyk, A., Veta, M., Bankhead, P. & Sirinukunwattana, K. (eds.) Digital Pathology, 11-19, 10.1007/978-3-030-23937-4_2 (Springer International Publishing, Cham, 2019).
|
| 118 |
+
|
| 119 |
+
19. Graham, S. et al. Lizard: A large-scale dataset for colonic nuclear instance segmentation and classification. In Proceedings of the IEEE/CVF International Conference on Computer Vision Workshops, 684-693 (2021).
|
| 120 |
+
20. Hou, L. et al. Dataset of segmented nuclei in hematoxylin and eosin stained histopathology images of ten cancer types. Sci. Data 7, 1-12, 10.1038/s41597-020-0528-1 (2020).
|
| 121 |
+
21. Graham, S. et al. CoNIC challenge: Pushing the frontiers of nuclear detection, segmentation, classification and counting. arXiv preprint arXiv:2303.06274 (2023).
|
| 122 |
+
22. Lin, Y. et al. Label propagation for annotation-efficient nuclei segmentation from pathology images. arXiv preprint arXiv:2202.08195 (2022).
|
| 123 |
+
23. Alemi Koohbanani, N., Jahanifar, M., Zamani Tajadin, N. & Rajpoot, N. NuClick: A deep learning framework for interactive segmentation of microscopic images. Med. Image Analysis 65, 101771, 10.1016/j.media.2020.101771 (2020).
|
| 124 |
+
24. Vu, Q. D. et al. Methods for segmentation and classification of digital microscopy tissue images. Front. Bioeng. Biotechnol. 7, 53, 10.3389/fbioe.2019.00053 (2019).
|
| 125 |
+
25. Sirinukunwattana, K. et al. Locality sensitive deep learning for detection and classification of nuclei in routine colon cancer histology images. IEEE Transactions on Med. Imaging 35, 1196-1206, 10.1109/TMI.2016.2525803 (2016).
|
| 126 |
+
26. Janowczyk, A. & Madabhushi, A. Deep learning for digital pathology image analysis: A comprehensive tutorial with selected use cases. J. Pathol. Informatics 7, 29, 10.4103/2153-3539.186902 (2016).
|
| 127 |
+
27. Irshad, H. et al. Crowdsourcing image annotation for nucleus detection and segmentation in computational pathology: evaluating experts, automated methods, and the crowd. In Pacific symposium on biocomputing Co-chairs, 294-305, 10.1142/9789814644730_0029 (2014).
|
| 128 |
+
28. Schindelin, J. et al. Fiji: an open-source platform for biological-image analysis. Nat. Methods 9, 676 (2012).
|
| 129 |
+
29. Hollandi, R., Diosdi, A., Hollandi, G., Moshkov, N. & Horvath, P. AnnotatorJ: an ImageJ plugin to ease hand annotation of cellular compartments. Mol. Biol. Cell 31, 2179-2186, 10.1091/mbc.E20-02-0156 (2020).
|
| 130 |
+
30. Verma, R. et al. Author's reply to "MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge". IEEE Transactions on Med. Imaging 41, 1000–1003, 10.1109/TMI.2022.3157048 (2022).
|
| 131 |
+
31. Ronneberger, O., Fischer, P. & Brox, T. U-Net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, 234-241, 10.1007/978-3-319-24574-4_28 (2015).
|
| 132 |
+
32. Oktay, O. et al. Attention U-Net: Learning where to look for the pancreas. arXiv preprint arXiv:1804.03999 (2018).
|
| 133 |
+
33. He, K., Zhang, X., Ren, S. & Sun, J. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition, 770-778, 10.1109/CVPR.2016.90 (2016).
|
| 134 |
+
34. Mahbod, A. et al. A two-stage U-Net algorithm for segmentation of nuclei in H&E-stained tissues. In European Congress on Digital Pathology, 75–82, 10.1007/978-3-030-23937-4_9 (2019).
|
| 135 |
+
35. Kirillov, A., He, K., Girshick, R., Rother, C. & Dollar, P. Panoptic segmentation. In Conference on Computer Vision and Pattern Recognition, 9404-9413 (2019).
|
| 136 |
+
36. Foucart, A., Debeir, O. & Decaestecker, C. Comments on "MoNuSAC2020: A multi-organ nuclei segmentation and classification challenge". IEEE Transactions on Med. Imaging 41, 997-999, 10.1109/TMI.2022.3156023 (2022).
|
| 137 |
+
37. Mahbod, A., Schaefer, G., Ecker, R. & Ellinger, I. Pollen grain microscopic image classification using an ensemble of fine-tuned deep convolutional neural networks. In International Conference on Pattern Recognition, 344-356, 10.1007/978-3-030-68763-2_26 (Springer, 2021).
|
| 138 |
+
38. Li, F., Hu, Z., Chen, W. & Kak, A. A laplacian pyramid based generative H&E stain augmentation network. arXiv preprint arXiv:2305.14301 (2023).
|
| 139 |
+
39. Wang, C. et al. FUSeg: The foot ulcer segmentation challenge. arXiv preprint arXiv:2201.00414 (2022).
|
data/2023/2308_01xxx/2308.01760/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b1c1679e4042ff9b5b440f1bb32b08a64e15946a4d6c559bd7cadfa099b8b173
|
| 3 |
+
size 433583
|
data/2023/2308_01xxx/2308.01760/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01779/43fba6bb-a379-4c73-93cb-4d72c4602f7c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e067bec8268e3a1c68c996e3627646cdda967dd6240b2c88e4b79b88e705575a
|
| 3 |
+
size 3203493
|
data/2023/2308_01xxx/2308.01779/full.md
ADDED
|
@@ -0,0 +1,489 @@
|
| 1 |
+
# Point2Mask: Point-supervised Panoptic Segmentation via Optimal Transport
|
| 2 |
+
|
| 3 |
+
Wentong Li $^{1}$ , Yuqian Yuan $^{1}$ , Song Wang $^{1}$ ,
|
| 4 |
+
|
| 5 |
+
Jianke Zhu $^{1*}$ , Jianshu Li $^{2}$ , Jian Liu $^{2}$ , Lei Zhang $^{3}$
|
| 6 |
+
|
| 7 |
+
$^{1}$ Zhejiang University $^{2}$ Ant Group $^{3}$ The Hong Kong Polytechnic University
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
Figure 1: Examples of pixel-wise mask predictions generated by Point2Mask on COCO with ResNet-101. Only a single point annotation per target is used as supervision during training to obtain these results.
|
| 11 |
+
|
| 12 |
+
# Abstract
|
| 13 |
+
|
| 14 |
+
Weakly-supervised image segmentation has recently attracted increasing research attention, aiming to avoid the expensive pixel-wise labeling. In this paper, we present an effective method, namely Point2Mask, to achieve high-quality panoptic prediction using only a single random point annotation per target for training. Specifically, we formulate the panoptic pseudo-mask generation as an Optimal Transport (OT) problem, where each ground-truth $(gt)$ point label and pixel sample are defined as the label supplier and consumer, respectively. The transportation cost is calculated by the introduced task-oriented maps, which focus on the category-wise and instance-wise differences among the various thing and stuff targets. Furthermore, a centroid-based scheme is proposed to set the accurate unit number for each gt point supplier. Hence, the pseudo-mask generation is converted into finding the optimal transport plan at a globally minimal transportation cost, which can be solved via the Sinkhorn-Knopp Iteration. Experimental results on Pascal VOC and COCO demonstrate the promising performance of our proposed Point2Mask approach to point-supervised panoptic segmentation. Source code is available at: https://github.com/LiWentomng/Point2Mask.
|
| 15 |
+
|
| 16 |
+
# 1. Introduction
|
| 17 |
+
|
| 18 |
+
Panoptic segmentation aims to obtain the pixel-wise labels of instance things and semantic stuff in the whole image, which plays an important role in applications such as autonomous driving, image editing and robotic manipulation. Although having achieved promising performance, most of the existing panoptic segmentation approaches [29, 9, 50, 7, 19, 48] are trained in a fully supervised manner, which heavily depend on the pixel-wise mask annotations, incurring expensive labeling costs.
|
| 19 |
+
|
| 20 |
+
To deal with this problem, weakly-supervised methods have recently attracted research attention to obtain high-quality pixel-wise masks with label-efficient sparse annotations, such as bounding box [44, 26, 22, 27], multiple points [28], or the combination of them [8, 42]. Such methods make image segmentation more accessible with lower annotation efforts for new categories or scene types. In this paper, we explore a simpler yet more efficient annotation form, i.e., a single random point for each thing and stuff target, to achieve high-quality panoptic segmentation. As discussed in [2], the cost of point-level labels is only marginally above image-level ones<sup>1</sup>. Such a setting has
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
(Figure 2 column labels, left to right: Input, Prev. SOTA, Point2Mask)
|
| 26 |
+
Figure 2: By taking an image with a single random $gt$ point label per target as the input, the method in [14] adopts the minimum distance for each pixel- $gt$ pair to determine the pseudo label, which cannot handle the ambiguous locations and heavily relies on the defined distance. For example, $\mathrm{d}_2$ is shorter than $\mathrm{d}_1$ for the current pixel in black color, which results in wrong assignment. Our Point2Mask formulates this task as a global Optimal Transport problem, and obtains accurate pseudo-mask labels.
|
| 27 |
+
|
| 28 |
+
been rarely studied, because a single point provides very little supervision for pixel-wise mask prediction. Only one recent study [14] has attempted to build the minimum traversing distance between each pair of pixel sample and ground-truth (denoted as $gt$ ) point label to determine the accurate pseudo-mask label.
|
| 29 |
+
|
| 30 |
+
Unfortunately, it is sub-optimal to assign the pixel samples independently for each random $gt$ point label according to the defined minimum distance. As shown in Fig. 2, the previous method [14] heavily relies on the defined distance and lacks the global context in dealing with the ambiguous locations (i.e., the border pixels among different thing-based targets with the same category). The pixel-to- $gt$ assignment for ambiguous samples is non-trivial, which requires further information beyond the local view. To this end, we model this task from a global optimization perspective to determine the high-quality pixel sample partition for all $gt$ point labels within an image.
|
| 31 |
+
|
| 32 |
+
In this paper, we propose a novel single point-supervised panoptic segmentation method, dubbed as Point2Mask, which formulates the pseudo-mask generation as an Optimal Transport (OT) problem. Specifically, we firstly define each $gt$ point label as a supplier who provides a certain number of labels, and regard each pixel sample as a consumer who needs one unit of $gt$ label. To accurately define the transportation cost between each pixel- $gt$ pair, we introduce two types of task-oriented maps, including a category-wise semantic map and an instance-wise boundary map. The former focuses on the semantic differences among the categories, while the latter aims to discriminate the thing-based objects with accurate boundaries. Furthermore, we propose an effective centroid-based scheme to set the accurate unit number for each $gt$ point supplier in the OT problem.
|
| 33 |
+
|
| 34 |
+
Under our proposed framework, the pseudo-mask generation is converted into finding the optimal transport plan
|
| 35 |
+
|
| 36 |
+
at a globally minimal transportation cost, which can be efficiently solved via the Sinkhorn-Knopp Iteration [11]. By making use of the pseudo-mask labels, the panoptic segmentation sub-network is optimized in a fully-supervised manner. The proposed Point2Mask method is an end-to-end training framework, where only the fully-supervised sub-network is retained for inference. Extensive experiments are conducted on Pascal VOC [13] and COCO [31] benchmarks, and the promising qualitative and quantitative results demonstrate the effectiveness of our proposed approach. Notably, Point2Mask surpasses the state-of-the-art method [14] by $4.0\%$ PQ on Pascal VOC and $3.1\%$ PQ on COCO with the same ResNet-50 backbone [17], and achieves comparable performance with the fully-supervised methods using the Swin-L backbone [32]. Some qualitative results are shown in Fig. 1.
|
| 37 |
+
|
| 38 |
+
# 2. Related Work
|
| 39 |
+
|
| 40 |
+
Fully-supervised Panoptic segmentation. Image segmentation tackles the problem of grouping pixels. As the unified image segmentation task, panoptic segmentation [20] simultaneously incorporates semantic and instance segmentation, where each pixel is uniquely assigned with one of the stuff classes or one of the thing instances.
|
| 41 |
+
|
| 42 |
+
To this end, some methods [20, 46, 6] have been proposed by dealing with things and stuff using separate network branches within one model. Recently, some works [29, 9, 45, 50, 7, 23] aim to unify the model for this task. DETR [3] predicts the boxes for things and stuff categories with Transformer to perform panoptic segmentation. Mask2Former [7] further employs an additional pixel decoder to take into account of the high-resolution features and generates the mask predictions by the Transformer decoder with the masked-attention. Despite being able to segment objects with accurate boundaries, these methods rely on the expensive and laborious pixel-wise mask annotations, which hinders them from dealing with new categories or scene types in real-world applications [2, 37, 47].
|
| 43 |
+
|
| 44 |
+
Weakly-supervised Panoptic Segmentation. Weakly supervised segmentation intends to alleviate the annotation burden in segmentation tasks by label-efficient sparse labels for training. According to different kinds of tasks, it ranges from semantic segmentation [49, 30, 18, 43] to instance segmentation [8, 44, 22, 26, 27, 1] and to panoptic segmentation [14, 38, 28] tasks. As for panoptic segmentation, Li et al. [28] employed coarse polygons with multiple point annotations for each target to supervise the panoptic segmentation model. Recently, Fan et al. [14] adopted a simpler labeling form, i.e., a single point annotation, for each target in an image, and introduced the minimum traversing distance between each pixel sample and the target point label. In spite of its promising performance, it heavily relies on the defined distance, which cannot handle the ambiguous
|
| 45 |
+
|
| 46 |
+
border locations with a local view. Thus, it is still challenging to obtain the accurate mask predictions for single point-supervised panoptic segmentation.
|
| 47 |
+
|
| 48 |
+
Optimal Transport in Computer Vision. The Optimal Transport (OT) is a classical optimization problem with a wide range of computer vision applications. In the early years, the Wasserstein distance (WD), also known as the Earth Mover's distance, was adopted to capture the structure of color distribution and texture spaces for image retrieval [35]. Recently, Chen et al. [5] employed OT to explicitly encourage the fine-grained alignment between words and image regions for vision-and-language pre-training. Li et al. [24] built an attention-aware transport distance in OT to measure the discriminant information from domain knowledge for unsupervised domain adaptation. To achieve high-quality label assignment, Ge et al. [15] formulated the label assignment in object detection as the problem of solving an OT plan. In this work, we explore OT for point-supervised panoptic segmentation.
|
| 49 |
+
|
| 50 |
+
# 3. Method
|
| 51 |
+
|
| 52 |
+
# 3.1. Overview of Point2Mask
|
| 53 |
+
|
| 54 |
+
As illustrated in Fig. 3, we leverage a unified framework, namely Point2Mask, for single point-supervised panoptic segmentation. It consists of two network branches. One branch generates the mask pseudo-labels, and the other focuses on the fully supervised learning using Panoptic SegFormer model [29] based on the generated pseudo-labels. The two branches share the basic backbone and neck network, which are trained in an end-to-end fashion. The key of our proposed approach is how to model the process of mask pseudo-label generation as the global Optimal Transport (OT) problem, which aims to obtain the accurate pixelwise pseudo-masks with only a single point label per target.
|
| 55 |
+
|
| 56 |
+
# 3.2. Optimal Transport
|
| 57 |
+
|
| 58 |
+
We first give a brief review of OT [34], which aims to find a transportation plan $\Gamma$ minimizing the total cost of moving goods from one location to another. It is subject to certain constraints on the amount of goods to be transported and the cost of transportation.
|
| 59 |
+
|
| 60 |
+
Given a set of $m$ suppliers, another set of $n$ consumers, and a cost function $c_{ij}$ that specifies the cost of transporting one unit of goods from the $i$ -th supplier to the $j$ -th consumer. The goal of OT is to find a transportation plan $\Gamma = \{\Gamma_{i,j} | i = 1,2,\dots,m, j = 1,2,\dots,n\}$ that minimizes the total cost of transporting all the goods from the suppliers to the consumers. Thus, the OT problem can be formulated as follows:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\min _ {\Gamma_ {i j} \in \Gamma} \sum_ {i, j} ^ {m, n} \Gamma_ {i j} c _ {i j}, \tag {1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $\Gamma_{ij} \geq 0$ . The constraints to be satisfied are: the $i$ -th supplier holds $x_i = \sum_{j=1}^n \Gamma_{ij}$ units of goods, and the $j$ -th consumer needs $y_j = \sum_{i=1}^m \Gamma_{ij}$ units of goods. Meanwhile, the total amount of goods held by all suppliers is equal to the amount needed by all consumers, i.e., $\sum_{i=1}^m x_i = \sum_{j=1}^n y_j$ . To efficiently tackle this problem, we adopt the Sinkhorn Iteration method [11]. The details can be found in the Appendix.
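For concreteness, a minimal NumPy sketch of the entropic Sinkhorn-Knopp iteration is given below; the regularization strength `eps` is an assumption made for illustration, since the paper's exact formulation is deferred to its Appendix.

```python
# Minimal entropic Sinkhorn-Knopp iteration for the OT problem of Eq. (1)
# (eps is an assumed regularization strength; the paper defers details to its Appendix).
import numpy as np

def sinkhorn(cost, x, y, eps=0.1, n_iters=80):
    """cost: (m, n) transport costs; x: (m,) supplier units; y: (n,) consumer units.
    Requires x.sum() == y.sum(); returns the transport plan Gamma of shape (m, n)."""
    K = np.exp(-cost / eps)                 # Gibbs kernel
    v = np.ones(cost.shape[1])
    for _ in range(n_iters):
        u = x / (K @ v + 1e-9)              # scale rows to match supplier marginals
        v = y / (K.T @ u + 1e-9)            # scale columns to match consumer marginals
    return u[:, None] * K * v[None, :]

# Decoding as in Algorithm 1: each pixel takes the gt label with the largest
# transported mass, i.e. pseudo_label = Gamma.argmax(axis=0).
```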
|
| 67 |
+
|
| 68 |
+
# 3.3. Pseudo-mask Generation by OT
|
| 69 |
+
|
| 70 |
+
Given an input image $I^{H \times W \times 3}$ , supposing there are $m$ $gt$ point labels and $n$ pixel samples (i.e., $n = H \times W$ ), we view each $gt$ point label as a supplier who holds $k$ pixel samples (i.e., $x_{i} = k, i = 1,2,\dots,m$ ). Each pixel of $I$ is regarded as a consumer who needs one $gt$ point label (i.e., $y_{j} = 1, j = 1,2,\dots,n$ ). Given the defined cost $c_{ij}$ to transport one unit from the $i$ -th $gt$ point label to the $j$ -th pixel, the global OT plan $\Gamma \in \mathbb{R}^{m \times n}$ can be obtained by solving the OT problem via the Sinkhorn-Knopp Iteration [11]. Once $\Gamma$ is obtained, the pseudo-mask labels can be decoded by assigning each pixel sample to the supplier that transports a $gt$ point label to it at the minimal transportation cost.
|
| 71 |
+
|
| 72 |
+
The pseudo-mask generation consists of task-oriented map generation, transportation cost definition and centroid-based unit number calculation, which are introduced in detail in the following subsections. The complete procedure is summarized in Algorithm 1.
|
| 73 |
+
|
| 74 |
+
# 3.3.1 Task-oriented Map Generation
|
| 75 |
+
|
| 76 |
+
The task-oriented map includes the category-wise semantic map $P^s$ and the instance-wise boundary map $P^b$ . The former measures the semantic logit differences among the various categories. The latter discriminates the different thing-based targets under the same class using the accurate instance-level boundary. Based on these maps, the distance between adjacent pixels can be calculated to obtain each pixel-to-$gt$ cost $c_{ij}$ .
|
| 77 |
+
|
| 78 |
+
Category-wise Semantic Map. An input image for the panoptic segmentation task is composed of stuff-based and thing-based targets. Semantic parsing is important to obtain category-wise logits. As shown in Fig. 3, we adopt the transformer decoder layers [29] to construct the semantic decoder with a set of semantic query tokens, which match one-to-one with the semantic categories. The semantic logits $P^{s}$ with $N_{c}$ classes can be generated by multiplying the mask scores and the class probabilities together as in [14]. The supervision of the category-wise semantic logits $P^{s}$ with the weak point labels is introduced in Sec. 3.4.1 in detail.
|
| 79 |
+
|
| 80 |
+
Instance-wise Boundary Map. To discriminate the instances for thing-based targets, especially for the instances
|
| 81 |
+
|
| 82 |
+

|
| 83 |
+
Figure 3: Overview of Point2Mask. It consists of two branches, one branch for mask pseudo-label generation, and another for panoptic segmentation based on the generated pseudo-labels. The mask pseudo-label generation is formulated as the OT problem, where the cost matrix is defined based on the task-oriented maps. The $k$ unit number is calculated by the centriod-based scheme. The global optimal transportation plan $\Gamma$ can be solved by the Sinkhorn-Knopp Iteration to obtain the accurate pseudo-mask labels. Only panoptic segmentation branch is kept for inference.
|
| 84 |
+
|
| 85 |
+
with the same category, we introduce the instance-wise boundary map $P^b$ for each target.
|
| 86 |
+
|
| 87 |
+
To generate the pure boundary, we suggest the high-level boundary $P_{high}^{b}$ that is learnt by the boundary decoder. Specifically, we first sum the multi-level feature tokens from the Transformer-based neck into a 2D spatial feature map. Then, two $1 \times 1$ convolution layers interleaved by a ReLU activation are employed. The one-channel boundary map $P_{high}^{b}$ is obtained via the sigmoid function. For the high-level boundary learning objective, we design an effective boundary loss function and explain it in detail in Sec. 3.4.1.
|
| 88 |
+
|
| 89 |
+
Besides, we employ the Structured Edge (SE) detection method [12] based on the original input image to capture the low-level contour $P_{low}^{b}$ , which takes advantage of the inherent structure in edge patches to focus on the sparse object-level boundary map.
|
| 90 |
+
|
| 91 |
+
# 3.3.2 Transportation Cost
|
| 92 |
+
|
| 93 |
+
Based on the obtained task-oriented maps, the transportation cost can be calculated.
|
| 94 |
+
|
| 95 |
+
In our method, each map can be represented as an 8-connected planar graph $G(V,E)$ , where each pixel is adjacent to eight neighbors. The vertex set $V$ consists of all pixels of the map, and the edge set $E$ is made of the edges between two adjacent vertices. Let the vertex $l$ and vertex $k$ be adjacent on the graph. Based on the $P^s$ and $P^b$ maps, the
|
| 96 |
+
|
| 97 |
+
corresponding distance function $d_{k,l}^{s}$ and $d_{k,l}^{b}$ can be defined as follows:
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
d _ {k, l} ^ {s} = \left| P ^ {s} (k) - P ^ {s} (l) \right|,
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
d _ {k, l} ^ {b} = \max \left\{P ^ {b} (k), P ^ {b} (l) \right\}, \tag {2}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
where $P(l), P(k)$ are the map values of vertex $l$ and vertex $k$ , respectively. Once the edge length is obtained from the $P^s$ and $P^b$ maps, we define the transportation cost $c_{i,j}$ from the $i$ -th pixel to the $j$ -th $gt$ point label as the sum of the lengths of their connected edges along the shortest path $\mathbb{P}$ :
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
c _ {i, j} = \sum_ {(k, l) \in \mathbb {P} _ {i, j}} \left(d _ {k, l} ^ {s} + \beta d _ {k, l} ^ {b}\right), \tag {3}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\beta$ is a balancing weight. The shortest path $\mathbb{P}$ is implemented by the classical Dijkstra algorithm, as in [14].
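A possible implementation of this cost, under the simplifying assumption that $P^s$ is a single-channel map, is sketched below using SciPy's sparse-graph Dijkstra routine; the function and variable names are illustrative rather than the authors' code.

```python
# Sketch of the pixel-to-gt transportation cost of Eq. (3) on the 8-connected
# pixel graph, assuming a single-channel semantic map P_s for simplicity.
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import dijkstra

def pixel_to_gt_costs(P_s, P_b, gt_points, beta=0.1):
    """P_s, P_b: (H, W) maps; gt_points: list of (row, col); returns an (m, H*W) cost matrix."""
    H, W = P_s.shape
    idx = np.arange(H * W).reshape(H, W)
    Ps, Pb = P_s.ravel(), P_b.ravel()
    rows, cols, weights = [], [], []
    # Four offsets cover every 8-connected edge of the undirected pixel graph.
    pairs = [
        (idx[:, :-1], idx[:, 1:]),      # right neighbour
        (idx[:-1, :], idx[1:, :]),      # down neighbour
        (idx[:-1, :-1], idx[1:, 1:]),   # down-right neighbour
        (idx[:-1, 1:], idx[1:, :-1]),   # down-left neighbour
    ]
    for src, dst in pairs:
        s, d = src.ravel(), dst.ravel()
        # Edge length from Eq. (2): semantic difference plus weighted boundary term.
        w = np.abs(Ps[s] - Ps[d]) + beta * np.maximum(Pb[s], Pb[d])
        rows.append(s); cols.append(d); weights.append(w)
    graph = coo_matrix(
        (np.concatenate(weights), (np.concatenate(rows), np.concatenate(cols))),
        shape=(H * W, H * W),
    )
    sources = [r * W + c for r, c in gt_points]
    # Shortest-path cost from every gt point to every pixel (Eq. 3).
    return dijkstra(graph, directed=False, indices=sources)
```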
|
| 114 |
+
|
| 115 |
+
# 3.3.3 Centroid-based Unit Number Calculation
|
| 116 |
+
|
| 117 |
+
Each $gt$ point label $\mathcal{P}_i$ is regarded as the supplier in our proposed OT problem, which holds $x_{i} = k$ pixels of pseudo mask label $M$ . To set the accurate number of $k$ , we introduce the centroid-based unit number calculation scheme that can be divided into two steps, as shown in Fig. 4.
|
| 118 |
+
|
| 119 |
+
Firstly, we obtain the pair-wise cost values along the shortest path $\mathbb{P}$ for each undetermined pixel to each $gt$ point
|
| 120 |
+
|
| 121 |
+
Algorithm 1 Optimal Transport for Pseudo-mask Generation

```txt
Input:
    I^{H×W×3}: an input image.
    M^{H×W×1}: the pseudo-mask label, initialized with zeros.
    P: the set of gt point labels.
    T: the iteration number of the Sinkhorn-Knopp iteration.
Output:
    M: the assigned pseudo-mask label.

 1: m ← |P|, n ← |M|
 2: P^s, P^b_high, P^b_low ← Forward(I, P)
 3: Compute the pairwise pixel-to-gt cost c_ij
 4: x_i (i = 1, 2, ..., m) ← centroid-based k calculation
 5: y_j (j = 1, 2, ..., n) ← 1          ▷ init y with ones
 6: u^0, v^0 ← 1                        ▷ init u and v with ones
 7: for t = 0 to T do:
 8:     u^{t+1}, v^{t+1} ← SinkhornIter(c, u^t, v^t, x, y)
 9: Compute the optimal plan Γ
10: Compute the pseudo-mask label: M = argmax(Γ)
11: return M
```
|
| 187 |
+
|
| 188 |
+
label $\mathcal{P}_i$. The initial $gt$ point label assignment for each pixel can be achieved with its minimum cost among all $gt$ labels in the whole image. Note that the $gt$ points are randomly labeled on each target in the image and can be located at any position of the target to be segmented, such as a corner or an edge. Such arbitrary placements cannot reflect the typical and accurate characteristics of a target, especially for the border pixels between thing-based instances belonging to the same category.
|
| 189 |
+
|
| 190 |
+
Based on the initial $gt$ point label assignment, the initial mask label for each target can be obtained. We then calculate the corresponding centroid $\mathcal{C}_i$ of initial mask label as the substitution of $gt$ point label $\mathcal{P}_i$ for each target. The pairwise cost $c_{ij}$ for each pixel and $\mathcal{C}_i$ can be re-calculated along the corresponding shortest path. The $k$ unit number $(x_i)$ is computed by counting the ones in $N_{ij}$ with the minimum cost values to each centroid $\mathcal{C}$ , which can be formulated as follows:
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
x_{i} = \sum_{j}^{n} N_{ij}, \quad N_{ij} = \begin{cases} 1, & \underset{i}{\operatorname{argmin}}\, c_{ij} = i, \\ 0, & \text{otherwise.} \end{cases} \tag{4}
|
| 194 |
+
$$
|
| 195 |
+
|
| 196 |
+
The iterated calculation scheme can obtain a more accurate unit number $k$ ; the detailed performance analysis in Sec. 4.4 examines the effectiveness of the proposed scheme.
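The two-step scheme can be sketched as follows (illustrative NumPy code, not the authors' implementation); `cost_fn` stands in for the Dijkstra-based cost computation sketched above and is an assumed helper.

```python
# Sketch of the centroid-based unit-number calculation of Eq. (4); `cost_fn` is an
# assumed helper mapping a list of (row, col) seeds to an (m, H*W) cost matrix.
import numpy as np

def unit_numbers(cost_to_gt, cost_fn, H, W):
    m = cost_to_gt.shape[0]
    initial_assign = cost_to_gt.argmin(axis=0)                  # step 1: initial masks
    centroids = []
    for i in range(m):
        flat = np.where(initial_assign == i)[0]
        rows, cols = np.unravel_index(flat, (H, W))
        centroids.append((int(rows.mean()), int(cols.mean())))  # centroid per target
    cost_to_centroid = cost_fn(centroids)                       # step 2: recompute costs
    refined_assign = cost_to_centroid.argmin(axis=0)
    k = np.bincount(refined_assign, minlength=m)                # x_i in Eq. (4)
    return k, cost_to_centroid
```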
|
| 197 |
+
|
| 198 |
+
# 3.4. Learning and Inference
|
| 199 |
+
|
| 200 |
+
# 3.4.1 Weakly Supervised Learning
|
| 201 |
+
|
| 202 |
+
In this section, we introduce the objective for category-wise semantic map $P^s$ and instance-wise boundary map $P^b$ in a weakly-supervised manner with only a single point label.
|
| 203 |
+
|
| 204 |
+

|
| 205 |
+
Figure 4: The process of centroid-based $k$ calculation with two targets in an image. Step 1: The initial assignment (i.e., the pixels with yellow and green color divided by the middle curve line of dashes) with the minimal cost can be achieved based on the $gt$ point labels $\mathcal{P}_1$ and $\mathcal{P}_2$ . Step2: The centroids $\mathcal{C}_1$ and $\mathcal{C}_2$ of each initially assigned mask are the substitutions of $gt$ points, and the minimal cost can be re-calculated to achieve the refined assignment and determine the accurate unit number $k$ for each target.
|
| 206 |
+
|
| 207 |
+
Semantic Map Learning. Like the weakly-supervised semantic methods [30, 43], we adopt the partial cross-entropy loss $\mathcal{L}_{\text{partial}}$ , which makes full use of the available $gt$ point labels to achieve region-supervised learning and generate a sparse semantic map.
|
| 208 |
+
|
| 209 |
+
To obtain accurate semantic logits for the unlabeled regions, we further take advantage of both a local LAB affinity and a long-range RGB affinity based on the input image. The local LAB affinity explores the color similarity in the LAB color space with a local kernel, which is employed as the loss term $\mathcal{L}_{sem}^{LAB}$ as in [44]. The long-range RGB affinity captures the pixel similarity in RGB space, which is implemented by a minimum spanning tree. As in [30], it is utilized as the loss term $\mathcal{L}_{sem}^{RGB}$ . The objective for semantic map learning is denoted as:
|
| 210 |
+
|
| 211 |
+
$$
|
| 212 |
+
\mathcal{L}_{sem} = \mathcal{L}_{\text{partial}} + \alpha_{1} \mathcal{L}_{sem}^{LAB} + \alpha_{2} \mathcal{L}_{sem}^{RGB}. \tag{5}
|
| 213 |
+
$$
|
| 214 |
+
|
| 215 |
+
Please refer to the Appendix for the detailed formulation of these loss terms.
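As a concrete reference for the first term, a partial cross-entropy can be implemented by evaluating the standard cross-entropy only at the annotated points, as in the following PyTorch sketch (the LAB/RGB affinity terms are omitted here since their exact form is given in the Appendix).

```python
# Partial cross-entropy: standard cross-entropy evaluated only at annotated points;
# unlabeled pixels carry ignore_index (a common convention, assumed here).
import torch
import torch.nn.functional as F

def partial_cross_entropy(logits, point_labels, ignore_index=255):
    """logits: (B, C, H, W) semantic logits; point_labels: (B, H, W) long tensor with
    class indices at annotated gt points and ignore_index everywhere else."""
    return F.cross_entropy(logits, point_labels, ignore_index=ignore_index)
```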
|
| 216 |
+
|
| 217 |
+
High-level Boundary Map Learning. To encourage the boundary decoder to predict the high-level instance-wise boundary map $P_{high}^{b}$ , we propose an effective loss function $\mathcal{L}_{bou}$ for the panoptic segmentation task. If a boundary exists between two adjacent pixels, we assume that their affinity is small, as in [1]. Hence, we introduce the high-level affinity representation $\mathcal{A}$ . For each pixel $p_k$ on $P_{high}^{b}$ , let $p_l$ be one of its eight neighbors $\mathcal{N}_8$ . The affinity $\mathcal{A}_{kl}$ can be represented as follows:
|
| 218 |
+
|
| 219 |
+
$$
|
| 220 |
+
\mathcal {A} _ {k l} = 1 - \max P _ {h i g h} ^ {b} (p _ {k}, p _ {l}). \tag {6}
|
| 221 |
+
$$
|
| 222 |
+
|
| 223 |
+
Then, we make full use of the mask affinity equivalence among the neighbor pixels based on the generated pseudo
|
| 224 |
+
|
| 225 |
+
mask $M$ . The loss function $\mathcal{L}_{bou}$ can be defined as:
|
| 226 |
+
|
| 227 |
+
$$
|
| 228 |
+
\begin{array}{l} \mathcal {L} _ {b o u} = - \sum_ {(k, l) \in M _ {t h i n g} ^ {+}} \frac {\log \mathcal {A} _ {k l}}{2 \left| M _ {t h i n g} ^ {+} \right|} - \sum_ {(k, l) \in M _ {s t u f f} ^ {+}} \frac {\log \mathcal {A} _ {k l}}{2 \left| M _ {s t u f f} ^ {+} \right|} \\ - \sum_ {(k, l) \in M ^ {-}} \frac {\log (1 - \mathcal {A} _ {k l})}{| M ^ {-} |}, \tag {7} \\ \end{array}
|
| 229 |
+
$$
|
| 230 |
+
|
| 231 |
+
where $M_{thing}^{+}$ denotes that the pair of adjacent pixels $p_k$ and $p_l$ lie inside the same thing-based pseudo mask. Similarly, $M_{stuff}^{+}$ denotes that $p_k$ and $p_l$ lie inside the same stuff-based pseudo mask. In contrast, $M^{-}$ denotes that the pair of pixels have different pseudo-mask labels. Driven by the $\mathcal{L}_{bou}$ term, we can learn accurate high-level boundaries. The Appendix shows some visual examples for better illustration.
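A simplified PyTorch sketch of Eqs. (6)-(7) is given below; for brevity it uses only the right and down neighbor offsets instead of the full 8-neighborhood, and `is_thing` is an assumed helper input marking which pseudo-mask ids belong to thing instances.

```python
# Simplified sketch of Eqs. (6)-(7): only right/down neighbour pairs are used,
# and `is_thing` is an assumed helper marking thing-instance ids.
import torch

def boundary_loss(P_b, M, is_thing, eps=1e-6):
    """P_b: (H, W) boundary map in [0, 1]; M: (H, W) long pseudo-mask ids;
    is_thing: (num_ids,) bool tensor."""
    losses = []
    for dy, dx in [(0, 1), (1, 0)]:                       # right and down neighbours
        pk = P_b[: P_b.shape[0] - dy, : P_b.shape[1] - dx]
        pl = P_b[dy:, dx:]
        affinity = 1.0 - torch.maximum(pk, pl)            # Eq. (6)
        mk = M[: M.shape[0] - dy, : M.shape[1] - dx]
        ml = M[dy:, dx:]
        same = mk == ml
        pos_thing = same & is_thing[mk]
        pos_stuff = same & ~is_thing[mk]
        neg = ~same
        loss = (
            -torch.log(affinity[pos_thing] + eps).sum() / (2 * pos_thing.sum().clamp(min=1))
            - torch.log(affinity[pos_stuff] + eps).sum() / (2 * pos_stuff.sum().clamp(min=1))
            - torch.log(1.0 - affinity[neg] + eps).sum() / neg.sum().clamp(min=1)
        )
        losses.append(loss)
    return sum(losses) / len(losses)
```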
|
| 232 |
+
|
| 233 |
+
# 3.4.2 Training and Inference
|
| 234 |
+
|
| 235 |
+
Loss Function. Once the pseudo-masks are obtained, the panoptic segmentation sub-model is trained with these generated labels in a fully supervised manner. We adopt Panoptic SegFormer [29] as the panoptic sub-network. The fully-supervised loss terms consist of the focal loss for classification, the localization loss for box localization, and the dice loss on the mask decoder for the final panoptic segmentation. For simplicity, we denote these losses used to train the panoptic segmentation model as $\mathcal{L}_{\text {full }}$ . The total loss $\mathcal{L}_{\text {total }}$ can be formulated as follows:
|
| 236 |
+
|
| 237 |
+
$$
|
| 238 |
+
\mathcal{L}_{\text{total}} = \mathcal{L}_{\text{full}} + \mathcal{L}_{\text{sem}} + \mathcal{L}_{\text{bou}}. \tag{8}
|
| 239 |
+
$$
|
| 240 |
+
|
| 241 |
+
Inference. For the inference process of Point2Mask, only the panoptic segmentation model is retained after training, which is the same as the original Panoptic SegFormer model [29]. The process of pseudo-mask generation with OT incurs about $25\%$ extra computational load during training, but it is totally cost-free during inference.
|
| 242 |
+
|
| 243 |
+
# 4. Experiments
|
| 244 |
+
|
| 245 |
+
To evaluate our proposed approach, we conduct experiments on Pascal VOC [13] and COCO [31]. Only a single point label per target is used to train our method, which is randomly sampled with the uniform distribution from the original pixel-wise mask annotations.
|
| 246 |
+
|
| 247 |
+
# 4.1. Datasets
|
| 248 |
+
|
| 249 |
+
Pascal VOC [13]. Pascal VOC consists of 20 "thing" and 1 "stuff" categories. It contains 10,582 images for model training and 1,449 validation images for evaluation [16].
|
| 250 |
+
|
| 251 |
+
COCO [31]. COCO has 80 "thing" and 53 "stuff" categories, which is a challenging benchmark. Our models are trained on train2017 (115K images), and evaluated on val2017 (5K images).
|
| 252 |
+
|
| 253 |
+
# 4.2. Implementation Details
|
| 254 |
+
|
| 255 |
+
The models are trained with the AdamW optimizer [33]. We make use of the mmdetection toolbox [4] and follow the commonly used training settings on each dataset. ResNet [17] and Swin-Transformer [32] are employed as the backbones, which are pre-trained on ImageNet [36]. On Pascal VOC, the initial learning rate is set to $10^{-4}$ and the weight decay to 0.1, with eight images per mini-batch. The models are trained with a $2 \times$ schedule of 24 epochs. On COCO, the initial learning rate is set to $2 \times 10^{-4}$ , which is reduced by a factor of 10 at the 8-th and 12-th epochs, with 16 images per mini-batch. The models are trained for 15 epochs. The number of iterations of the Sinkhorn Iteration for solving the defined OT problem is set to 80. $\beta$ is 0.1 in Eq. 3, and $\alpha_{1} = \alpha_{2} = 3.0$ in Eq. 5 in our implementation. As in [28], the number of query tokens for the fully-supervised panoptic segmentation sub-model is set to 300. The manifold projector proposed in [14] is employed to better represent the instance-wise information on top of our baseline model. Unless specified otherwise, our centroid-based unit number calculation scheme is not iterated in the main experiments. We report the standard evaluation metrics [20] of the panoptic segmentation task, including panoptic quality (PQ), segmentation quality (SQ) and recognition quality (RQ).
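For convenience, the hyper-parameters listed above can be gathered into a single illustrative summary dictionary (a reading aid, not an actual mmdetection config file):

```python
# The hyper-parameters above, gathered into one illustrative summary dictionary
# (a reading aid, not an actual mmdetection config file).
point2mask_train_cfg = {
    "optimizer": "AdamW",
    "voc": {"lr": 1e-4, "weight_decay": 0.1, "batch_size": 8, "epochs": 24},
    "coco": {"lr": 2e-4, "batch_size": 16, "epochs": 15, "lr_decay_epochs": [8, 12]},
    "sinkhorn_iters": 80,           # T in Algorithm 1
    "beta": 0.1,                    # weight of the boundary term in Eq. (3)
    "alpha1": 3.0, "alpha2": 3.0,   # affinity loss weights in Eq. (5)
    "num_queries": 300,             # query tokens of the panoptic sub-model
}
```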
|
| 256 |
+
|
| 257 |
+
# 4.3. Main Results
|
| 258 |
+
|
| 259 |
+
We compare our proposed Point2Mask method against state-of-the-art weakly supervised panoptic segmentation approaches. Moreover, the results of representative fully mask-supervised methods are reported for reference.
|
| 260 |
+
|
| 261 |
+
Results on Pascal VOC. Table 1 reports the comparison results on Pascal VOC val. It can be clearly seen that Point2Mask with the ResNet-50 backbone outperforms the recent single point-supervised method PSPS [14] by an absolute $4.0\%$ PQ (from $49.8\%$ to $53.8\%$ ). The performance improvement mainly stems from the thing-based objects, from $47.8\%$ $\mathrm{PQ^{th}}$ to $51.9\%$ $\mathrm{PQ^{th}}$ ( $+4.1\%$ $\mathrm{PQ^{th}}$ ), in contrast to the improvements on $\mathrm{PQ^{st}}$ ( $89.5\%$ vs. $90.3\%$ ). It demonstrates the effectiveness of our presented pseudo-mask generation scheme by OT for thing-based instances. Our approach even outperforms Panoptic FCN [28] with 10 point labels by $5.8\%$ PQ ( $53.8\%$ vs. $48.0\%$ ). Moreover, our proposed method obtains $61.0\%$ PQ with the Swin-L [32] backbone, which achieves comparable results against the fully supervised methods. When the point-labeled COCO dataset is used for model pre-training, we achieve significant performance improvements, such as from $53.8\%$ PQ to $60.7\%$ PQ with the ResNet-50 backbone. With the Swin-L backbone, Point2Mask obtains $64.2\%$ PQ, surpassing the fully supervised method [25] by $1.1\%$ PQ.
|
| 262 |
+
|
| 263 |
+
Results on COCO. Table 2 reports the evaluation results compared with the state-of-the-art (SOTA) methods on COCO. Our proposed Point2Mask method achieves $32.4\%$
|
| 264 |
+
|
| 265 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Backbone</td><td rowspan="2">Supervision</td><td colspan="3">VOC 2012</td><td colspan="3">VOC 2012 with COCO</td></tr><tr><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td></tr><tr><td>Li et al. [25]</td><td>ResNet-101</td><td>\(\mathcal{M}\)</td><td>62.7</td><td>-</td><td>-</td><td>63.1</td><td>-</td><td>-</td></tr><tr><td>Panoptic FPN [20]</td><td>ResNet-50</td><td>\(\mathcal{M}\)</td><td>65.7</td><td>64.5</td><td>90.8</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>\(\mathcal{M}\)</td><td>67.9</td><td>66.6</td><td>92.9</td><td>73.1</td><td>72.1</td><td>93.8</td></tr><tr><td>Panoptic SegFormer [29]</td><td>ResNet-50</td><td>\(\mathcal{M}\)</td><td>67.9</td><td>66.6</td><td>92.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Li et al. [25]</td><td>ResNet-101</td><td>\(\mathcal{B}+\mathcal{I}\)</td><td>59.0</td><td>-</td><td>-</td><td>59.5</td><td>-</td><td>-</td></tr><tr><td>JTSM [38]</td><td>ResNet-18-WS [39]</td><td>\(\mathcal{I}\)</td><td>39.0</td><td>37.1</td><td>77.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PSPS [14]</td><td>ResNet-50</td><td>\(\mathcal{P}\)</td><td>49.8</td><td>47.8</td><td>89.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>\(\mathcal{P}_{10}\)</td><td>48.0</td><td>46.2</td><td>85.2</td><td>52.4</td><td>50.8</td><td>86.0</td></tr><tr><td>Point2Mask</td><td>ResNet-50</td><td>\(\mathcal{P}\)</td><td>53.8</td><td>51.9</td><td>90.5</td><td>60.7</td><td>59.1</td><td>91.8</td></tr><tr><td>Point2Mask</td><td>ResNet-101</td><td>\(\mathcal{P}\)</td><td>54.8</td><td>53.0</td><td>90.4</td><td>63.2</td><td>61.8</td><td>92.3</td></tr><tr><td>Point2Mask</td><td>Swin-L</td><td>\(\mathcal{P}\)</td><td>61.0</td><td>59.4</td><td>93.0</td><td>64.2</td><td>62.7</td><td>93.2</td></tr></table>
|
| 266 |
+
|
| 267 |
+
Table 1: Performance comparisons on Pascal VOC2012 val. $\mathcal{M}$ denotes the pixel-wise mask annotations. $\mathcal{P}$ and $\mathcal{P}_{10}$ are point-level supervision with 1 and 10 points per target, respectively. $\mathcal{I}$ and $\mathcal{B}$ are the image-level and box-level supervisions (the same below). Besides, VOC 2012 with COCO represents training and validation on VOC 2012 dataset with COCO pre-trained model.
|
| 268 |
+
|
| 269 |
+
<table><tr><td>Method</td><td>Backbone</td><td>Supervision</td><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td><td>SQ</td><td>RQ</td></tr><tr><td>AdaptIS [41]</td><td>ResNet-50</td><td>M</td><td>35.9</td><td>40.3</td><td>29.3</td><td>-</td><td>-</td></tr><tr><td>Panoptic FPN [20]</td><td>ResNet-50</td><td>M</td><td>39.4</td><td>45.9</td><td>29.6</td><td>77.8</td><td>48.3</td></tr><tr><td>Panoptic-DeepLab [6]</td><td>Xception-71 [10]</td><td>M</td><td>39.7</td><td>43.9</td><td>33.2</td><td>-</td><td>-</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>M</td><td>43.6</td><td>49.3</td><td>35.0</td><td>80.6</td><td>52.6</td></tr><tr><td>Panoptic SegFormer [29]</td><td>ResNet-50</td><td>M</td><td>48.0</td><td>52.3</td><td>41.5</td><td>-</td><td>-</td></tr><tr><td>Mask2Former [7]</td><td>ResNet-50</td><td>M</td><td>51.9</td><td>57.7</td><td>43.0</td><td>-</td><td>-</td></tr><tr><td>JTSM [38]</td><td>ResNet-18-WS</td><td>I</td><td>5.3</td><td>8.4</td><td>0.7</td><td>30.8</td><td>7.8</td></tr><tr><td>PSPS [14]</td><td>ResNet-50</td><td>P</td><td>29.3</td><td>29.3</td><td>29.4</td><td>-</td><td>-</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>\( P_{10} \)</td><td>31.2</td><td>35.7</td><td>24.3</td><td>-</td><td>-</td></tr><tr><td>Point2Mask</td><td>ResNet-50</td><td>P</td><td>32.4</td><td>32.6</td><td>32.2</td><td>75.1</td><td>41.5</td></tr><tr><td>Point2Mask</td><td>ResNet-101</td><td>P</td><td>34.0</td><td>34.3</td><td>33.5</td><td>75.1</td><td>43.5</td></tr><tr><td>Point2Mask</td><td>Swin-L</td><td>P</td><td>37.0</td><td>37.0</td><td>36.9</td><td>75.8</td><td>47.2</td></tr></table>
|
| 270 |
+
|
| 271 |
+
Table 2: Panoptic segmentation results on COCO val2017. Weakly and fully supervised methods are compared.
|
| 272 |
+
|
| 273 |
+
PQ with single point supervision when ResNet-50 is employed as the backbone. It outperforms the previous SOTA method PSPS [14] by $3.1\%$ PQ, $3.3\%$ $\mathrm{PQ^{th}}$ and $2.8\%$ $\mathrm{PQ^{st}}$ under the same setting. Compared with Panoptic FCN [28] with 10 point labels, our approach surpasses it by $1.2\%$ PQ (32.4% vs. 31.2%). With Swin-L as the backbone, Point2Mask achieves $37.0\%$ PQ performance, which is comparable with some fully mask-supervised methods, including AdaptIS [41], Panoptic FPN [20] and Panoptic-DeepLab [6] with ResNet-50 backbone.
|
| 274 |
+
|
| 275 |
+
# 4.4. Ablation Studies
|
| 276 |
+
|
| 277 |
+
We analyze the design of each component in Point2Mask on the Pascal VOC dataset.
|
| 278 |
+
|
| 279 |
+
Different Task-oriented Maps. We employ the category-wise semantic map $P^s$ and the low-level and high-level boundary maps $P_{low}^{b}$ and $P_{high}^{b}$ to calculate the cost for optimal transport. Table 3 shows the evaluation results with
|
| 280 |
+
|
| 281 |
+
different task-oriented maps. Our method achieves $50.6\%$ PQ using the $P^s$ map only, which focuses on the semantic logit differences among the categories. When $P_{low}^b$ and $P_{high}^b$ are employed separately, our method achieves $51.1\%$ PQ and $53.4\%$ PQ, respectively. More specifically, $P_{high}^b$ brings $+2.9\%$ PQ gains driven by the designed boundary loss function $\mathcal{L}_{bou}$ . When all maps are adopted, Point2Mask achieves the best performance of $53.8\%$ PQ.
|
| 282 |
+
|
| 283 |
+
Semantic Map Learning. Single point-supervised semantic parsing is the bedrock for obtaining the panoptic segmentation results in our Point2Mask. As shown in Table 4, when both the local LAB loss $\mathcal{L}_{sem}^{LAB}$ and the long-range RGB loss $\mathcal{L}_{sem}^{RGB}$ are adopted for semantic map learning, the best $69.5\%$ mIoU and $53.8\%$ PQ are obtained, compared to using either loss term alone.
|
| 284 |
+
|
| 285 |
+
Different Unit Number Calculation Schemes. We explore three different schemes to calculate the unit number $k$ for $gt$ supplier, including "Equal Division", "Nearest $gt$
|
| 286 |
+
|
| 287 |
+
<table><tr><td>P^s</td><td>P^b_{low}</td><td>P^b_{high}</td><td>PQ</td><td>PQ^th</td><td>PQ^st</td></tr><tr><td>✓</td><td></td><td></td><td>50.6</td><td>48.7</td><td>90.1</td></tr><tr><td>✓</td><td>✓</td><td></td><td>51.1</td><td>49.1</td><td>90.3</td></tr><tr><td>✓</td><td></td><td>✓</td><td>53.4</td><td>51.6</td><td>90.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>53.8</td><td>51.9</td><td>90.5</td></tr></table>
|
| 288 |
+
|
| 289 |
+
Table 3: The impact of different task-oriented maps to calculate the pixel-to-gt point label cost $c_{ij}$ in OT.
|
| 290 |
+
|
| 291 |
+
<table><tr><td>\( \mathcal{L}_{\text{partial}} \)</td><td>\( \mathcal{L}_{\text{sem}}^{LAB} \)</td><td>\( \mathcal{L}_{\text{sem}}^{RGB} \)</td><td>mIoU</td><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td></tr><tr><td>✓</td><td></td><td></td><td>61.6</td><td>40.4</td><td>38.1</td><td>86.1</td></tr><tr><td>✓</td><td>✓</td><td></td><td>69.0</td><td>51.2</td><td>49.3</td><td>90.0</td></tr><tr><td>✓</td><td></td><td>✓</td><td>68.0</td><td>49.5</td><td>47.5</td><td>89.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>69.5</td><td>53.8</td><td>51.9</td><td>90.5</td></tr></table>
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
Figure 5: Visual comparisons on distance heatmap with different calculation schemes of $k$ . (a) shows the $gt$ point label and pixel-wise mask label. (b) indicates the heatmap based on the Nearest $gt$ Point scheme. (c) is the heatmap based on our proposed Nearest Centroid scheme. The corresponding shortest paths are shown for better illustration.
|
| 295 |
+
|
| 296 |
+
Point" and "Nearest Centroid". The Equal Division treats the mean value as $k$ for each $gt$ point supplier from all pixels. The Nearest $gt$ Point indicates that the total number of pixels are with the nearest distances measured by the cost for each $gt$ point. For simplicity, we denote the presented centroid-based unit number calculation scheme in Sec. 3.3.3 as the Nearest Centroid. Table 5 reports the comparison results. Our Nearest Centroid scheme obtains the best performance with $53.8\%$ PQ, which outperforms Equal Division and Nearest $gt$ Point by $1.4\%$ PQ and $1.0\%$ PQ, respectively. Furthermore, we report the visual comparisons on distance heatmap, as shown in Fig. 5. It can be clearly seen that the proposed Nearest Centroid scheme obtains the accurate unit number $k$ for each $gt$ point supplier.
|
| 297 |
+
|
| 298 |
+
In addition, as shown in Table 6, the Nearest Centroid scheme with more iterations (8 iterations) brings a performance gain of $+0.48\%$ PQ. With 10 iterations, the performance saturates at $54.07\%$ PQ.
|
| 299 |
+
|
| 300 |
+
Different Pseudo-mask Generation Methods.
|
| 301 |
+
|
| 302 |
+
Table 4: Comparison of different weakly-supervised loss terms for category-wise semantic map learning.
|
| 303 |
+
|
| 304 |
+
<table><tr><td>Scheme</td><td>PQ</td><td>PQth</td><td>PQst</td></tr><tr><td>Equal Division</td><td>52.4</td><td>50.5</td><td>90.2</td></tr><tr><td>Nearest gt Point</td><td>52.8</td><td>50.9</td><td>90.1</td></tr><tr><td>Nearest Centroid</td><td>53.8</td><td>51.9</td><td>90.5</td></tr></table>
|
| 305 |
+
|
| 306 |
+
Table 5: Performance with different calculation schemes of $k$ for our defined OT problem in Point2Mask.
|
| 307 |
+
|
| 308 |
+
<table><tr><td>Iterations</td><td>1</td><td>2</td><td>4</td><td>8</td><td>10</td></tr><tr><td>PQ</td><td>53.76</td><td>53.80</td><td>53.91</td><td>54.24</td><td>54.07</td></tr></table>
|
| 309 |
+
|
| 310 |
+
Table 6: Performance with various iterations in centroid updating of the Nearest Centroid scheme.
|
| 311 |
+
|
| 312 |
+
<table><tr><td>Method</td><td>PQ</td><td>PQth</td><td>PQst</td></tr><tr><td>Minimum Cost</td><td>51.9</td><td>50.1</td><td>90.2</td></tr><tr><td>Optimal Transport</td><td>54.2(↑2.3)</td><td>52.4(↑2.3)</td><td>90.3(↑0.1)</td></tr></table>
|
| 313 |
+
|
| 314 |
+
Table 7: Comparisons between Minimum Cost (MC) and Optimal Transport (OT) based on the defined cost for pseudo-mask label generation.
|
| 315 |
+
|
| 316 |
+
To examine the effectiveness of our proposed OT-based scheme, we study different methods for pseudo-mask generation in Point2Mask. Based on the presented cost on the task-oriented maps, we compare OT with the direct minimum cost (MC) method. Similar to [14], MC assigns to each pixel the $gt$ point label with the minimum cost, independently of all other pixels. Table 7 shows the comparison results. Point2Mask with our proposed OT method outperforms the MC scheme by $+2.3\%$ PQ. Specifically, the performance gains mainly stem from the thing-based targets $(+2.3\% \mathrm{PQ}^{th}$ vs. $+0.1\% \mathrm{PQ}^{st}$ ). This is because OT performs a global optimization when dealing with ambiguous locations, such as the border pixels between different thing-based targets of the same category.
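The distinction can be summarized in two lines of code (our illustration; `cost` is the $N \times M$ pixel-to-gt cost matrix and `plan` is a transport plan such as the one produced by a Sinkhorn solver, a sketch of which is given in Appendix A):

```python
import numpy as np

def assign_minimum_cost(cost: np.ndarray) -> np.ndarray:
    """MC baseline: each pixel independently picks the gt label with minimum cost."""
    return cost.argmin(axis=1)

def assign_from_transport_plan(plan: np.ndarray) -> np.ndarray:
    """OT assignment: each pixel takes the gt label that receives most of its mass
    in the globally optimized transport plan."""
    return plan.argmax(axis=1)
```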
|
| 317 |
+
|
| 318 |
+
# 5. Conclusion
|
| 319 |
+
|
| 320 |
+
An effective single point-supervised panoptic segmentation approach, namely Point2Mask, was presented. Accurate pseudo-masks were obtained by finding the optimal transport plan at a globally minimal transportation cost, defined according to the task-oriented maps. Moreover, an effective centroid-based scheme was introduced to obtain an accurate unit number for each $gt$ point supplier. Extensive experiments on the Pascal VOC and COCO benchmarks validate the leading performance of the proposed Point2Mask over previous state-of-the-art methods for point-supervised panoptic segmentation.
|
| 321 |
+
|
| 322 |
+
# Acknowledgments
|
| 323 |
+
|
| 324 |
+
This work is supported by the National Natural Science Foundation of China under Grant 61831015. The corresponding author is Jianke Zhu.
|
| 325 |
+
|
| 326 |
+
# References
|
| 327 |
+
|
| 328 |
+
[1] Jiwoon Ahn, Sunghyun Cho, and Suha Kwak. Weakly supervised learning of instance segmentation with inter-pixel relations. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 2209-2218, 2019.
|
| 329 |
+
[2] Amy Bearman, Olga Russakovsky, Vittorio Ferrari, and Li Fei-Fei. What's the point: Semantic segmentation with point supervision. In Proc. Eur. Conf. Comp. Vis., pages 549-565. Springer, 2016.
|
| 330 |
+
[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In Proc. Eur. Conf. Comp. Vis., pages 213-229. Springer, 2020.
|
| 331 |
+
[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019.
|
| 332 |
+
[5] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In Proc. Eur. Conf. Comp. Vis., pages 104-120. Springer, 2020.
|
| 333 |
+
[6] Bowen Cheng, Maxwell D Collins, Yukun Zhu, Ting Liu, Thomas S Huang, Hartwig Adam, and Liang-Chieh Chen. Panoptic-deeplab: A simple, strong, and fast baseline for bottom-up panoptic segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 12475-12485, 2020.
|
| 334 |
+
[7] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 1290–1299, 2022.
|
| 335 |
+
[8] Bowen Cheng, Omkar Parkhi, and Alexander Kirillov. Pointly-supervised instance segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 2617-2626, 2022.
|
| 336 |
+
[9] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. In Proc. Advances in Neural Inf. Process. Syst., volume 34, pages 17864-17875, 2021.
|
| 337 |
+
[10] François Chollet. Xception: Deep learning with depthwise separable convolutions. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 1251-1258, 2017.
|
| 338 |
+
[11] Marco Cuturi. Sinkhorn distances: Lightspeed computation of optimal transport. In Proc. Advances in Neural Inf. Process. Syst., volume 26, 2013.
|
| 339 |
+
[12] Piotr Dollár and C Lawrence Zitnick. Structured forests for fast edge detection. In Proc. IEEE Int. Conf. Comp. Vis., pages 1841-1848, 2013.
|
| 340 |
+
[13] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. Int. J. Comput. Vision, 88(2):303-338, 2010.
|
| 341 |
+
[14] Junsong Fan, Zhaoxiang Zhang, and Tieniu Tan. Pointly-supervised panoptic segmentation. In Proc. Eur. Conf. Comp. Vis., pages 319-336. Springer, 2022.
|
| 342 |
+
[15] Zheng Ge, Songtao Liu, Zeming Li, Osamu Yoshie, and Jian Sun. Ota: Optimal transport assignment for object detection. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 303-312, 2021.
|
| 343 |
+
|
| 344 |
+
[16] Bharath Hariharan, Pablo Arbeláez, Lubomir Bourdev, Subhransu Maji, and Jitendra Malik. Semantic contours from inverse detectors. In Proc. IEEE Int. Conf. Comp. Vis., pages 991-998, 2011.
|
| 345 |
+
[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 770-778, 2016.
|
| 346 |
+
[18] Tsung-Wei Ke, Jyh-Jing Hwang, and Stella X Yu. Universal weakly supervised segmentation by pixel-to-segment contrastive learning. In Proc. Int. Conf. Learning Represent., 2021.
|
| 347 |
+
[19] Alexander Kirillov, Ross Girshick, Kaiming He, and Piotr Dollár. Panoptic feature pyramid networks. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 6399-6408, 2019.
|
| 348 |
+
[20] Alexander Kirillov, Ross Girshick, Kaiming He, and Piotr Dollár. Panoptic feature pyramid networks. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 6399-6408, 2019.
|
| 349 |
+
[21] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023.
|
| 350 |
+
[22] Shiyi Lan, Zhiding Yu, Christopher Choy, Subhashree Radhakrishnan, Guilin Liu, Yuke Zhu, Larry S Davis, and Anima Anandkumar. Discobox: Weakly supervised instance segmentation and semantic correspondence from box supervision. In Proc. IEEE Int. Conf. Comp. Vis., pages 3406-3416, 2021.
|
| 351 |
+
[23] Feng Li, Hao Zhang, Shilong Liu, Lei Zhang, Lionel M Ni, Heung-Yeung Shum, et al. Mask dino: Towards a unified transformer-based framework for object detection and segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., 2023.
|
| 352 |
+
[24] Mengxue Li, Yi-Ming Zhai, You-Wei Luo, Peng-Fei Ge, and Chuan-Xian Ren. Enhanced transport distance for unsupervised domain adaptation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 13936-13944, 2020.
|
| 353 |
+
[25] Qizhu Li, Anurag Arnab, and Philip HS Torr. Weakly-and semi-supervised panoptic segmentation. In Proc. Eur. Conf. Comp. Vis., pages 102-118, 2018.
|
| 354 |
+
[26] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Xian-Sheng Hua, and Lei Zhang. Box-supervised instance segmentation with level set evolution. In Proc. Eur. Conf. Comp. Vis., pages 1-18. Springer, 2022.
|
| 355 |
+
[27] Wentong Li, Wenyu Liu, Jianke Zhu, Miaomiao Cui, Risheng Yu, Xiansheng Hua, and Lei Zhang. Box2mask: Box-supervised instance segmentation via level-set evolution. arXiv preprint arXiv:2212.01579, 2022.
|
| 356 |
+
[28] Yanwei Li, Hengshuang Zhao, Xiaojuan Qi, Yukang Chen, Lu Qi, Liwei Wang, Zeming Li, Jian Sun, and Jiaya Jia. Fully convolutional networks for panoptic segmentation with point-based supervision. IEEE Trans. Pattern Anal. Mach. Intell., 2022.
|
| 357 |
+
[29] Zhiqi Li, Wenhai Wang, Enze Xie, Zhiding Yu, Anima Anandkumar, Jose M Alvarez, Ping Luo, and Tong Lu. Panoptic segformer: Delving deeper into panoptic segmentation with transformers. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 1280-1289, 2022.
|
| 358 |
+
|
| 359 |
+
[30] Zhiyuan Liang, Tiancai Wang, Xiangyu Zhang, Jian Sun, and Jianbing Shen. Tree energy loss: Towards sparsely annotated semantic segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 16907-16916, 2022.
|
| 360 |
+
[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Proc. Eur. Conf. Comp. Vis., pages 740-755. Springer, 2014.
|
| 361 |
+
[32] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proc. IEEE Int. Conf. Comp. Vis., pages 10012-10022, 2021.
|
| 362 |
+
[33] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In Proc. Int. Conf. Learning Represent., 2019.
|
| 363 |
+
[34] Svetlozar T Rachev. The Monge-Kantorovich mass transference problem and its stochastic applications. Theory of Probability & Its Applications, 29(4):647-676, 1985.
|
| 364 |
+
[35] Yossi Rubner, Carlo Tomasi, and Leonidas J Guibas. A metric for distributions with applications to image databases. In Proc. IEEE Int. Conf. Comp. Vis., pages 59-66. IEEE, 1998.
|
| 365 |
+
[36] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. Int. J. Comput. Vision, 115(3):211-252, 2015.
|
| 366 |
+
[37] Wei Shen, Zelin Peng, Xuehui Wang, Huayu Wang, Jiazhong Cen, Dongsheng Jiang, Lingxi Xie, Xiaokang Yang, and Q Tian. A survey on label-efficient deep image segmentation: Bridging the gap between weak supervision and dense prediction. IEEE Trans. Pattern Anal. Mach. Intell., 2023.
|
| 367 |
+
[38] Yunhang Shen, Liujuan Cao, Zhiwei Chen, Feihong Lian, Baochang Zhang, Chi Su, Yongjian Wu, Feiyue Huang, and Rongrong Ji. Toward joint thing-and-stuff mining for weakly supervised panoptic segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 16694-16705, 2021.
|
| 368 |
+
[39] Yunhang Shen, Rongrong Ji, Yan Wang, Zhiwei Chen, Feng Zheng, Feiyue Huang, and Yunsheng Wu. Enabling deep residual networks for weakly supervised object detection. In Proc. Eur. Conf. Comp. Vis., pages 118-136. Springer, 2020.
|
| 369 |
+
[40] Richard Sinkhorn and Paul Knopp. Concerning nonnegative matrices and doubly stochastic matrices. Pacific Journal of Mathematics, 21(2):343-348, 1967.
|
| 370 |
+
[41] Konstantin Sofiuk, Olga Barinova, and Anton Konushin. Adaptis: Adaptive instance selection network. In Proc. IEEE Int. Conf. Comp. Vis., pages 7355-7363, 2019.
|
| 371 |
+
[42] Chufeng Tang, Lingxi Xie, Gang Zhang, Xiaopeng Zhang, Qi Tian, and Xiaolin Hu. Active pointly-supervised instance segmentation. In Proc. Eur. Conf. Comp. Vis., pages 606-623. Springer, 2022.
|
| 372 |
+
[43] Meng Tang, Abdelaziz Djelouah, Federico Perazzi, Yuri Boykov, and Christopher Schroers. Normalized cut loss for weakly-supervised cnn segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 1818-1827, 2018.
|
| 373 |
+
[44] Zhi Tian, Chunhua Shen, Xinlong Wang, and Hao Chen. Boxinst: High-performance instance segmentation with box annotations. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 5443-5452, 2021.
|
| 374 |
+
|
| 375 |
+
[45] Huiyu Wang, Yukun Zhu, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. Max-deeplab: End-to-end panoptic segmentation with mask transformers. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 5463-5474, 2021.
|
| 376 |
+
[46] Yuwen Xiong, Renjie Liao, Hengshuang Zhao, Rui Hu, Min Bai, Ersin Yumer, and Raquel Urtasun. Upsnet: A unified panoptic segmentation network. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 8818-8826, 2019.
|
| 377 |
+
[47] Xue Yang, Gefan Zhang, Wentong Li, Xuehui Wang, Yue Zhou, and Junchi Yan. H2rbox: Horizontal box annotation is all you need for oriented object detection. 2023.
|
| 378 |
+
[48] Qihang Yu, Huiyu Wang, Dahun Kim, Siyuan Qiao, Maxwell Collins, Yukun Zhu, Hartwig Adam, Alan Yuille, and Liang-Chieh Chen. Cmt-deeplab: Clustering mask transformers for panoptic segmentation. In Proc. IEEE Conf. Comp. Vis. Patt. Recogn., pages 2560-2570, 2022.
|
| 379 |
+
[49] Bingfeng Zhang, Jimin Xiao, Jianbo Jiao, Yunchao Wei, and Yao Zhao. Affinity attention graph neural network for weakly supervised semantic segmentation. IEEE Trans. Pattern Anal. Mach. Intell., 2021.
|
| 380 |
+
[50] Wenwei Zhang, Jiangmiao Pang, Kai Chen, and Chen Change Loy. K-net: Towards unified image segmentation. In Proc. Advances in Neural Inf. Process. Syst., volume 34, pages 10326-10338, 2021.
|
| 381 |
+
|
| 382 |
+
# Appendix
|
| 383 |
+
|
| 384 |
+
# A. Sinkhorn Iteration
|
| 385 |
+
|
| 386 |
+
Solving the transport problem exactly amounts to a linear program that can be solved in polynomial time. In our OT-based approach, however, the number of pixel samples can reach tens of thousands (the square of several hundred). To tackle such a large-scale transport problem efficiently, we adopt the Sinkhorn Iteration method [11, 15], which solves the OT problem with Sinkhorn's matrix scaling algorithm.
|
| 387 |
+
|
| 388 |
+
The Sinkhorn Iteration converts the OT optimization target into a non-linear but convex form with an entropic regularization term $R$ , which can be formulated as below:
|
| 389 |
+
|
| 390 |
+
$$
|
| 391 |
+
\min _ {\Gamma_ {i j} \in \Gamma} \sum_ {i, j = 1} ^ {m, n} \Gamma_ {i j} c _ {i j} + \lambda R (\Gamma_ {i j}), \tag {9}
|
| 392 |
+
$$
|
| 393 |
+
|
| 394 |
+
where $R(\Gamma_{ij}) = \Gamma_{ij} (\log \Gamma_{ij} - 1)$, and $\lambda$ is a regularization coefficient. Following the Sinkhorn-Knopp iteration [11, 40], scaling variables $v_i$ and $u_j$ are introduced and updated as follows:
|
| 395 |
+
|
| 396 |
+
$$
|
| 397 |
+
u_{j}^{t+1} = \frac{y_{j}}{\sum_{i} K_{ij} v_{i}^{t}}, \quad v_{i}^{t+1} = \frac{x_{i}}{\sum_{j} K_{ij} u_{j}^{t+1}}, \tag{10}
|
| 398 |
+
$$
|
| 399 |
+
|
| 400 |
+
where $K_{ij} = e^{-c_{ij} / \lambda}$. After performing the iteration $T$ times, the optimal plan $\Gamma$ is obtained as:
|
| 401 |
+
|
| 402 |
+
$$
|
| 403 |
+
\Gamma = \operatorname{diag}(u) K \operatorname{diag}(v). \tag{11}
|
| 404 |
+
$$
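A compact NumPy sketch of these updates is given below for illustration; the marginals, the toy cost matrix, and the regularization value `lam` are illustrative assumptions rather than the paper's released implementation.

```python
import numpy as np

def sinkhorn(cost: np.ndarray, supply: np.ndarray, demand: np.ndarray,
             lam: float = 0.1, iters: int = 80) -> np.ndarray:
    """Entropy-regularized OT via Sinkhorn iterations (cf. Eqs. 9-11).

    cost:   (n, m) pixel-to-gt cost matrix c_ij.
    supply: (n,) per-pixel mass x_i (row marginals).
    demand: (m,) per-gt mass y_j, i.e. the unit numbers k_j (column marginals).
    lam:    regularization coefficient (illustrative value).
    iters:  number of Sinkhorn iterations (the paper uses 80).
    """
    K = np.exp(-cost / lam)                # kernel K_ij = exp(-c_ij / lam)
    v = np.ones_like(supply)
    for _ in range(iters):
        u = demand / (K.T @ v)             # scale columns to match the demand
        v = supply / (K @ u)               # scale rows to match the supply
    return v[:, None] * K * u[None, :]     # transport plan Gamma

# Toy usage: 4 pixels, 2 gt points, with uniform marginals.
rng = np.random.default_rng(0)
plan = sinkhorn(rng.random((4, 2)), supply=np.full(4, 0.25), demand=np.full(2, 0.5))
print(plan.sum(axis=1), plan.sum(axis=0))  # ~ supply, ~ demand
```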
|
| 405 |
+
|
| 406 |
+
# B. Semantic Map Learning
|
| 407 |
+
|
| 408 |
+
The local LAB affinity and the long-range RGB affinity are integrated to generate the accurate semantic map $P^{s}$ for the unlabeled regions. In the following, we introduce the two loss terms in detail.
|
| 409 |
+
|
| 410 |
+
Local LAB Loss. As in [44], the local LAB loss $\mathcal{L}_{sem}^{LAB}$ exploits the color similarity $S_{LAB}$ of the input image in LAB color space within a local kernel. $S_{LAB}$ is defined as:
|
| 411 |
+
|
| 412 |
+
$$
|
| 413 |
+
\mathcal{S}_{LAB} = \mathcal{S}\left(r_{i}, r_{j}\right) = \exp\left(- \frac{\left\| r_{i} - r_{j} \right\|}{\theta_{1}}\right), \tag{12}
|
| 414 |
+
$$
|
| 415 |
+
|
| 416 |
+
where $r_i$ is the LAB color value of pixel $i$ and $\mathcal{N}_8(i)$ denotes its eight local neighbors. $\theta_{1}$ is a constant parameter. The $\mathcal{L}_{sem}^{LAB}$ loss term is formulated as follows:
|
| 417 |
+
|
| 418 |
+
$$
|
| 419 |
+
\mathcal{L}_{sem}^{LAB} = - \frac{1}{z_{1}} \sum_{i = 1}^{n} \sum_{j \in \mathcal{N}_{8}(i)} \mathbb{1}_{\left\{\mathcal{S}_{i, j}^{LAB} \geq \tau \right\}} \log \left( {P_{i}^{s}}^{\top} P_{j}^{s} \right), \tag{13}
|
| 420 |
+
$$
|
| 421 |
+
|
| 422 |
+
where $z_{1} = \sum_{i = 1}^{n}\sum_{j\in \mathcal{N}_{8}(i)}\mathbb{1}_{\{\mathcal{S}_{i,j}^{LAB}\geq \tau \}}$. $\mathbb{1}_{\{\mathcal{S}_{i,j}^{LAB}\geq \tau \}}$ is the indicator function, being 1 if $\mathcal{S}_{i,j}^{LAB}\geq \tau$ and 0 otherwise. As in [44], $\tau$ is set to 0.3 and $\theta_{1}$ is set to 2 by default.
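A minimal PyTorch sketch of this pairwise term follows (our own illustration, not the released code); for brevity only the horizontal and vertical neighbor pairs are shown, and the full 8-neighborhood adds the two diagonal shifts in the same way.

```python
import torch

def local_lab_loss(prob: torch.Tensor, lab: torch.Tensor,
                   theta1: float = 2.0, tau: float = 0.3) -> torch.Tensor:
    """Pairwise LAB affinity loss in the spirit of Eq. (13) for one image.

    prob: (C, H, W) soft semantic map P^s (softmax over categories).
    lab:  (3, H, W) image converted to LAB color space.
    """
    loss = prob.new_zeros(())
    count = prob.new_zeros(())
    for dy, dx in [(0, 1), (1, 0)]:                 # right and down neighbors
        h, w = prob.shape[1] - dy, prob.shape[2] - dx
        p0, p1 = prob[:, :h, :w], prob[:, dy:, dx:]
        c0, c1 = lab[:, :h, :w], lab[:, dy:, dx:]
        sim = torch.exp(-torch.norm(c0 - c1, dim=0) / theta1)   # S_LAB
        mask = (sim >= tau).float()                              # indicator term
        inner = (p0 * p1).sum(dim=0).clamp_min(1e-6)             # P_i^s . P_j^s
        loss = loss - (mask * inner.log()).sum()
        count = count + mask.sum()
    return loss / count.clamp_min(1.0)
```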
|
| 423 |
+
|
| 424 |
+
Long-range RGB Loss. Similar to [30], the long-range RGB loss $\mathcal{L}_{sem}^{RGB}$ absorbs the global pixel affinity in RGB space. The global RGB pixel similarity $S_{RGB}$ between pixels is constructed through the minimum spanning tree (MST) algorithm. The pixel similarity $S_{RGB}$ along each tree-connected path $\mathbb{E}$ is defined as follows:
|
| 425 |
+
|
| 426 |
+
$$
|
| 427 |
+
\mathcal{S}_{RGB} = \mathcal{S}(r_{i}, r_{j}) = \exp\left(- \frac{\sum_{(l, k) \in \mathbb{E}(i, j)} \| r_{l} - r_{k} \|^{2}}{\theta_{2}}\right), \tag{14}
|
| 428 |
+
$$
|
| 429 |
+
|
| 430 |
+
where $r_i$ is the RGB value of pixel $i$, and $l$ and $k$ are adjacent pixels along the tree-connected path $\mathbb{E}(i,j)$. Like $\theta_{1}$, $\theta_{2}$ is a constant value, which is set to 0.02 by default. The $\mathcal{L}_{sem}^{RGB}$ loss term is defined as:
|
| 431 |
+
|
| 432 |
+
$$
|
| 433 |
+
\mathcal{L}_{sem}^{RGB} = - \frac{1}{n} \sum_{i = 1}^{n} \left| P_{i}^{s} - \frac{1}{z_{2}} \sum_{\forall j \in \Omega} \mathcal{S}_{i, j}^{RGB} P_{j}^{s} \right|, \tag{15}
|
| 434 |
+
$$
|
| 435 |
+
|
| 436 |
+
where $z_{2} = \sum_{j}\mathcal{S}_{i,j}^{RGB}$ and $\Omega$ denotes the set of pixels in $P^s$.
|
| 437 |
+
|
| 438 |
+
# C. Additional Results
|
| 439 |
+
|
| 440 |
+
# C.1. Performance on Multiple Point Labels
|
| 441 |
+
|
| 442 |
+
To further investigate the effectiveness of our approach with multiple point labels, we conduct experiments with a ten-point annotation per target. The results of fully mask-supervised and single point-supervised methods are also listed for reference. As shown in Table A1, we compare Point2Mask with the state-of-the-art methods, including Panoptic FCN [28] and PSPS [14], with ten-point labels on the Pascal VOC and COCO datasets. With the ResNet-50 backbone, Point2Mask outperforms Panoptic FCN [28] by $11.1\%$ PQ $(59.1\%$ vs. $48.0\%)$ on Pascal VOC and $4.0\%$ PQ $(35.2\%$ vs. $31.2\%)$ on COCO. Point2Mask also surpasses PSPS [14] by $2.5\%$ PQ and $2.1\%$ PQ on Pascal VOC and COCO, respectively. Furthermore, Point2Mask achieves even more competitive performance with $60.2\%$ PQ on Pascal VOC and $36.7\%$ PQ on COCO using the ResNet-101 backbone.
|
| 443 |
+
|
| 444 |
+
# C.2. Hyper-parameter Selection in OT
|
| 445 |
+
|
| 446 |
+
We perform the following experiments to examine the impact of hyper-parameters in our OT-based method.
|
| 447 |
+
|
| 448 |
+
Different Numbers of Sinkhorn Iterations. We run the Sinkhorn Iteration with different numbers of iterations to solve the OT problem. Table A2 reports the panoptic segmentation results. When the iteration number is set to 80, Point2Mask achieves the best performance with $53.8\%$ PQ.
|
| 449 |
+
|
| 450 |
+
Impact of $\beta$ . In our paper, $\beta$ in Eq. 3 controls the importance of the boundary map $P^b$ in calculating the pixel-to-gt cost $c_{i,j}$ . Table A3 shows the results with different values of $\beta$ . When $\beta = 0.1$ , Point2Mask obtains the best performance. This indicates that the cost from the instance-wise
|
| 451 |
+
|
| 452 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Backbone</td><td rowspan="2">Supervision</td><td colspan="3">VOC 2012</td><td colspan="3">COCO</td></tr><tr><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td><td>PQ</td><td>\( PQ^{th} \)</td><td>\( PQ^{st} \)</td></tr><tr><td>Panoptic FPN [20]</td><td>ResNet-50</td><td>M</td><td>65.7</td><td>64.5</td><td>90.8</td><td>41.5</td><td>48.3</td><td>31.2</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>M</td><td>67.9</td><td>66.6</td><td>92.9</td><td>43.6</td><td>49.3</td><td>35.0</td></tr><tr><td>Panoptic SegFormer [29]</td><td>ResNet-50</td><td>M</td><td>67.9</td><td>66.6</td><td>92.7</td><td>48.0</td><td>52.3</td><td>41.5</td></tr><tr><td>PSPS [14]</td><td>ResNet-50</td><td>P</td><td>49.8</td><td>47.8</td><td>89.5</td><td>29.3</td><td>29.3</td><td>29.4</td></tr><tr><td>Point2Mask (Ours)</td><td>ResNet-50</td><td>P</td><td>54.2</td><td>52.4</td><td>90.3</td><td>32.4</td><td>32.6</td><td>32.2</td></tr><tr><td>Panoptic FCN [28]</td><td>ResNet-50</td><td>\( P_{10} \)</td><td>48.0</td><td>46.2</td><td>85.2</td><td>31.2</td><td>35.7</td><td>24.3</td></tr><tr><td>PSPS [14]</td><td>ResNet-50</td><td>\( P_{10} \)</td><td>56.6</td><td>54.8</td><td>91.4</td><td>33.1</td><td>33.6</td><td>32.2</td></tr><tr><td>Point2Mask (Ours)</td><td>ResNet-50</td><td>\( P_{10} \)</td><td>59.1</td><td>57.5</td><td>91.8</td><td>35.2</td><td>36.1</td><td>34.0</td></tr><tr><td>Point2Mask (Ours)</td><td>ResNet-101</td><td>\( P_{10} \)</td><td>60.2</td><td>58.6</td><td>92.1</td><td>36.7</td><td>37.3</td><td>35.7</td></tr></table>
|
| 453 |
+
|
| 454 |
+
Table A1: Performance comparison on Pascal VOC val and COCO val2017. $\mathcal{M}$ is pixel-wise mask label. $\mathcal{P}$ and $\mathcal{P}_{10}$ denote 1 and 10 point labels per target, respectively. The results with $\mathcal{M}$ and $\mathcal{P}$ supervision are listed as reference to illustrate the performance with 10 point labels.
|
| 455 |
+
|
| 456 |
+
<table><tr><td>Iter. Num.</td><td>PQ</td><td>PQth</td><td>PQst</td></tr><tr><td>40</td><td>53.0</td><td>51.2</td><td>90.1</td></tr><tr><td>60</td><td>53.5</td><td>51.7</td><td>90.1</td></tr><tr><td>80</td><td>53.8</td><td>51.9</td><td>90.5</td></tr><tr><td>100</td><td>52.7</td><td>50.8</td><td>90.1</td></tr><tr><td>120</td><td>52.2</td><td>50.3</td><td>90.2</td></tr></table>
|
| 457 |
+
|
| 458 |
+
Table A2: The results with different numbers of iterations in the Sinkhorn Iteration.
|
| 459 |
+
|
| 460 |
+
<table><tr><td>β</td><td>PQ</td><td>PQth</td><td>PQst</td></tr><tr><td>1.0</td><td>52.3</td><td>50.4</td><td>90.2</td></tr><tr><td>0.5</td><td>52.4</td><td>50.5</td><td>90.2</td></tr><tr><td>0.2</td><td>52.8</td><td>50.9</td><td>90.3</td></tr><tr><td>0.1</td><td>53.8</td><td>51.9</td><td>90.5</td></tr><tr><td>0.05</td><td>53.1</td><td>51.2</td><td>90.1</td></tr><tr><td>0.01</td><td>51.9</td><td>50.0</td><td>89.6</td></tr></table>
|
| 461 |
+
|
| 462 |
+
Table A3: Results with different values of $\beta$ in Eq. 3.
|
| 463 |
+
|
| 464 |
+
boundary map $P^b$ plays a complementary role to the main cost term based on $P^s$. Furthermore, visual examples of the learnt high-level boundary $P_{high}^b$ are shown in Fig. A1.
|
| 465 |
+
|
| 466 |
+
# C.3. More Visualization Results
|
| 467 |
+
|
| 468 |
+
To further illustrate the performance of our single point-supervised approach, we give more visualization results.
|
| 469 |
+
|
| 470 |
+
Fig. A2 shows the qualitative comparison with the state-of-the-art method PSPS [14]. It can be seen that our proposed Point2Mask approach precisely resolves the ambiguous regions between nearby instances. This demonstrates that our OT-based approach can discriminate thing-based targets with accurate boundaries. In addition, Fig. A3 provides panoptic segmentation results of Point2Mask on the COCO and Pascal VOC datasets.
|
| 471 |
+
|
| 472 |
+
# D. Discussion
|
| 473 |
+
|
| 474 |
+
Differences from existing works. Like previous weakly-supervised methods [14, 44, 27, 26], our method
|
| 475 |
+
|
| 476 |
+

|
| 477 |
+
Figure A1: Visual examples of high-level boundary map. The accurate boundary for thing-based objects can be learnt.
|
| 478 |
+
|
| 479 |
+
aims to achieve high-quality segmentation with label-efficient sparse labels, which is different from the existing promptable segmentation model [21] trained on a large amount of data with the corresponding mask labels.
|
| 480 |
+
|
| 481 |
+
We adopt the same base architecture as PSPS [14], i.e., first generating pseudo labels and then training the panoptic segmentation branch. To generate the panoptic pseudo labels, both our method and PSPS [14] employ category-wise and instance-wise representations. For the category-wise representation, we employ both the local LAB and long-range RGB pixel similarities (Sec. 3.4.1), instead of only the local LAB semantic parsing as in [14]. For the instance-wise representation, we adopt the boundary map and define different distance functions. Compared with the high-level manifold cues in [14], the boundary map is more suitable for the shortest path-based implementation to calculate the instance-wise differences. More importantly, the key difference lies in the presented OT formulation for global assignment, which generates more accurate mask labels.
|
| 482 |
+
|
| 483 |
+
Limitations. For dense objects of the same category, such as in autonomous driving and remote sensing scenarios, the proposed method may not perform well with the supervision of only a single point label. Better performance can be obtained by integrating a more powerful segmentation network, such as Mask2Former [7] or MaskDINO [23], into our method.
|
| 484 |
+
|
| 485 |
+

|
| 486 |
+
Figure A2: Qualitative comparisons on Pascal VOC. The left two columns show that Point2Mask can precisely discriminate the nearby instances of the same category. The right two columns indicate that Point2Mask can obtain more fine-grained boundaries.
|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
Figure A3: Visual examples of panoptic segmentation by our Point2Mask with single point label per target on COCO and Pascal VOC datasets.
|
data/2023/2308_01xxx/2308.01779/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f6141c3be5af714436afb824dd80e9d01651c4a3db6cf52a89b78b8d15120109
|
| 3 |
+
size 1370335
|
data/2023/2308_01xxx/2308.01779/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01825/63f7c919-8b1b-4e16-882d-40fa30d5fc3e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b9d060dd44a5dca64693897640a4027528540d274599498a155eb18d8a260e3
|
| 3 |
+
size 3102856
|
data/2023/2308_01xxx/2308.01825/full.md
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SCALING RELATIONSHIP ON LEARNING MATHEMATICAL REASONING WITH LARGE LANGUAGE MODELS
|
| 2 |
+
|
| 3 |
+
Zheng Yuan*, Hongyi Yuan\*†, Chengpeng Li†, Guanting Dong†, Keming Lu Chuanqi Tan, Chang Zhou, Jingren Zhou
|
| 4 |
+
|
| 5 |
+
Alibaba DAMO Academy
|
| 6 |
+
|
| 7 |
+
{yuanzheng.yuanzhen,yuanhongyi.yhy}@alibaba-inc.com
|
| 8 |
+
|
| 9 |
+
{lichengpeng.lcp,dongguanting.dgt,lukeming.lkm}@alibaba-inc.com
|
| 10 |
+
|
| 11 |
+
{chuanqi.tcq,ericzhou.zc,jingren.zhou}@alibaba-inc.com
|
| 12 |
+
|
| 13 |
+
# ABSTRACT
|
| 14 |
+
|
| 15 |
+
Mathematical reasoning is a challenging task for large language models (LLMs), while its scaling relationship with respect to LLM capacity is under-explored. In this paper, we investigate how the pre-training loss, supervised data amount, and augmented data amount influence the reasoning performance of a supervised LLM. We find that pre-training loss is a better indicator of the model's performance than the model's parameter count. We apply supervised fine-tuning (SFT) with different amounts of supervised data and empirically find a log-linear relation between data amount and model performance, and we find that better models improve less with enlarged supervised datasets. To augment more data samples for improving model performance without any human effort, we propose to apply Rejection sampling Fine-Tuning (RFT). RFT uses supervised models to generate and collect correct reasoning paths as augmented fine-tuning datasets. We find that RFT improves mathematical reasoning performance more when the augmented samples contain more distinct reasoning paths. We also find that RFT brings more improvement for less performant LLMs. Furthermore, we combine rejection samples from multiple models, which pushes LLaMA-7B to an accuracy of $49.3\%$ on GSM8K and significantly outperforms the supervised fine-tuning (SFT) accuracy of $35.9\%$. We release our codes and rejection sampling augmented data in https://github.com/OFA-Sys/gsm8k-ScRel.
|
| 16 |
+
|
| 17 |
+
# 1 INTRODUCTION
|
| 18 |
+
|
| 19 |
+
Large language models (LLMs) (Anil et al., 2023; Touvron et al., 2023b; OpenAI, 2023) have shown considerable abilities in various math reasoning tasks (Saxton et al., 2019; Cobbe et al., 2021; Lightman et al., 2023). It is of interest to understand, predict, and improve an LLM's math reasoning ability based on different pre-trained LLMs and supervised datasets. With this knowledge, we can better decide how much effort to put into improving the LLM versus augmenting the dataset. Many recent works focus on using different prompts (Wei et al., 2022b; Yao et al., 2023) or assembling / reranking multiple inferences (Cobbe et al., 2021; Uesato et al., 2022; Wang et al., 2023; Lightman et al., 2023) to improve models' reasoning performance. While in-context learning (ICL) and performing multiple inferences can improve performance, they are computationally expensive and not suitable for online deployment scenarios. Therefore, we focus on the performance of supervised LLMs with a single inference pass, which is a setting closer to online deployment.
|
| 20 |
+
|
| 21 |
+
To this end, we empirically investigate the scaling relationship of factors that influence the math reasoning abilities of a supervised LLM, including pre-training losses, the amount of supervised data, and the amount of augmented data. Firstly, we analyze the supervised fine-tuning (SFT) and ICL performance of LLMs. We observe that the pre-training loss is approximately negatively linearly correlated with the SFT and ICL accuracy in a given interval, which makes it a better performance indicator than pre-trained model size or pre-trained token count. Secondly, we analyze the relationship
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: The key findings of scaling relationship on learning math reasoning ability with LLMs.
|
| 25 |
+
|
| 26 |
+
between SFT and different amounts of supervised data. We observe that the model performance has a log-linear relation versus the supervised data amount, while the increase diminishes with a better pre-trained model. Thirdly, we want to leverage the model itself to generate more supervised data to reinforce its reasoning ability and analyze the scaling relationship with the augmented data amount. We apply rejection sampling on SFT models to sample and select correct reasoning paths as an augmented dataset (Uesato et al., 2022; Zhu et al., 2023). We use these augmented datasets to fine-tune base LLMs, which achieves better performance than SFT, and we denote this as rejection sampling fine-tuning (RFT). We find the key factor influencing RFT performance is the amount of distinct reasoning paths, which can be increased by sampling more times or combining samples from multiple models. We apply RFT on several pre-trained LLMs and show larger improvements on less performant models. We argue that RFT works because it provides multiple reasoning paths, which gives LLMs better reasoning generalization. We also discuss that RFT is much cheaper than pre-training in computational resources, while training an LLM with lower pre-training loss remains the fundamental solution.
|
| 27 |
+
|
| 28 |
+
The key findings of this paper are shown in Figure 1 and are summarized here:
|
| 29 |
+
|
| 30 |
+
- When the pre-training loss gets smaller (i.e. the pre-trained model gets better), the model reasoning performances of SFT and ICL increase linearly within a range. The SFT performance improves slower than ICL.
|
| 31 |
+
- SFT improves in a log-linear manner with the increase of supervised data amount. The benefits of increasing data amount diminish as the pre-trained model gets better.
|
| 32 |
+
- The model performance for RFT improves as the distinct reasoning path amount increases. The RFT performance improves slower than SFT.
|
| 33 |
+
- The combination of rejection sampling samples from multiple models further enhances the RFT performance, resulting in an accuracy of 49.3 for LLaMA-7B (+13.4 compared to SFT), 50.3 for LLaMA2-7B (+8.7 compared to SFT), 52.1 for LLaMA-13B (+9.1 compared to SFT), and 55.4 for LLaMA2-13B (+5.4 compared to SFT).
|
| 34 |
+
|
| 35 |
+
# 2 RELATED WORKS
|
| 36 |
+
|
| 37 |
+
Learning Math Reasoning with LLMs Recent research on LLMs has discovered the emergent ability to solve reasoning tasks beyond a certain model scale (Wei et al., 2022a). Such reasoning abilities in LLMs can be elicited by fine-tuning, few-shot prompting, or zero-shot prompting (Cobbe et al., 2021; Wei et al., 2021; Nye et al., 2021; Wei et al., 2022b; Kojima et al., 2022). A large
|
| 38 |
+
|
| 39 |
+
amount of research focuses on the reasoning tasks of math word problems (MWP), and methods are evaluated on benchmarks spanning different levels of MWPs (Koncel-Kedziorski et al. (2016); Patel et al. (2021); Lan et al. (2021); Cobbe et al. (2021); Jie et al. (2022); Yuan et al. (2023a); Fu et al. (2023a), inter alia). The core idea of improving the mathematical reasoning ability of LLMs is to aggregate various sampled reasoning paths during either fine-tuning or inference. Cobbe et al. (2021) trained and devised a reasoning path verifier to select the correct results during inference. Wang et al. (2023) proposed to sample various reasoning paths during inference and then derive the final result by majority voting on the answers or through verifiers (Li et al., 2023). Several works applied the idea of rejection sampling along with other techniques to filter the diverse sampled reasoning paths for fine-tuning data augmentation (Huang et al., 2022; Zelikman et al., 2022; Ni et al., 2023; Zhu et al., 2023). Rejection sampling is a simple-yet-effective fine-tuning augmentation technique and is also used for LLM alignment with human preference (Bai et al., 2022; Yuan et al., 2023b; Dong et al., 2023; Touvron et al., 2023b; Song et al., 2023). Uesato et al. (2022) explored the use of reinforcement learning methods for improving the mathematical reasoning abilities of LLMs and further discussed the difference between outcome-based and process-based reward modeling. In follow-up work, Lightman et al. (2023) collected large-scale process-based supervision signals through human annotation and verified that LLMs can benefit more from process-based reward modeling with human-annotated supervision than from outcome-based reward modeling. There is also prior research that distilled the emergent reasoning ability of LLMs to small language models (Fu et al., 2023b; Shridhar et al., 2023). Compared to previous works (Zelikman et al., 2022; Uesato et al., 2022; Zhu et al., 2023; Ni et al., 2023), we use a simpler way of generating augmented samples without any trained process-level reward models, and we focus on the scaling relationship between LLMs and math reasoning ability.
|
| 40 |
+
|
| 41 |
+
Scaling Laws of Large Language Models It is important to understand and predict the performance gain as the language model scales up. Kaplan et al. (2020) first investigated and derived a predictable relationship for how the number of model parameters and the data size contribute to the loss over many orders of magnitude. Hoffmann et al. (2022) refined the scaling laws in (Kaplan et al., 2020) and found the scaling laws for computation-optimal training. Muennighoff et al. (2023) explored and extended the scaling laws under a data-constrained scenario. Besides investigating the scaling performance for pre-training, Gao et al. (2022) discussed the scaling laws for overparameterized reward models for alignment with human preference, and Hernandez et al. (2021) developed scaling laws for transferring performance from pre-trained models to downstream tasks. Henighan et al. (2020); Caballero et al. (2022) investigated scaling laws of math problems. In this paper, we investigate the scaling relationships of large language models on learning math word problems with respect to pre-training losses, supervised data amount, and augmented data amount.
|
| 42 |
+
|
| 43 |
+
# 3 THE FACTORS OF MATH REASONING ABILITY IN SUPERVISED LLM
|
| 44 |
+
|
| 45 |
+
This paper aims to understand the performance of supervised LLMs in math reasoning. We expect a pre-trained LLM $\rho$ to learn reasoning ability from a supervised reasoning dataset $\mathcal{D}$ . The dataset is defined by $\mathcal{D} = \{q_i, r_i, a_i\}_i$ , where $q$ is a question, $r$ is a chain-of-thought reasoning path, and $a$ is a numerical answer. We perform supervised fine-tuning on dataset $\mathcal{D}$ to obtain an SFT model $\pi$ . We use $\pi$ to generate reasoning paths and answers on the test set by greedy decoding and report the accuracy (i.e. acc or maj1@1) as our metric here.
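A minimal sketch of this greedy-decoding evaluation is shown below; the model and tokenizer are assumed to follow the Hugging Face interface, and the numeric answer extraction is a simplification of GSM8K-style parsing rather than the paper's released code.

```python
import re
import torch

def extract_answer(text: str):
    """Take the last number in the generated reasoning path as the final answer."""
    nums = re.findall(r"-?\d+\.?\d*", text.replace(",", ""))
    return nums[-1] if nums else None

@torch.no_grad()
def greedy_accuracy(model, tokenizer, dataset, max_new_tokens=512) -> float:
    """maj1@1: one greedy decode per question, exact match on the numeric answer."""
    correct = 0
    for ex in dataset:  # ex = {"question": ..., "answer": ...}
        inputs = tokenizer(ex["question"], return_tensors="pt").to(model.device)
        out = model.generate(**inputs, do_sample=False, max_new_tokens=max_new_tokens)
        completion = tokenizer.decode(out[0, inputs["input_ids"].shape[1]:],
                                      skip_special_tokens=True)
        correct += extract_answer(completion) == ex["answer"]
    return correct / len(dataset)
```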
|
| 46 |
+
|
| 47 |
+
# 3.1 MODEL ACCURACY VS. PRE-TRAINING LOSS
|
| 48 |
+
|
| 49 |
+
Previous works state that larger LLMs show better reasoning ability across the same series of models (Brown et al., 2020; Chowdhery et al., 2022; Touvron et al., 2023a,b), and we find that LLaMA outperforms GPT-3, which shows that the model parameter count should not be the only indicator of reasoning ability. While LLMs have different architectures, model parameters, and pre-training token numbers, we find the pre-training loss is a stable performance indicator of math reasoning ability, so we use it to represent a model instead of its parameter count and pre-training token count.
|
| 50 |
+
|
| 51 |
+
We analyze the SFT and ICL (8-shot) performance of GPT-3 (Brown et al., 2020), LLaMA (Touvron et al., 2023a), LLaMA2 (Touvron et al., 2023b), and GPT-4 (OpenAI, 2023). The pre-training losses
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
Figure 2: The performance of SFT (blue lines) and ICL (red lines) settings on GSM8K. GPT-4 reports using part of the GSM8K data in pre-training and suggests its performance be considered as lying between SFT and ICL.
|
| 55 |
+
|
| 56 |
+
of these models are reported in their respective papers. We should note that the pre-training losses correspond to different pre-training datasets and different tokenizers, which means they cannot be compared strictly (and we cannot use them for any sort of regression directly), while the tendency among these losses is still enlightening. We use the results of GPT-3 fine-tuning from (Cobbe et al., 2021), and we fine-tune LLaMA and LLaMA2 on the GSM8K training set (detailed in Appendix A.1). For in-context learning, we use the results from the LLaMA (Touvron et al., 2023a) and LLaMA2 (Touvron et al., 2023b) papers.
|
| 57 |
+
|
| 58 |
+
In Figure 2, we can find that:
|
| 59 |
+
|
| 60 |
+
- The pre-training losses are approximately negatively linear correlated to the SFT and ICL accuracy during the given pre-training loss interval.
|
| 61 |
+
- SFT outperforms ICL consistently, while the improvements diminish when the pre-training loss is lower.
|
| 62 |
+
|
| 63 |
+
The linear relation of SFT and ICL accuracy may only hold in the given interval. The reasons are (1) the slope of ICL is steeper than that of SFT, while SFT performance should remain greater than ICL performance; (2) the accuracy cannot be larger than 1 or smaller than 0. Theoretically, $-\log (acc)$ should be used instead of $acc$ as the dependent variable, but we find an apparent linear relationship between pre-training loss and $acc$, and therefore use $acc$ as the dependent variable. LLaMA-2 7B (13B) can be viewed as an approximation of continued training of LLaMA 7B (13B). As it trains longer, its ICL and SFT performance both improve without changing the parameter count. From these observations, one effective way to improve reasoning ability is to train a better base model with lower pre-training loss (pre-training is all you need!). The models with lower pre-training loss improve less from fine-tuning, which may be because these models have already obtained more reasoning ability during pre-training and the supervised data provides less additional signal.
|
| 64 |
+
|
| 65 |
+

|
| 66 |
+
Figure 3: The performance of SFT with different amounts of supervised data on GSM8K.
|
| 67 |
+
|
| 68 |
+
# 3.2 MODEL ACCURACY VS. SUPERVISED DATA COUNT
|
| 69 |
+
|
| 70 |
+
Supervised fine-tuning does improve LLMs' reasoning ability, and we want to know how the supervised data amount influences the model's improvement. We fine-tune LLaMA and LLaMA2 with $\{1, 1/2, 1/4, 1/8, 1/16, 1/32\}$ of the GSM8K training set (detailed in Appendix A.2). We use this experiment to extrapolate the model performance if we had more supervised data. In Figure 3, we plot the results of training with different amounts of supervised data. From this figure, we can observe that:
|
| 71 |
+
|
| 72 |
+
- The model performance has a log-linear relation versus data amount. When the data amount doubles, the performance increases by a roughly constant number of points.
|
| 73 |
+
- Better model needs more amount of data to outperform its ICL performance.
|
| 74 |
+
- Better model benefits less when supervised data amount doubles.
|
| 75 |
+
|
| 76 |
+
The log-linear relation is stable within the $\{1, 1/2, 1/4, 1/8\}$ fractions of the training data. From this observation, it is straightforward to enlarge the training dataset to improve performance, especially for worse models. Better models benefit less, which echoes the finding that better models have already learned more reasoning ability during pre-training.
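As a sketch of this log-linear fit, the snippet below regresses accuracy on the logarithm of the data fraction; the accuracy numbers are purely illustrative and not taken from the paper's figures.

```python
import numpy as np

# Illustrative accuracies at {1/32, ..., 1} of the GSM8K training set.
fractions = np.array([1/32, 1/16, 1/8, 1/4, 1/2, 1.0])
acc = np.array([13.0, 18.0, 23.0, 27.0, 31.0, 36.0])

# Fit acc ~= a * log2(fraction) + b.
a, b = np.polyfit(np.log2(fractions), acc, deg=1)
print(f"doubling the supervised data adds ~{a:.1f} accuracy points")
```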
|
| 77 |
+
|
| 78 |
+
# 3.3 MODEL ACCURACY VS. AUGMENTED DATA COUNT
|
| 79 |
+
|
| 80 |
+
Increasing the amount of math reasoning labeled data is difficult, especially proposing new questions. It is easy for a well-educated student to solve hundreds of math word problems per day, but it is very hard to come up with diverse and educational math problems. So our direction changes to augmenting new data using existing resources. We have tried augmenting new queries (detailed in Appendix D.1) and augmenting revisions (detailed in Appendix D.2). These approaches bring no or only marginal improvements compared to SFT. We find that a simplified version of rejection sampling (Zhu et al., 2023) is a simple and effective way to augment new reasoning paths and can improve model performance. We also find that the key factor influencing fine-tuning on rejection sampling (RFT) augmented data is the amount of distinct reasoning paths. Combining rejection sampling samples from multiple
|
| 81 |
+
|
| 82 |
+
<table><tr><td>Setting</td><td>7B</td><td>7B-2</td><td>13B</td><td>13B-2</td><td>33B</td></tr><tr><td>Pretrain loss</td><td>1.8</td><td>1.75</td><td>1.73</td><td>1.68</td><td>1.62</td></tr><tr><td>ICL</td><td>11.0/18.1</td><td>14.6/-</td><td>17.8/29.3</td><td>28.7/-</td><td>35.6/53.1</td></tr><tr><td>SFT</td><td>35.9/48.7</td><td>41.6/55.4</td><td>43.0/55.2</td><td>50.0/61.7</td><td>54.6/-</td></tr><tr><td>RFT k = 100</td><td>41.7/52.7</td><td>47.5/58.7</td><td>49.1/59.9</td><td>54.8/65.4</td><td>54.5/-</td></tr><tr><td>Correct paths per question</td><td>53.3</td><td>60.8</td><td>62.5</td><td>71.6</td><td>88.7</td></tr><tr><td>Distinct paths per question</td><td>5.25</td><td>5.19</td><td>5.26</td><td>5.29</td><td>2.78</td></tr></table>
Table 1: The performance of RFT with $k = 100$ on GSM8K compared with SFT and ICL. Here, the number of distinct paths refers to the number of distinct equation lists.
models, we can further fine-tune a LLaMA-7B model to an accuracy of 49.3 (compared with SFT 35.9) and a LLaMA-13B model to an accuracy of 52.1 (compared with SFT 43.0).
Rejection Sampling Fine-tuning The SFT model $\pi$ obtains the ability to perform zero-shot chain-of-thought reasoning, and we use $\pi$ to generate more correct reasoning paths $r_{ij}$ to supplement the training dataset. For each $q_i$, we generate $k$ candidate reasoning paths and answers $r, a$ with a temperature of 0.7 following (Cobbe et al., 2021). We first filter out reasoning paths with wrong answers $a \neq a_i$ or wrong calculations based on Python evaluation. Each reasoning path contains a list of equations $e_j$, and we select one reasoning path $r_{ij}$ for each distinct equation list as the augmented data, removing other reasoning paths with the same list of equations to deduplicate similar reasoning paths. A different order of elements (e.g. $3 + 4 = 7$ and $4 + 3 = 7$) or a different order of equations (e.g. $1 + 2 = 3, 3 + 4 = 7$ and $1 + 4 = 5, 2 + 5 = 7$) is considered distinct. It is helpful for models to know that these orders can be exchanged, and it is hard for models to learn this with only one reasoning path per problem. We define $\mathcal{D}_{\pi}' = \mathcal{D} \cup \{q_i, r_{ij}, a_i\}_{i,j}$ as the augmented dataset. We fine-tune the pre-trained LLM $\rho$ on $\mathcal{D}'$ to obtain $\pi_{\mathrm{RFT}}$, which we call RFT, and we detail how we apply RFT in Appendix A.3. We list the results of RFT with $k = 100$ sampled candidate reasoning paths on LLaMA and LLaMA-2 in Table 1. For ICL, SFT, and RFT, we report maj1@1 (accuracy) and maj1@100 (sample 100 times and compute accuracy based on majority voting) as metrics.
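To make the maj1@K metric concrete, here is a minimal sketch of majority voting over sampled answers; `extract_answer` is a placeholder for a parser that pulls the final numeric answer out of a generated reasoning path.

```python
from collections import Counter
from typing import Callable, List

def maj_at_k(completions: List[str], reference: str,
             extract_answer: Callable[[str], str]) -> bool:
    """maj1@K: sample K completions, majority-vote on their final answers,
    and check whether the voted answer matches the reference answer."""
    answers = [extract_answer(c) for c in completions]
    voted, _ = Counter(answers).most_common(1)[0]
    return voted == reference

# maj1@1 with greedy decoding reduces to plain accuracy (K = 1, one completion).
```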
In the case of the 7B and 13B models, RFT yields an approximate increase of 5 to 6 points in maj1@1 and about 4 points in maj1@100. For the 33B model, RFT does not improve performance compared to SFT. The main reason lies in the augmented samples from rejection sampling: better models generate more correct reasoning paths per question. LLaMA-33B-SFT can generate an average of 88.7 correct paths per question, but it overfits the training set and has difficulty generating more diverse paths for the training questions. Rejection sampling with 33B is very time-consuming and we do not conduct a temperature grid search; we have tried a larger temperature of 1.0 for decoding LLaMA-33B-SFT, which generates 82.4 correct paths and 4.77 distinct paths per question. This is more diverse than using temperature 0.7 but still less diverse than the 7B and 13B models. We admit there should be a temperature (or generation config) that produces more distinct paths and good RFT results for 33B and even larger models, but it would require more inference compute than sampling with the 7B and 13B models. We will show that we can use only the 7B and 13B models for rejection sampling to improve the 33B model.
Model Accuracy vs Rejection Sampling Data Count To understand the performance of RFT, we vary $k$ among 1, 3, 6, 12, 25, 50, and 100 and apply RFT. We also include a setting with $k = 100$ in which no reasoning paths are removed, denoted as no dedup. We list the RFT results with different $k$ in Figure 4. Comparing RFT with $k = 100$ against the no-dedup setting, the performance is similar, which shows that it is better to estimate RFT performance from the number of distinct reasoning paths rather than from the raw number of RFT-augmented samples. Furthermore, using deduplication gives better performance for 3 of the 4 models and needs much less training time.
When using $k = 3$, RFT outperforms SFT by 2 points stably. For most data points, a larger $k$ leads to better performance. However, the gains from RFT decrease as $k$ doubles. We calculate the number of distinct paths per question for different $k$ in Table 2, and we can see that it does not grow quickly as $k$ grows. In Figure 3, we saw that doubling the number of training samples yields a roughly linear performance improvement. Doubling the number of reasoning paths should
Figure 4: The performance of RFT with different amounts of sampling count $k$ on GSM8K.
<table><tr><td>k</td><td>7B</td><td>7B-2</td><td>13B</td><td>13B-2</td><td>33B</td></tr><tr><td>1</td><td>1.17</td><td>1.19</td><td>1.15</td><td>1.18</td><td>1.06</td></tr><tr><td>3</td><td>1.44</td><td>1.47</td><td>1.41</td><td>1.45</td><td>1.16</td></tr><tr><td>6</td><td>1.74</td><td>1.78</td><td>1.69</td><td>1.76</td><td>1.28</td></tr><tr><td>12</td><td>2.20</td><td>2.23</td><td>2.11</td><td>2.21</td><td>1.46</td></tr><tr><td>25</td><td>2.93</td><td>2.93</td><td>2.88</td><td>2.94</td><td>1.77</td></tr><tr><td>50</td><td>3.94</td><td>3.91</td><td>3.90</td><td>3.94</td><td>2.19</td></tr><tr><td>100</td><td>5.25</td><td>5.19</td><td>5.26</td><td>5.29</td><td>2.78</td></tr><tr><td>400 (U13B)</td><td></td><td></td><td>12.84</td><td></td><td></td></tr><tr><td>500 (U33B)</td><td></td><td></td><td>13.65</td><td></td><td></td></tr></table>
Table 2: The average number of distinct reasoning paths per question generated by different SFT models with different $k$.
improve performance less than doubling the number of training samples, since obtaining different reasoning paths does not introduce any new questions. Therefore, doubling $k$ leads to diminishing performance improvements.
Combining rejection sampling samples from multiple models The experiment results above demonstrate performance boosts in mathematical reasoning benefitting from rejection sampling. Through case studies in Section 4.1, we show that rejection sampling can augment training data with reasoning paths of diverse calculation processes. However, the reasoning paths sampled from one single SFT model can be logically non-diverse. Therefore, we expect to further improve the mathematical reasoning performance by leveraging rejection-sampled reasoning paths aggregated from different models. We denote the two final datasets as $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ and $\mathcal{D}_{\mathrm{U33B}}^{\prime}$, which are aggregated from rejection sampling of different models: $\mathcal{D}_{\mathrm{U13B}}^{\prime} = \mathcal{D}_{7\mathrm{B}}^{\prime} \oplus \mathcal{D}_{7\mathrm{B}2}^{\prime} \oplus \mathcal{D}_{13\mathrm{B}}^{\prime} \oplus \mathcal{D}_{13\mathrm{B}2}^{\prime}$ and $\mathcal{D}_{\mathrm{U33B}}^{\prime} = \mathcal{D}_{\mathrm{U13B}}^{\prime} \oplus \mathcal{D}_{33\mathrm{B}}^{\prime}$, where U means models under a certain size, 7B/13B/33B means LLaMA-7B/13B/33B, and 7B2/13B2 means LLaMA2-7B/13B. $\oplus$ denotes an aggregation process in which all the reasoning paths from the different sets are first combined and then Algorithm 1 is applied to deduplicate reasoning paths that share the same calculation process regarding equation forms and orders.
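As an illustration of the $\oplus$ aggregation, the sketch below merges several augmented datasets and keeps one reasoning path per (question, equation-list) signature; the record fields are hypothetical, and for brevity it keeps the first path per signature instead of re-running the Levenshtein-based tie-breaking of Algorithm 1.

```python
def aggregate(*datasets):
    """Combine RFT datasets from different SFT models, keeping one reasoning
    path per (question, equation-list) signature."""
    merged, seen = [], set()
    for dataset in datasets:
        for record in dataset:
            key = (record["question"], tuple(record["equations"]))
            if key not in seen:
                seen.add(key)
                merged.append(record)
    return merged

# Sketch of D'_U13B = D'_7B (+) D'_7B2 (+) D'_13B (+) D'_13B2:
# d_u13b = aggregate(d_7b, d_7b2, d_13b, d_13b2)
```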
Through the results visualized in Figure 5, we can see that using the aggregated datasets $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ and $\mathcal{D}_{\mathrm{U33B}}^{\prime}$ leads to uniformly better performance than fine-tuning on datasets from a single model across different model sizes. RFT on these two augmented datasets shrinks the performance gaps among models of the same size under SFT and RFT $k = 100$, which suggests that the combined augmented datasets provide enough reasoning supervision to compensate for the pre-training gap. We can assume that with a sufficient amount of supervised data, the decisive performance indicator becomes the model size rather than the pre-training loss.
Figure 5: The performance of RFT with rejection sampling samples from multiple models.
We have stated that it is expensive to apply RFT with $k = 100$ to the 33B model, and that it would need a temperature grid search to achieve an improvement over SFT. However, fine-tuning on $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ has a similar rejection sampling computational cost to sampling 100 times from the 33B model and achieves better performance.
Another phenomenon is that including $\mathcal{D}_{33\mathrm{B}}^{\prime}$ in the aggregation barely influences the performance. To give a more comprehensive analysis of the results, we calculate the average number of reasoning paths per question in Table 2 and depict a Venn diagram in Figure 6 to visualize the sources of the different reasoning paths. In Table 2, the average reasoning path numbers of $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ and $\mathcal{D}_{\mathrm{U33B}}^{\prime}$ surpass those of any single model by a large margin, while $\mathcal{D}_{\mathrm{U33B}}^{\prime}$ has only slightly more reasoning paths than $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ (by 0.81). Meanwhile, as shown in Figure 6, the models at or below the 13B size each contribute a similar proportion of unique reasoning paths to $\mathcal{D}_{\mathrm{U33B}}^{\prime}$, around $15\%$. However, only $6.5\%$ of the reasoning paths can be exclusively acquired from the LLaMA-33B-SFT model. This shows that the 33B SFT model provides limited reasoning diversity when sampling on the training questions. This finding is consistent with the results in Table 1, indicating that the 33B model (and possibly the 65B and 70B models) can memorize the human-annotated reasoning paths well.
For the 65B model, we find that using $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ does not improve the performance compared to SFT. The reason may be that better models benefit less from additional supervised samples, since they have already learned more reasoning ability during pre-training.
Overall, we can come to the conclusion that (1) RFT improves the mathematical reasoning performance of (weaker) LLMs through diverse reasoning paths from rejection sampling of the SFT models, and aggregating more diverse reasoning paths improves the performance further; (2) different SFT models contribute reasoning paths with different calculation processes from rejection sampling, leading to more diverse training data for RFT, and LLMs with larger parameter counts may degrade in generating diversified reasoning paths as a result of overfitting the training questions. There may exist a generation or training configuration under which sufficiently large LMs do not overfit the training set, but finding it is not trivial.
Comparing to other baselines We compare our RFT results of training on $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ to several baselines, with the results detailed in Table 3. Although LLaMA and LLaMA2 are top-tier open-sourced LLMs$^{1}$, their mathematical reasoning performance still lags behind current proprietary LLMs of much larger parameter scale, such as GPT-4 and PaLM2. Compared to results on
Figure 6: The Venn diagram of the proportions of the reasoning calculation paths that each model provides to $\mathcal{D}_{\mathrm{U33B}}^{\prime}$. For example, $15.5\%$ (the yellow part) of the reasoning calculation paths in $\mathcal{D}_{\mathrm{U33B}}^{\prime}$ can only be found in the rejection sampling results from LLaMA2-13B-SFT.
open-sourced models, our results on LLaMA present better performance than two recent state-of-the-art reasoning augmentation methods. Our RFT method is simpler than CoRE, since RFT requires neither training verifier models nor decoding with Monte Carlo Tree Search (MCTS). Compared to other open-sourced aligned language models, we find that the aligned 7B models plateau around a score of 35, which is very similar to the SFT performance of LLaMA-7B. We conjecture that they use GSM8K during their pre-training phase, following (OpenAI, 2023), or during their human alignment fine-tuning phase, following (Qingyi et al., 2023). Using our augmented dataset $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ to replace the original GSM8K can significantly boost their 7B models' performance.
# 4 DISCUSSION
# 4.1 DIFFERENT DISTRIBUTION OF REASONING PATHS
In the aforementioned analysis of the RFT training data, we observe that rejection sampling can augment the training questions with diverse reasoning calculation paths. In this section, we investigate whether RFT models can learn to generate different reasoning paths that reach the correct answers. We fine-tune LLaMA and LLaMA2 of 7B and 13B on $\mathcal{D}_{\mathrm{U13B}}^{\prime}$. During inference, we sample 100 different reasoning paths from each trained model for each test question with a temperature of 0.7. For each question, we count the number of different calculation processes, among the 100 sampled reasoning paths, that lead to the correct answer, and we draw histograms over the test questions. SFT models and RFT models trained on self-sampled datasets (RFT $k = 100$) are included for comparison.
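A minimal sketch of this counting step, assuming `get_equation` (Appendix A.3) returns the normalized equation-list string of a reasoning path and `is_correct` checks the final answer (both hypothetical helpers here):

```python
from collections import Counter

def unique_calc_processes(sampled_paths, reference_answer, get_equation, is_correct):
    """Number of distinct calculation processes among sampled reasoning paths
    that reach the correct answer for one test question."""
    correct = [p for p in sampled_paths if is_correct(p, reference_answer)]
    return len({get_equation(p) for p in correct})

# Histogram over the test set (as in Figure 7):
# counts = Counter(unique_calc_processes(paths[q], answer[q], get_equation, is_correct)
#                  for q in test_questions)
```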
As shown in Figure 7, the models trained by RFT on $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ exhibit higher question counts at large numbers of unique calculation processes than the models trained by RFT $k = 100$ and by SFT. For SFT models there are more questions for which all the sampled reasoning paths correspond to one single calculation process, and SFT models can barely generate more than 8 different calculation
<table><tr><td>Base Model</td><td>Training</td><td>maj1@1</td><td>maj1@K*</td></tr><tr><td colspan="4">Proprietary LLMs</td></tr><tr><td>GPT-4 (OpenAI, 2023)</td><td>5-shot ICL</td><td>92.0</td><td>-</td></tr><tr><td>GPT-3-175B (Brown et al., 2020)</td><td>SFT</td><td>34.0</td><td>-</td></tr><tr><td>PaLM2 (Anil et al., 2023)</td><td>8-shot ICL</td><td>80.7</td><td>91.0@K=40</td></tr><tr><td>PaLM-540B (Chowdhery et al., 2022)</td><td>8-shot ICL</td><td>56.5</td><td>74.4@K=40</td></tr><tr><td>Chinchilla-70B (Uesato et al., 2022)</td><td>5-shot ICL</td><td>43.7</td><td>58.6@K=96</td></tr><tr><td>Chinchilla-70B</td><td>SFT</td><td>58.9</td><td>77.7@K=96</td></tr><tr><td colspan="4">Open-sourced LLMs</td></tr><tr><td>GPT-Neo-2.7B (Black et al., 2021)</td><td>FCS + PCS (Ni et al., 2023)</td><td>19.5</td><td>41.4</td></tr><tr><td>GPT-J-6B (Wang & Komatsuzaki, 2021)</td><td>CoRE (Zhu et al., 2023)</td><td>34.9</td><td>63.2@K=40</td></tr><tr><td>ChatGLM2-6B (Zeng et al., 2022)</td><td>8-shot ICL</td><td>32.4</td><td>-</td></tr><tr><td>ChatGLM2-6B</td><td>Human Alignment</td><td>28.1</td><td>-</td></tr><tr><td>ChatGLM2-12B</td><td>8-shot ICL</td><td>40.9</td><td>-</td></tr><tr><td>ChatGLM2-12B</td><td>Human Alignment</td><td>38.1</td><td>-</td></tr><tr><td>InternLM-7B (Team, 2023)</td><td>4-shot ICL</td><td>31.2</td><td>-</td></tr><tr><td>InternLM-7B</td><td>Human Alignment</td><td>34.5</td><td></td></tr><tr><td>LLaMA-7B</td><td>SFT</td><td>35.9</td><td>48.7</td></tr><tr><td colspan="4">Our RFT on open-sourced LLMs</td></tr><tr><td>LLaMA-7B</td><td>RFT-U13B</td><td>49.3</td><td>61.8</td></tr><tr><td>LLaMA2-7B</td><td>RFT-U13B</td><td>50.3</td><td>65.6</td></tr><tr><td>LLaMA-13B</td><td>RFT-U13B</td><td>52.1</td><td>66.2</td></tr><tr><td>LLaMA2-13B</td><td>RFT-U13B</td><td>55.4</td><td>69.1</td></tr></table>
Table 3: Compare GSM8K results with other baselines. RFT-U13B means models fine-tuned on $\mathcal{D}_{\mathrm{U13B}}^{\prime}$ . FCS and PCS represent fully-correct solutions and partially-correct solutions respectively. *K=100 if not specified.
(Figure 7 histogram panels; x-axis: Unique Reasoning Calculation Path Num.)
Figure 7: The histograms of question numbers solved with different numbers of unique reasoning calculation paths. We show the difference in question counts between SFT and RFT U13B in two cases where the numbers of unique reasoning calculation paths are 1 or more than 10.
processes for a question. This analysis demonstrates that diverse reasoning calculation paths in the training data can equip LLMs to find diverse reasoning logic for solving math problems.
<table><tr><td>Model size</td><td>7B</td><td>7B-2</td><td>13B</td><td>13B-2</td><td>33B</td><td>65B</td><td>70B</td></tr><tr><td>Pre-train FLOPs</td><td>4.2 × 10<sup>22</sup></td><td>8.4 × 10<sup>22</sup></td><td>7.8 × 10<sup>22</sup></td><td>1.6 × 10<sup>23</sup></td><td>2.7 × 10<sup>23</sup></td><td>5.5 × 10<sup>23</sup></td><td>8.4 × 10<sup>23</sup></td></tr><tr><td>SFT FLOPs</td><td colspan="2">1.7 × 10<sup>17</sup></td><td colspan="2">3.3 × 10<sup>17</sup></td><td>7.7 × 10<sup>17</sup></td><td>1.3 × 10<sup>18</sup></td><td>1.7 × 10<sup>18</sup></td></tr><tr><td>RFT Inference FLOPs</td><td colspan="2">1.4 × 10<sup>18</sup></td><td colspan="2">2.6 × 10<sup>18</sup></td><td>6.9 × 10<sup>18</sup></td><td>1.4 × 10<sup>19</sup></td><td>1.8 × 10<sup>19</sup></td></tr><tr><td>RFT-U33B FLOPs</td><td colspan="2">3.0 × 10<sup>18</sup></td><td colspan="2">5.7 × 10<sup>18</sup></td><td>1.3 × 10<sup>19</sup></td><td>2.2 × 10<sup>19</sup></td><td>3.0 × 10<sup>19</sup></td></tr><tr><td>Pre-train GPU hrs</td><td>82k</td><td>184k</td><td>135k</td><td>368k</td><td>530k</td><td>1022k</td><td>1720k</td></tr><tr><td>SFT GPU hrs</td><td colspan="2">0.6</td><td colspan="2">4</td><td>40</td><td>74</td><td>80</td></tr><tr><td>RFT Inference GPU hrs</td><td colspan="2">10</td><td colspan="2">0.1k</td><td>0.1k</td><td>4.3k</td><td>4.5k</td></tr><tr><td>RFT-U33B GPU hrs</td><td colspan="2">9</td><td colspan="2">62</td><td>0.6k</td><td>1k</td><td>1.2k</td></tr><tr><td>ICL Accuracy</td><td>11.0</td><td>14.6</td><td>17.8</td><td>28.7</td><td>35.6</td><td>50.9</td><td>56.8</td></tr><tr><td>SFT Accuracy</td><td>35.9</td><td>41.6</td><td>43.0</td><td>50.0</td><td>54.6</td><td>59.3</td><td>63.2</td></tr><tr><td>RFT-U33B Accuracy</td><td>49.1</td><td>51.2</td><td>51.4</td><td>55.3</td><td>57.9</td><td>-</td><td>-</td></tr></table>
Table 4: The statistics of FLOPs and GPU hours required for pre-training, SFT, RFT inference, and RFT. We take the pre-training GPU hours from Touvron et al. (2023a;b). The GPU hours for RFT inference are calculated for 7,473 train set questions and 100 samples per question. To make the best of GPUs and properly fit models into the GPU memory, we tune the inference batch size. For 33B, 65B, and 70B models, we use DeepSpeed ZeRO3 (Rasley et al., 2020) for distributed training. All the GPU hours are based on NVIDIA A100 80GB GPU. Note we use non-embedding parameters to compute FLOPs in our experiments.
# 4.2 TOWARDS EXCELSIOR MATHEMATICAL REASONING
From our findings, there are two main factors that can improve mathematical reasoning abilities given a preset amount of human-annotated samples: (1) pre-training the LLMs to lower losses; (2) augmenting fine-tuning with rejection sampling. Through extensive experiments, we empirically verify the scaling relationships between the mathematical reasoning performance of LLMs and each of these factors. Out of consideration for sustainable NLP, in this section we investigate the computational resources required to extrapolate the mathematical performance of LLMs along both factors and discuss how to improve the performance more efficiently.
We estimate the pre-training, SFT, RFT inference, and RFT FLOPs and GPU times following Kaplan et al. (2020) in Table 4, with details in Appendix E. We can see that the costs of SFT ($\sim 1 \times 10^{-5}$ of pre-training) and RFT ($\sim 1 \times 10^{-4}$ of pre-training) are negligible compared to pre-training. One can always use SFT and RFT to improve a model's performance. However, it can be hard to use RFT to push performance much further, since we would need exponentially more samples to increase the number of distinct reasoning paths, and there exists an upper bound on the number of distinct reasoning paths for a given math reasoning question.
We assume that performance follows RFT > SFT > ICL, and from the findings in this paper we know the improvement speed follows RFT < SFT < ICL. If we had an omnipotent language model whose pre-training loss matched the inherent randomness of the corpus, it would have $\mathrm{RFT} = \mathrm{SFT} = \mathrm{ICL} = 100$. Thus, when you pre-train a better language model (i.e., with a smaller pre-training loss), its performance still follows RFT > SFT > ICL, but the performance gaps diminish. Since an RFT model can be obtained without much effort (compared to pre-training), the most important thing to do is to decrease the model's pre-training loss. From LLaMA-7B to LLaMA2-7B, it takes an additional $4.2\times 10^{22}$ FLOPs to obtain a 2.1 improvement in the RFT-U33B setting with a 0.05 pre-training loss decrease. From LLaMA-7B to LLaMA-13B, it takes an additional $3.6\times 10^{22}$ FLOPs to obtain a 2.3 improvement in the RFT-U33B setting with a 0.07 pre-training loss decrease. While decreasing the pre-training loss is expensive compared to SFT and RFT, we believe other abilities may follow a similar pattern, so better pre-training can benefit all other tasks.
# 5 CONCLUSIONS
In this paper, we investigate the scaling relationship in supervising math reasoning abilities of large language models. We study the relationship between math performance and pre-training
losses, supervised data amount, and the number of distinct reasoning paths. We find that better language models benefit less from SFT and RFT, and that the most important step towards excellent math reasoning abilities is to pre-train a better base model.
# 6 ACKNOWLEDGEMENT
We would like to express our sincere appreciation to Tianhang Zhu, Runji Lin, Kai Dang, Keming Lu, Wei Wang, and Junyang Lin for their valuable insights and contributions to this paper.
# 7 LIMITATIONS
This paper omits the following parts, which are important for building math reasoning abilities in LLMs and should be addressed in a revised version of this paper or in future work.
- RFT for 65B and 70B LLaMA models.
- Pre-training on a math-related corpus. This is clearly useful, as shown in Lewkowycz et al. (2022), although the pre-training loss obtained on such a corpus cannot be directly aligned with the losses of general-domain pre-trained models.
- We do not fit explicit scaling laws in this paper, since many numbers are estimated, and the pre-training losses, ICL prompts, and SFT settings of the various models may not be aligned.
# REFERENCES
Marcin Andrychowicz, Filip Wolski, Alex Ray, Jonas Schneider, Rachel Fong, Peter Welinder, Bob McGrew, Josh Tobin, OpenAI Pieter Abbeel, and Wojciech Zaremba. Hindsight experience replay. In I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems, volume 30. Curran Associates, Inc., 2017. URL https://proceedings.neurips.cc/paper_files/paper/2017/file/453fadbd8a1a3af50a9df4df899537b5-Paper.pdf.
Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. Palm 2 technical report. arXiv preprint arXiv:2305.10403, 2023.
Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, Carol Chen, Catherine Olsson, Christopher Olah, Danny Hernandez, Dawn Drain, Deep Ganguli, Dustin Li, Eli TranJohnson, Ethan Perez, Jamie Kerr, Jared Mueller, Jeffrey Ladish, Joshua Landau, Kamal Ndousse, Kamile Lukosuite, Liane Lovitt, Michael Sellitto, Nelson Elhage, Nicholas Schiefer, Noemi Mercado, Nova DasSarma, Robert Lasenby, Robin Larson, Sam Ringer, Scott Johnston, Shauna Kravec, Sheer El Showk, Stanislav Fort, Tamera Lanham, Timothy Telleen-Lawton, Tom Conerly, Tom Henighan, Tristan Hume, Samuel R. Bowman, Zac Hatfield-Dodds, Ben Mann, Dario Amodei, Nicholas Joseph, Sam McCandlish, Tom Brown, and Jared Kaplan. Constitutional ai: Harmlessness from ai feedback, 2022.
Sid Black, Leo Gao, Phil Wang, Connor Leahy, and Stella Biderman. GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow, March 2021. URL https://doi.org/10.5281/zenodo.5297715.
Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, T. J. Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeff Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. ArXiv, abs/2005.14165, 2020. URL https://api.semanticscholar.org/CorpusID:218971783.
Ethan Caballero, Kshitij Gupta, Irina Rish, and David Krueger. Broken neural scaling laws. arXiv preprint arXiv:2210.14891, 2022.
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark Omernick, Andrew M. Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan First, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. Palm: Scaling language modeling with pathways, 2022.
Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
Hanze Dong, Wei Xiong, Deepanshu Goyal, Rui Pan, Shizhe Diao, Jipeng Zhang, Kashun Shum, and Tong Zhang. Raft: Reward ranked finetuning for generative foundation model alignment, 2023.
Yao Fu, Litu Ou, Mingyu Chen, Yuhao Wan, Hao Peng, and Tushar Khot. Chain-of-thought hub: A continuous effort to measure large language models' reasoning performance, 2023a.
Yao Fu, Hao Peng, Litu Ou, Ashish Sabharwal, and Tushar Khot. Specializing smaller language models towards multi-step reasoning. arXiv preprint arXiv:2301.12726, 2023b.
Leo Gao, John Schulman, and Jacob Hilton. Scaling laws for reward model overoptimization, 2022.
Tom Henighan, Jared Kaplan, Mor Katz, Mark Chen, Christopher Hesse, Jacob Jackson, Heewoo Jun, Tom B Brown, Prafulla Dhariwal, Scott Gray, et al. Scaling laws for autoregressive generative modeling. arXiv preprint arXiv:2010.14701, 2020.
Danny Hernandez, Jared Kaplan, Tom Henighan, and Sam McCandlish. Scaling laws for transfer, 2021.
Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, Tom Hennigan, Eric Noland, Katie Millican, George van den Driessche, Bogdan Damoc, Aurelia Guy, Simon Osindero, Karen Simonyan, Erich Elsen, Jack W. Rae, Oriol Vinyals, and Laurent Sifre. Training compute-optimal large language models, 2022.
Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve, 2022.
Zhanming Jie, Jierui Li, and Wei Lu. Learning to reason deductively: Math word problem solving as complex relation extraction. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5944-5955, Dublin, Ireland, May 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.acl-long.410. URL https://aclanthology.org/2022.acl-long.410.
Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. CoRR, abs/2001.08361, 2020. URL https://arxiv.org/abs/2001.08361.
Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. Large language models are zero-shot reasoners. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=e2TBb5y0yFf.
Rik Koncel-Kedziorski, Subhro Roy, Aida Amini, Nate Kushman, and Hannaneh Hajishirzi. MAWPS: A math word problem repository. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1152-1157, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-1136. URL https://aclanthology.org/N16-1136.
Yihuai Lan, Lei Wang, Qiyuan Zhang, Yunshi Lan, Bing Tian Dai, Yan Wang, Dongxiang Zhang, and Ee-Peng Lim. Mwptoolkit: An open-source framework for deep learning-based math word problem solvers, 2021.
Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, and Vedant Misra. Solving quantitative reasoning problems with language models, 2022.
Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making language models better reasoners with step-aware verifier. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 5315-5333, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.291.
Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023.
Niklas Muennighoff, Alexander M. Rush, Boaz Barak, Teven Le Scao, Aleksandra Piktus, Nouamane Tazi, Sampo Pyysalo, Thomas Wolf, and Colin Raffel. Scaling data-constrained language models, 2023.
Ansong Ni, Jeevana Priya Inala, Chenglong Wang, Alex Polozov, Christopher Meek, Dragomir Radev, and Jianfeng Gao. Learning math reasoning from self-sampled correct and partially-correct solutions. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=4D4TSJE6-K.
Maxwell Nye, Anders Johan Andreassen, Guy Gur-Ari, Henryk Michalewski, Jacob Austin, David Bieber, David Dohan, Aitor Lewkowycz, Maarten Bosma, David Luan, Charles Sutton, and Augustus Odena. Show your work: Scratchpads for intermediate computation with language models, 2021.
OpenAI. Gpt-4 technical report, 2023.
Arkil Patel, Satwik Bhattamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168.
Si Qingyi, Wang Tong, Gu Naibin, Liu Rui, and Lin Zheng. Alpaca-cot: An instruction-tuning platform with unified interface of instruction collection, parameter-efficient methods, and large language models. https://github.com/PhoebusSi/alpaca-CoT, 2023.
Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, KDD '20, pp. 3505-3506, New York, NY, USA, 2020. Association for Computing Machinery. ISBN 9781450379984. doi: 10.1145/3394486.3406703. URL https://doi.org/10.1145/3394486.3406703.
David Saxton, Edward Grefenstette, Felix Hill, and Pushmeet Kohli. Analysing mathematical reasoning abilities of neural models, 2019.
Kumar Shridhar, Alessandro Stolfo, and Mrinmaya Sachan. Distilling reasoning capabilities into smaller language models. In Findings of the Association for Computational Linguistics: ACL 2023, pp. 7059-7073, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.findings-acl.441.
Feifan Song, Bowen Yu, Minghao Li, Haiyang Yu, Fei Huang, Yongbin Li, and Houfeng Wang. Preference ranking optimization for human alignment. arXiv preprint arXiv:2306.17492, 2023.
InternLM Team. Internlm: A multilingual language model with progressively enhanced capabilities. https://github.com/InternLM/InternLM, 2023.
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. Llama: Open and efficient foundation language models, 2023a.
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, and Thomas Scialom. Llama 2: Open foundation and fine-tuned chat models, 2023b.
Jonathan Uesato, Nate Kushman, Ramana Kumar, Francis Song, Noah Siegel, Lisa Wang, Antonia Creswell, Geoffrey Irving, and Irina Higgins. Solving math word problems with process- and outcome-based feedback, 2022.
Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.
Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=1PLlNIMMrw.
Jason Wei, Maarten Bosma, Vincent Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M. Dai, and Quoc V. Le. Finetuned language models are zero-shot learners. ArXiv, abs/2109.01652, 2021. URL https://api.semanticscholar.org/CorpusID:237416585.
Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed Huai hsin Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. Emergent abilities of large language models. Trans. Mach. Learn. Res., 2022, 2022a. URL https://api.semanticscholar.org/CorpusID:249674500.
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Huai hsin Chi, F. Xia, Quoc Le, and Denny Zhou. Chain of thought prompting elicits reasoning in large language models. ArXiv, abs/2201.11903, 2022b. URL https://api.semanticscholar.org/CorpusID:246411621.
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L. Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models, 2023.
Zheng Yuan, Hongyi Yuan, Chuanqi Tan, Wei Wang, and Songfang Huang. How well do large language models perform in arithmetic tasks? arXiv preprint arXiv:2304.02015, 2023a.
Zheng Yuan, Hongyi Yuan, Chuanqi Tan, Wei Wang, Songfang Huang, and Fei Huang. Rrhf: Rank responses to align language models with human feedback without tears, 2023b.
Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. STar: Bootstrapping reasoning with reasoning. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=3ELRdg2sgI.
Aohan Zeng, Xiao Liu, Zhengxiao Du, Zihan Wang, Hanyu Lai, Ming Ding, Zhuoyi Yang, Yifan Xu, Wendi Zheng, Xiao Xia, et al. Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414, 2022.
Tianjun Zhang, Fangchen Liu, Justin Wong, Pieter Abbeel, and Joseph E. Gonzalez. The wisdom of hindsight makes language models better instruction followers, 2023.
Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Yongfeng Huang, Ruyi Gan, Jiaxing Zhang, and Yujiu Yang. Solving math word problems via cooperative reasoning induced language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 4471-4485, Toronto, Canada, July 2023. Association for Computational Linguistics. URL https://aclanthology.org/2023.acl-long.245.
# A DETAILED EXPERIMENT SETTING
# A.1 SFT ON GSM8K
We fine-tune on GSM8K for 3 epochs with a batch size of 128 on NVIDIA A100 GPUs. We use 8 GPUs for the 7B and 13B models, 16 GPUs for the 33B models, and 32 GPUs for the 65B and 70B models during fine-tuning. We use a peak learning rate of 2e-5 with a $3\%$ learning rate warmup. We evaluate the results of the final epoch. We use greedy decoding to calculate maj1@1 and decoding with temperature 0.7 to calculate maj1@100.
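The exact training code is not specified here; as one illustration of the hyperparameters above, a Hugging Face `TrainingArguments` configuration could look roughly as follows. The per-device batch size, gradient accumulation, and bf16 settings are assumptions chosen to reach the global batch size of 128 on 8 GPUs.

```python
from transformers import TrainingArguments

# Illustrative mapping of the SFT hyperparameters; the output path and the
# distributed setup (e.g. DeepSpeed ZeRO3 for >=33B models) are assumptions.
args = TrainingArguments(
    output_dir="llama-gsm8k-sft",
    num_train_epochs=3,
    learning_rate=2e-5,
    warmup_ratio=0.03,              # 3% learning-rate warmup
    per_device_train_batch_size=4,  # 4 x 8 GPUs x 4 accumulation steps = 128
    gradient_accumulation_steps=4,
    bf16=True,
)
```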
# A.2 SFT ON DOWNSAMPLED GSM8K
We randomly downsample the GSM8K dataset for fine-tuning. We find that using 3 epochs on very small data fractions leads to very poor results, which are listed in Table 5. We search the number of training epochs over $\{3, \frac{3}{\text{data fraction}}\}$ (for example, 96 epochs for the 1/32 fraction) and evaluate the final epoch. We report the better test result of the two epoch settings.
# A.3 REJECTION SAMPLING FINE-TUNING ON GSM8K
We use an SFT model $\pi$ to sample on the training dataset $k = 100$ times with a temperature of 0.7. We extract the equation list from each generated reasoning path by first finding the $<<equation>>$ annotations, removing all white space, and joining the equation strings with a special symbol into a single string (we call
this get_equation in our algorithm), which we use for deduplication. We select the reasoning paths with the following algorithm:
Algorithm 1: Reasoning Path Selection
Data: reasoning paths for question $q$, $\mathcal{R}_q$. Result: selected reasoning paths for question $q$, $\mathcal{R}_q^s$.

1. Initialize the selected reasoning paths, $\mathcal{R}_q^s = \mathrm{list}()$.
2. Initialize the set of seen equation lists, $\mathcal{E}_q^s = \mathrm{set}()$.
3. For each $r$ in $\mathcal{R}_q$:
4. $\quad$ If $\mathrm{get\_equation}(r) \notin \mathcal{E}_q^s$: append $r$ to $\mathcal{R}_q^s$ and add $\mathrm{get\_equation}(r)$ to $\mathcal{E}_q^s$.
5. $\quad$ Otherwise: find $r^s \in \mathcal{R}_q^s$ such that $\mathrm{get\_equation}(r^s) = \mathrm{get\_equation}(r)$; if $\sum_{i:\, r_i^s \in \mathcal{R}_q^s,\, r_i^s \neq r^s} \mathrm{Levenshtein\_dist}(r, r_i^s) > \sum_{i:\, r_i^s \in \mathcal{R}_q^s,\, r_i^s \neq r^s} \mathrm{Levenshtein\_dist}(r^s, r_i^s)$, replace $r^s$ with $r$.
We aim to keep the most dissimilar reasoning paths based on Levenshtein distances; the motivation is that more diverse reasoning paths should generalize better.
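As a concrete illustration, here is a minimal, self-contained Python sketch of the equation extraction and the selection procedure in Algorithm 1; the helper names and the plain dynamic-programming edit distance are our own stand-ins, not the original implementation.

```python
import re

def get_equation(reasoning_path: str) -> str:
    """Join the <<...>> calculator annotations into a deduplication key."""
    equations = re.findall(r"<<(.+?)>>", reasoning_path)
    return "|".join(eq.replace(" ", "") for eq in equations)

def levenshtein(a: str, b: str) -> int:
    """Plain dynamic-programming edit distance (stand-in for a library call)."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = curr
    return prev[-1]

def select_paths(paths):
    """Algorithm 1: keep one path per equation list, preferring the path most
    dissimilar (by edit distance) to the other selected paths."""
    selected, index = [], {}
    for r in paths:
        key = get_equation(r)
        if key not in index:
            index[key] = len(selected)
            selected.append(r)
        else:
            pos = index[key]
            others = [s for i, s in enumerate(selected) if i != pos]
            if sum(levenshtein(r, s) for s in others) > \
               sum(levenshtein(selected[pos], s) for s in others):
                selected[pos] = r
    return selected
```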
# B DETAILED RESULTS OF SFT AND RFT
We list the detailed results of SFT and RFT in Tables 5 and 6.
<table><tr><td>Model</td><td>Data</td><td>Epoch</td><td>7B</td><td>7B-2</td><td>13B</td><td>13B-2</td><td>33B</td><td>65B</td><td>70B-2</td></tr><tr><td>ICL-8shot</td><td>0</td><td>0</td><td>11.0</td><td>14.6</td><td>17.8</td><td>28.7</td><td>35.6</td><td>50.9</td><td>56.8</td></tr><tr><td>SFT</td><td>1/32</td><td>96</td><td>9.5</td><td>10.1</td><td>8.6</td><td>17.1</td><td>18.6</td><td>25.2</td><td>27.4</td></tr><tr><td>SFT</td><td>1/16</td><td>48</td><td>14.3</td><td>15.5</td><td>14.2</td><td>23.9</td><td>25.9</td><td>28.9</td><td>33.6</td></tr><tr><td>SFT</td><td>1/8</td><td>24</td><td>17.9</td><td>20.8</td><td>18.4</td><td>28.5</td><td>31.6</td><td>35.8</td><td>38.9</td></tr><tr><td>SFT</td><td>1/4</td><td>12</td><td>21.6</td><td>27.7</td><td>26.7</td><td>36.3</td><td>38.4</td><td>45.6</td><td>46.9</td></tr><tr><td>SFT</td><td>1/2</td><td>6</td><td>29.0</td><td>33.1</td><td>35.2</td><td>43.7</td><td>48.6</td><td>50.5</td><td>57.5</td></tr><tr><td>SFT</td><td>1/32</td><td>3</td><td>7.8</td><td>14.2</td><td>0.0</td><td>5.9</td><td>25.3</td><td>28.9</td><td>15.8</td></tr><tr><td>SFT</td><td>1/16</td><td>3</td><td>12.7</td><td>16.2</td><td>7.4</td><td>27.7</td><td>29.2</td><td>39.5</td><td>52.8</td></tr><tr><td>SFT</td><td>1/8</td><td>3</td><td>16.5</td><td>21.8</td><td>19.5</td><td>33.4</td><td>39.3</td><td>46.0</td><td>57.8</td></tr><tr><td>SFT</td><td>1/4</td><td>3</td><td>22.7</td><td>28.1</td><td>27.4</td><td>37.5</td><td>44.6</td><td>50.4</td><td>57.8</td></tr><tr><td>SFT</td><td>1/2</td><td>3</td><td>30.9</td><td>34.6</td><td>36.1</td><td>45.3</td><td>50.8</td><td>55.6</td><td>61.0</td></tr><tr><td>SFT</td><td>7.4K</td><td>3</td><td>35.9</td><td>41.6</td><td>43.0</td><td>50.0</td><td>54.6</td><td>59.3</td><td>63.2</td></tr><tr><td>RFT no dedup</td><td>1/32</td><td>3</td><td>37.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT no dedup</td><td>1/16</td><td>3</td><td>38.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT no dedup</td><td>1/8</td><td>3</td><td>41.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT no dedup</td><td>1/4</td><td>3</td><td>41.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT no dedup</td><td>1/2</td><td>3</td><td>43.9</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT no dedup</td><td>400K</td><td>3</td><td>43.6</td><td>46.7</td><td>46.9</td><td>53.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=1</td><td>~12K</td><td>3</td><td>37.6</td><td>43.4</td><td>42.7</td><td>52.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=3</td><td>~15K</td><td>3</td><td>39.0</td><td>45.3</td><td>45.2</td><td>51.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=6</td><td>~18K</td><td>3</td><td>39.5</td><td>45.6</td><td>46.8</td><td>52.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=12</td><td>~22K</td><td>3</td><td>41.6</td><td>45.3</td><td>48.0</td><td>53.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=25</td><td>~28K</td><td>3</td><td>40.9</td><td>46.5</td><td>46.0</td><td>52.6</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT k=50</td><td>~35K</td><td>3</td><td>40.7</td><td>47.0</td><td>49.4</td><td>54.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RFT 
k=100</td><td>~47K</td><td>3</td><td>41.7</td><td>47.5</td><td>49.1</td><td>54.8</td><td>54.5</td><td>-</td><td>-</td></tr><tr><td>RFT-U13B</td><td>104K</td><td>3</td><td>49.3</td><td>50.3</td><td>52.1</td><td>55.4</td><td>56.5</td><td>59.0</td><td>62.3</td></tr><tr><td>RFT-U33B</td><td>110K</td><td>3</td><td>49.1</td><td>51.2</td><td>51.4</td><td>55.3</td><td>57.9</td><td>59.7</td><td>64.8</td></tr></table>
Table 5: Detailed numerical results in this paper; some experiments are still running. We report maj1@1 (accuracy) in this table.
<table><tr><td>Setting</td><td>7B</td><td>7B-2</td><td>13B</td><td>13B-2</td><td>33B</td><td>65B</td><td>70B-2</td></tr><tr><td>ICL-8shot</td><td>11.0/18.1</td><td>14.6/-</td><td>17.8/29.3</td><td>28.7/-</td><td>35.6/53.1</td><td>50.9/69.7</td><td>56.8/-</td></tr><tr><td>SFT</td><td>35.9/48.7</td><td>41.6/55.4</td><td>43.0/55.2</td><td>50.0/61.7</td><td>54.6/72.6</td><td>59.3/69.7</td><td>63.2/73.5</td></tr><tr><td>RFT k=100</td><td>41.7/52.7</td><td>47.5/58.7</td><td>49.1/59.9</td><td>54.8/65.4</td><td>54.5/-</td><td>-</td><td>-</td></tr><tr><td>RFT-U13B</td><td>49.3/61.8</td><td>50.3/65.6</td><td>52.1/66.2</td><td>55.4/69.1</td><td>56.5/-</td><td>59.0/-</td><td>62.3/-</td></tr><tr><td>RFT-U33B</td><td>49.1/61.6</td><td>51.2/64.1</td><td>51.4/66.3</td><td>55.3/69.1</td><td>57.9/71.3</td><td>59.7/-</td><td>64.8/-</td></tr></table>
Table 6: Detailed numerical results in this paper; some experiments are still running. We report maj1@100 in this table.
<table><tr><td>Query A</td><td>Weng earns $12 an hour for babysitting. Yesterday, she just did 50 minutes of babysitting. How much did she earn?</td></tr><tr><td>Path 1</td><td>Weng earned 12/60 = $(<12/60=0.2)0.2 an hour for the 50 minutes that she worked yesterday. Therefore, she earned $0.2 x 50 = $(<0.2*50=10)10 for the 50 minutes that she worked yesterday.##### 10</td></tr><tr><td>Path 2</td><td>Weng earns 12 / 60 = $(<12/60=0.2)0.2 per minute. So, Weng earned 50 * 0.2 = $(<50*0.2=10)10 for the 50 minutes of babysitting. ###### 10</td></tr><tr><td>Path 3</td><td>Since there are 60 minutes in an hour, then Weng did 50/60 = <(50/60=0.8333)0.8333 of an hour of babysitting. So, she earned $12 x 0.8333 = $(<12*0.8333=10)10. ###### 10</td></tr><tr><td>Path 4</td><td>She earns 12 * 50 / 60 = $(<12*50/60=10)10. ###### 10</td></tr><tr><td>Path 5</td><td>Weng earns 12 * (50 / 60) = $(<12*(50/60)=10)10. ###### 10</td></tr><tr><td>Query B</td><td>A local college is offering German lessons and currently has 8 students enrolled. Through advertising, 8 more became interested but a fourth of these dropped out within a day. 2 more got frustrated and left. The class then rallied to show how simple the course actually is and increased enrollment by 5 times the amount of students already enrolled in the class, but 2 had to drop it because of scheduling conflicts. After one last rally, 6 more people enrolled. As the days passed, half of the class eventually dropped, and half of the remaining students graduated. How many are still enrolled?</td></tr><tr><td>Path 1</td><td>8+8=(<8+8=16)16 students are enrolled in the German class. 8*.25=(<8*.25=2)2 dropped out from the new recruits. 2 more left, so the class had 16-2-2=(<16-2-2=12)12 students left. There were 12*5+12=(<12*5+12=72)72 total students in the class after the rally. Two people had to drop the class, so there were 72-2=(<72-2=70)70 left. 70+6=(<70+6=76)76 were enrolled after the final rally. 76/2=(<76/2=38)38 are left after half the class dropped. 38/2=(<38/2=19)19 are left after half the class graduated. ###### 19</td></tr><tr><td>Path 2</td><td>8+8=(<8+8=16)16 students are enrolled after the first rally. 16/4=(<16/4=4)4 dropped out. 16-4=(<16-4=12)12 remain. 12*5+12=(<12*5+12=72)72 after the second rally. 72-2=(<72-2=70)70 after 2 more dropped. 70+6=(<70+6=76)76 after 6 more enrolled. 76/2=(<76/2=38)38 remain after half the class dropped. 38/2=(<38/2=19)19 remain after half the class graduated. ###### 19</td></tr><tr><td>Path 3</td><td>Advertising brought in 8+8=(<8+8=16)16 students. Of these, 16/4=(<16/4=4)4 dropped out. 16-4-2=(<16-4-2=10)10 remained. Then 10*5=(<10*5=50)50 more enrolled. This brought the class to 50+10=(<50+10=60)60 students. Then 60+6=(<60+6=66)66 enrolled. So there were 66+10=(<66+10=76)76 students. Then 76/2=(<76/2=38)38 dropped. So 76-38=(<76-38=38)38 remained. Then 38/2=(<38/2=19)19 graduated. So 38-19=(<38-19=19)19 were left. ###### 19</td></tr></table>
Table 7: Cases of generated reasoning paths with different reasoning complexity from rejection sampling for RFT. The calculations are highlighted in red.
# C CASE STUDY OF RFT
In this section, we present cases of the training samples obtained from rejection sampling. These case studies shed light on how RFT potentially improves the mathematical reasoning performance of LLMs. The cases are shown in Table 7. As aforementioned, RFT keeps reasoning paths with different calculation processes, regarding equation forms or orders, that lead to the correct answers. In the cases for Query A in Table 7, all the reasoning paths from RFT result in the correct answer of 10, while the calculation processes of the reasoning are diverse. Paths 1 and 2, as well as Paths 4 and 5, differ in their equation forms, as highlighted in red. Paths 1 and 2 present a two-step calculation reasoning process, while Paths 4 and 5 use a one-step calculation reasoning process. The case demonstrates
that rejection sampling can potentially provide more supervision signals that improve mathematical reasoning performance. The filtered reasoning paths sampled from LLMs themselves are of similar quality to the reasoning demonstrations from human annotations.
# D PRELIMINARY EXPERIMENTS
# D.1 SELF QUERY AUGMENTATION
Through our preliminary experiments and case studies, the errors made by the fine-tuned LLMs are partly attributed to incorrect reasoning chains where LLMs mistakenly understand the context information or fail to consider all the information in the queries. Although such incorrect reasoning chains lead to wrong answers to the original queries, the reasoning chains themselves represent reasonable logic. For example, for the query "Josh decides to try flipping a house. He buys a house for \$80,000 and then puts in \$50,000 in repairs. This increased the value of the house by 150%. How much profit did he make?", a fine-tuned LLaMA model predicts "The value of the house increased by \$80,000 * .15 = \$12,000. So the house was worth \$80,000 + 12,000 = \$92,000. So he made a profit of 92,000 - 80,000 - 50,000 = \$42,000", where the model erroneously interprets 150% as 15%, but the reasoning chain is reasonable if we ignore the error.
Therefore, such wrong predictions made by the LLMs may be correct under other queries (if we change $150\%$ to $15\%$ in the above example). We conduct experiments to generate queries for the predicted reasoning chains. This is a similar idea to the hindsight experience replay (Andrychowicz et al., 2017) in reinforcement learning where the method is designed to deal with the sparse reward problems by changing the original objectives for the failed samples to form samples with positive rewards. Such an idea was recently adopted by HIR (Zhang et al., 2023) to better align LLMs with instructions.
Concretely, we reformat GSM8K in reverse by predicting the query given the corresponding ground-truth reasoning result, and we fine-tune a LLaMA model on this reversed task. We use this model to generate queries for the reasoning chains predicted by a normally fine-tuned LLaMA model on the training set of GSM8K, forming a training sample for augmentation. We experiment with the LLaMA 7B model and fine-tune models either on the mixture of original and generated samples or solely on generated samples.
The results are shown in the left subfigure of Figure 8. We can see that fine-tuning with self query augmentation data leads to the worst results, and the performance of mixing the original data with self-query-augmented data still falls short of that of the original data alone. The fine-tuned performance for mathematical reasoning does not benefit from this naive idea of self query augmentation. Through several case studies of the generated data, we find two major defects. The first is that some reasoning chains are themselves not logically sound, for example containing calculation errors. The second is that the generated query may not be suitable for a reasoning chain: the query generation model may still erroneously interpret the information in the reasoning chains. Both defects contribute to mediocre augmented data quality and are possible reasons for the failure of this data augmentation procedure.
# D.2 SELF REVISING AUGMENTATION
We also explore improving the mathematical reasoning abilities of LLMs through revising augmentation. To equip LLaMA with revising abilities, we generate a revising dataset by first sampling $K$ reasoning paths from a fine-tuned LLaMA model, then concatenating the query with one of the sampled reasoning paths using a template, and finally pairing it with the ground-truth reasoning path to form a training sample. We use a sampling temperature of 0.7 for generating reasoning paths. During inference, we use the fine-tuned revising model to revise the prediction from the normally fine-tuned model.
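A sketch of how such revision training samples can be constructed, assuming a hypothetical `sample_paths(model, question, k, temperature)` helper and a simple prompt template (the exact template and field names used are not specified here and are assumptions):

```python
REVISE_TEMPLATE = (
    "Question: {question}\n"
    "Draft solution: {draft}\n"
    "Please revise the solution and give the correct answer:\n"
)

def build_revision_samples(sft_model, dataset, sample_paths, k=1, temperature=0.7):
    """Pair (question + one sampled draft) with the ground-truth reasoning path."""
    samples = []
    for example in dataset:
        drafts = sample_paths(sft_model, example["question"], k, temperature)
        for draft in drafts:
            prompt = REVISE_TEMPLATE.format(question=example["question"], draft=draft)
            samples.append({"input": prompt, "target": example["reasoning"]})
    return samples
```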
The results are shown in the middle subfigure of Figure 8. We can see that with $K = 1$ the revising model improves the final accuracy marginally, from $35.90\%$ to $36.09\%$. Surprisingly, as we increase $K$, the performance degrades. A possible defect of the revising model is that the samples generated on the training set for revising training suffer from a distribution discrepancy with the samples generated on the test set for revising inference. The reasoning paths sampled on the training set may
Figure 8: Results for different methods of self data augmentation. GSM and H. represent GSM8K and Hindsight respectively. The red dotted lines in the middle and right figures represent the results of vanilla fine-tuning on GSM8K.
have a larger lexical similarity to the ground-truth reasoning paths than those sampled on the test set. Therefore, we try two different procedures to alleviate this issue.
1. We use the sampled reasoning path, out of the $K$ sampled paths, with the largest Levenshtein distance to the ground-truth path to form a training sample.
2. We split the training set into $N$ folds, fine-tune a model on each set of $N - 1$ folds, and sample reasoning paths on the held-out fold.
The results are shown in the middle and right subfigures of Figure 8. We can see that when leveraging the Levenshtein distance for reasoning path selection, the fine-tuned revising model enjoys a performance boost, achieving uniformly better performance than the fine-tuning baseline across different $K$'s. The results demonstrate that, for revising performance, the lexical diversity of the reasoning paths matters when constructing training samples. However, the revising performance does not benefit from the $N$-fold procedure.
# E ESTIMATING FLOPS OF SFT AND RFT
We mainly follow the notations of (Kaplan et al., 2020) here.
Training FLOPs For each input sample of length $n_{ctx}$ in the GSM8K dataset, we can split it into two parts:
$$
n_{ctx} = n_{Q} + n_{R} \tag{1}
$$
where $n_{Q}$ and $n_{R}$ denote the lengths of the question and of the generated reasoning path plus answer, respectively.
|
| 329 |
+
|
| 330 |
+
$$
|
| 331 |
+
C_{\text{train}} \approx 6 N n_{ctx} N_{s} \tag{2}
|
| 332 |
+
$$
|
| 333 |
+
|
| 334 |
+
where $N$ denotes the number of model parameters and $N_{s}$ denotes the number of samples.
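As a quick illustration of Equation 2 (the model size, sequence length, sample count, and epoch count below are illustrative assumptions, not values taken from the text):

```python
def train_flops(num_params: float, n_ctx: float, num_samples: float) -> float:
    """C_train ≈ 6 * N * n_ctx * N_s  (Eq. 2)."""
    return 6 * num_params * n_ctx * num_samples

# Illustrative only: ~7B parameters, ~200-token samples, ~7.5K samples, 3 epochs.
flops = 3 * train_flops(7e9, 200, 7_473)
print(f"{flops:.2e} FLOPs")  # ≈ 1.9e17
```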
|
| 335 |
+
|
| 336 |
+
Inference FLOPs We roughly compute the FLOPs per token during the forward pass:
|
| 337 |
+
|
| 338 |
+
$$
|
| 339 |
+
C_{\text{forward}}\left(n_{ctx}\right) = 2N + 2 n_{\text{layer}} n_{ctx} d_{\text{model}} \tag{3}
|
| 340 |
+
$$
|
| 341 |
+
|
| 342 |
+
To make the results more accurate, we also take into account the Key-Value (KV) cache during the decoding procedure.
|
| 343 |
+
|
| 344 |
+
$$
|
| 345 |
+
KV_{\text{cache}} \approx 4 n_{\text{layer}} d_{\text{model}}^{2} \tag{4}
|
| 346 |
+
$$
|
| 347 |
+
|
| 348 |
+
Therefore, we obtain the FLOPs per token during the forward pass considering the KV cache.
|
| 349 |
+
|
| 350 |
+
$$
|
| 351 |
+
\begin{aligned} C_{\text{forward}}^{\prime}\left(n_{ctx}\right) &= 2N + 2 n_{\text{layer}} n_{ctx} d_{\text{model}} - KV_{\text{cache}} & (5) \\ &= 24 n_{\text{layer}} d_{\text{model}}^{2} + 2 n_{\text{layer}} n_{ctx} d_{\text{model}} - 4 n_{\text{layer}} d_{\text{model}}^{2} & (6) \\ &= 20 n_{\text{layer}} d_{\text{model}}^{2} + 2 n_{\text{layer}} n_{ctx} d_{\text{model}} & (7) \\ &\approx 1.66 N + 2 n_{\text{layer}} n_{ctx} d_{\text{model}} & (8) \end{aligned}
|
| 352 |
+
$$
|
| 353 |
+
|
| 354 |
+
The total inference FLOPs are computed as follows:
|
| 355 |
+
|
| 356 |
+
$$
|
| 357 |
+
C_{\text{total}} = N_{s} \cdot \left[ n_{q} C_{\text{forward}}\left(n_{q}\right) + \sum_{i=n_{q}}^{n_{q}+n_{r}} i \cdot C_{\text{forward}}^{\prime}(i) \right] \tag{9}
|
| 358 |
+
$$
|
| 359 |
+
|
| 360 |
+
where $N_{s}$ denotes the number of samples, and $n_q$ and $n_r$ denote the average lengths (in tokens) of the user query and the generated response, respectively. In the GSM8K dataset, $n_q \approx 66$ and $n_r \approx 130$.
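A small sketch that evaluates Equations 3–9 literally as written; the LLaMA-7B-like shape ($n_{\text{layer}} = 32$, $d_{\text{model}} = 4096$, $N \approx 7 \times 10^9$) and the GSM8K test-set size of 1319 are example values, not results reported in the text:

```python
def forward_flops_per_token(num_params, n_layer, d_model, n_ctx):
    """Eq. 3: C_forward(n_ctx) = 2N + 2 * n_layer * n_ctx * d_model."""
    return 2 * num_params + 2 * n_layer * n_ctx * d_model

def forward_flops_per_token_kv(num_params, n_layer, d_model, n_ctx):
    """Eqs. 5-8: per-token forward FLOPs with the KV cache subtracted."""
    kv_cache = 4 * n_layer * d_model ** 2
    return 2 * num_params + 2 * n_layer * n_ctx * d_model - kv_cache

def total_inference_flops(num_samples, n_q, n_r, num_params, n_layer, d_model):
    """Eq. 9, implemented literally as written in the text."""
    prefill = n_q * forward_flops_per_token(num_params, n_layer, d_model, n_q)
    decode = sum(
        i * forward_flops_per_token_kv(num_params, n_layer, d_model, i)
        for i in range(n_q, n_q + n_r + 1)  # i runs from n_q to n_q + n_r inclusive
    )
    return num_samples * (prefill + decode)

# Example values only: GSM8K test size 1319, average lengths from the text, 7B-like shape.
print(f"{total_inference_flops(1319, 66, 130, 7e9, 32, 4096):.2e}")
```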
|
data/2023/2308_01xxx/2308.01825/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5d8dcdbfd725ee10fdacbc14505aed7b6bd3e0623875d636ecc146c88d917567
|
| 3 |
+
size 1109838
|
data/2023/2308_01xxx/2308.01825/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2023/2308_01xxx/2308.01834/8f7c1f3a-8864-47d7-b9cd-e1b3128d4da8_content_list.json
ADDED
|
@@ -0,0 +1,750 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "The Capability of Large Language Models to Measure Psychiatric Functioning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
166,
|
| 8 |
+
290,
|
| 9 |
+
831,
|
| 10 |
+
309
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Isaac R. Galatzer-Levy<sup>1</sup>, Daniel McDuff<sup>1</sup>, Vivek Natarajan<sup>1</sup>, Alan Karthikesalingam<sup>1</sup>, Matteo Malgaroli<sup>2</sup>",
|
| 17 |
+
"bbox": [
|
| 18 |
+
130,
|
| 19 |
+
310,
|
| 20 |
+
866,
|
| 21 |
+
349
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup>Google Research",
|
| 28 |
+
"bbox": [
|
| 29 |
+
112,
|
| 30 |
+
390,
|
| 31 |
+
261,
|
| 32 |
+
409
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "2NYU Grossman School of Medicine",
|
| 39 |
+
"bbox": [
|
| 40 |
+
112,
|
| 41 |
+
410,
|
| 42 |
+
411,
|
| 43 |
+
428
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Abstract",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
460,
|
| 53 |
+
131,
|
| 54 |
+
537,
|
| 55 |
+
147
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 1
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "The current work investigates the capability of Large language models (LLMs) that are explicitly trained on large corpuses of medical knowledge (Med-PaLM 2) to predict psychiatric functioning from patient interviews and clinical descriptions without being trained to do so. To assess this, $n = 145$ depression and $n = 115$ PTSD assessments and $n = 46$ clinical case studies across high prevalence/high comorbidity disorders (Depressive, Anxiety, Psychotic, trauma and stress, Addictive disorders) were analyzed using prompts to extract estimated clinical scores and diagnoses. Results demonstrate that Med-PaLM 2 is capable of assessing psychiatric functioning across a range of psychiatric conditions with the strongest performance being the prediction of depression scores based on standardized assessments (Accuracy range $= 0.80 - 0.84$ ) which were statistically indistinguishable from human clinical raters $t(1,144) = 1.20$ ; $p = 0.23$ . Results show the potential for general clinical language models to flexibly predict psychiatric risk based on free descriptions of functioning from both patients and clinicians.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
116,
|
| 64 |
+
169,
|
| 65 |
+
880,
|
| 66 |
+
409
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 1
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Main text",
|
| 73 |
+
"text_level": 1,
|
| 74 |
+
"bbox": [
|
| 75 |
+
457,
|
| 76 |
+
111,
|
| 77 |
+
540,
|
| 78 |
+
126
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 2
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Assessment of psychiatric functioning represents a common task across verticals of medicine. Primary care settings are the most common first point of contact for treatment or triage of common psychiatric disorders including depression, anxiety, post traumatic stress, psychosis, and addiction. $^{1}$ As such, it is a public health priority to scale up the assessment of common psychiatric risk and illness in primary care settings. $^{2,3}$ Assessments rely on either verbal self-report or structured screening instruments, often administered by non-experts with limited experience in determining risk or adjudicating between disorders. $^{4}$ The lack of standardization and automation of clinical information scoring and assessment represents a unique limitation of psychiatry compared to other areas of medicine. This limitation can be attributed to the nature of psychiatric assessment which results in linguistic descriptions rather than biological values that can be mathematically parsed. $^{5}$ Large language models (LLMs), which utilize advances in neural network architecture that are trained on large text datasets to flexibly interpret and respond to natural language, have demonstrated emergent learning capabilities whereby they can solve natural language problems, such as translation, that they were not explicitly trained to solve.",
|
| 85 |
+
"bbox": [
|
| 86 |
+
114,
|
| 87 |
+
148,
|
| 88 |
+
883,
|
| 89 |
+
429
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 2
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "Large scale language models (LLMs) are trained using relatively simple pre-text tasks, involving predicting preceding, intermediate or subsequent words or sentences that are hidden from the input $^{6}$ . These models have been shown to capture complex knowledge and concepts, due to scaling the neural architectures and the data used to train them. $^{6-8}$ The models can produce text that is indistinguishable from that written by humans, $^{9}$ match human-level performance on multiple reading-comprehension benchmarks, and can achieve passing grades on medical and law bar exams. $^{10,11}$ The capability to learn patterns in data without providing training examples, known as self-supervised learning, comes from training on large numbers of parameters and data sources to learn general rules and relationships that can be applied to answer specific questions. Just as LLMs trained on large corpuses of language can learn translation without training by understanding general rules of language, so may LLMs trained on general medical knowledge be capable of understanding specific linguistically based rules of clinical assessment.",
|
| 96 |
+
"bbox": [
|
| 97 |
+
114,
|
| 98 |
+
450,
|
| 99 |
+
880,
|
| 100 |
+
689
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 2
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "A particularly fruitful area to apply LLMs within medicine may be psychiatric assessment. Screening and diagnosis of psychiatric risk is linguistically assessed and communicated. As a result, machine learning models that utilize example data such as clinical interviews to train a model, known as supervised learning, have demonstrated strong results in classifying disorders including depression, PTSD, and psychosis. $^{12}$ However, because these models are trained on relatively narrow examples, they are typically not flexible enough to be applied to data that is different than the training examples. Owing to the large number of parameters and underlying training data, LLMs likely have latent knowledge of psychiatric language, assessment, symptoms and diagnosis. This knowledge is inconsistent and likely requires further training on focused",
|
| 107 |
+
"bbox": [
|
| 108 |
+
116,
|
| 109 |
+
710,
|
| 110 |
+
880,
|
| 111 |
+
890
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 2
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "medical knowledge (e.g. journals, case studies). Such data sources can provide diverse sources of psychiatric knowledge as screening and assessment of common psychiatric disorders commonly occur in non-psychiatric settings. $^{13}$ Further, as LLM technology is applied to medical applications, there is additional regulatory and ethical obligation to ensure that AI-driven assessments are built on sound data sources. $^{14}$ The current work tests the capabilities of LLMs to generate predictions of psychiatric symptom severity and diagnoses.",
|
| 118 |
+
"bbox": [
|
| 119 |
+
109,
|
| 120 |
+
90,
|
| 121 |
+
883,
|
| 122 |
+
210
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 3
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Depression and PTSD clinical interview assessments",
|
| 129 |
+
"text_level": 1,
|
| 130 |
+
"bbox": [
|
| 131 |
+
109,
|
| 132 |
+
231,
|
| 133 |
+
535,
|
| 134 |
+
248
|
| 135 |
+
],
|
| 136 |
+
"page_idx": 3
|
| 137 |
+
},
|
| 138 |
+
{
|
| 139 |
+
"type": "text",
|
| 140 |
+
"text": "We implement our experiments on a transformer architecture (PaLM 2 $^{8}$ ), with medical domain fine-tuning called Med-PaLM 2. $^{11}$ For the purposes of this analysis the large (L) model was used. Pretrained on a massive text corpus ( $^{10,11}$ ) comprising hundreds of billions of tokens, the model has been exposed to a diverse set of natural language use cases drawn from various sources (for full description of Med-PaLM 2 development (see ${}^{11}$ ). First, to assess Med-PaLM 2's accuracy to measure and screen for depression and post traumatic stress disorder (PTSD), we utilized research grade clinical interview transcripts (for full description see methods). Transcripts were entered as inputs to Med-PaLM following a standardized prompt structure designed to 1) focus model attention on knowledge of the utilized PTSD and depression rating scales (PCL-C; PHQ 8), 2) estimate scores on both scales; 3) produce a confidence estimate; 3) provide descriptive reasoning for the selected score (see methods for full description).",
|
| 141 |
+
"bbox": [
|
| 142 |
+
109,
|
| 143 |
+
250,
|
| 144 |
+
879,
|
| 145 |
+
470
|
| 146 |
+
],
|
| 147 |
+
"page_idx": 3
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"type": "text",
|
| 151 |
+
"text": "Med-PaLM 2 produced estimated scores for the PHQ-8 and PCL-C respectively. Estimated scores with no training ( $\\mu = 8.50$ ; $\\mathrm{SD} = 9.02$ ) were not statistically different from human raters ( $\\mu = 7.94$ ; $\\mathrm{SD} = 5.36$ ) for depression ratings [PHQ-8: $t(1,144) = 1.20$ ; $p = 0.23$ ] but were significantly different for PTSD ratings [PCL-C; $t(1,114) = 2.02$ ; $p < .01$ ] with humans ( $\\mu = 27.77$ ; $\\mathrm{SD} = 11.53$ ) scoring subjects significantly lower than Med-PaLM 2 ( $\\mu = 36.51$ ; $\\mathrm{SD} = 12.55$ ). Results of the prediction of real number scores demonstrated that Med-PaLM 2 predicted participant scores at a low error rate and identified caseness of depression and PTSD at a good accuracy. Analyses further revealed that while Med-PaLM 2's classified depression at both high sensitivity and specificity (sensitivity $= 0.75$ ; specificity $= 0.82$ ), Med-PaLM 2's performance classifying PTSD demonstrated strong specificity (0.98, it demonstrated low sensitivity (0.30; See Table 1 for full results)",
|
| 152 |
+
"bbox": [
|
| 153 |
+
109,
|
| 154 |
+
491,
|
| 155 |
+
879,
|
| 156 |
+
710
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 3
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "text",
|
| 162 |
+
"text": "In comparison to results of human raters in the published literature, Med-PaLM 2 performance in assessing depression, as measured by sensitivity and specificity is consistent with performance of human raters pooled across studies through meta-analysis (Pooled sensitivity $= 0.84$ (range $= .70 - .94$ ); Pooled specificity $= 0.81$ (range $= 0.69 - 0.82$ ). Further Med-PaLM 2 demonstrated consistent scores on Cohen's Kappa, a measure of agreement between raters, with human raters when compared to estimates in the literature between two human raters Med-PaLM 2 Kappa $= .55$ ; Published Kappa ranges $= 0.35 - 0.76$ ).<sup>18</sup> The PCL has similarly demonstrated a range across studies (sensitivity range $= 0.20 - 1.00$ ; specificity range $= 0.71 - 0.99$ ) with variability",
|
| 163 |
+
"bbox": [
|
| 164 |
+
109,
|
| 165 |
+
731,
|
| 166 |
+
883,
|
| 167 |
+
893
|
| 168 |
+
],
|
| 169 |
+
"page_idx": 3
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"type": "text",
|
| 173 |
+
"text": "attributed to study bias and a lack of consensus on the appropriate score for a clinical cut-off. $^{19}$ In this instance, Med-PaLM 2 demonstrated high specificity but moderate to low sensitivity and a Kappa between the LLM and the human rater of 0.33 indicating fair agreement. $^{20}$ Sensitivity, specificity, and the Kappa between human and LLM ratings are reported in Table 1. Results further demonstrated improvements when weighting regression results by the model confidence score and when using a cut-off threshold $=> -0.20$ (See supplemental Figure 1).",
|
| 174 |
+
"bbox": [
|
| 175 |
+
109,
|
| 176 |
+
90,
|
| 177 |
+
883,
|
| 178 |
+
210
|
| 179 |
+
],
|
| 180 |
+
"page_idx": 4
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"type": "table",
|
| 184 |
+
"img_path": "images/cd9ff29a89f675bb7bfcf367eb0faada42225ae80672e623703e38f314980f64.jpg",
|
| 185 |
+
"table_caption": [
|
| 186 |
+
"Table 1: Results of Med-PaLM 2 prediction of PCL-C and PHQ-8 scores and clinical cutoffs."
|
| 187 |
+
],
|
| 188 |
+
"table_footnote": [],
|
| 189 |
+
"table_body": "<table><tr><td></td><td>Med-PaLM 2 PCL-C</td><td>Med-PaLM 2 PHQ-8</td></tr><tr><td>Accuracy</td><td>0.74</td><td>0.80</td></tr><tr><td>F1 Score</td><td>0.64</td><td>0.77</td></tr><tr><td>Precision</td><td>0.88</td><td>0.65</td></tr><tr><td>Sensitivity</td><td>0.30</td><td>0.75</td></tr><tr><td>Specificity</td><td>0.98</td><td>0.82</td></tr><tr><td>MAE</td><td>9.07</td><td>2.33</td></tr><tr><td>RMSE</td><td>11.2</td><td>3.93</td></tr><tr><td>Kappa with Clinical Ratings</td><td>0.33</td><td>0.55</td></tr><tr><td>Pearson r (p-value)</td><td>0.41 (p < 0.01)</td><td>0.55 (p < 0.01)</td></tr></table>",
|
| 190 |
+
"bbox": [
|
| 191 |
+
243,
|
| 192 |
+
284,
|
| 193 |
+
754,
|
| 194 |
+
551
|
| 195 |
+
],
|
| 196 |
+
"page_idx": 4
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"type": "text",
|
| 200 |
+
"text": "To assess Med-PaLM 2's capability to extract and summarize diagnostically relevant information, we compared the frequency of diagnostically descriptive terms and phrases taken from the DSM 5 descriptions of MDD and PTSD. We compared the frequency of MDD and PTSD words in the description of PHQ-8 and PCL-C estimates across cases who were assessed for both. Results demonstrated that Med-PaLM 2 was significantly more likely to use words associated with the correct diagnosis when describing results of the PHQ-8 and PCL-C respectively $[\\chi^2 (1,146) = 138.12; p < .001; \\mathrm{O.R.} = 3.88$ ; see Supplemental Table 1; Figure 2].",
|
| 201 |
+
"bbox": [
|
| 202 |
+
109,
|
| 203 |
+
590,
|
| 204 |
+
867,
|
| 205 |
+
732
|
| 206 |
+
],
|
| 207 |
+
"page_idx": 4
|
| 208 |
+
},
|
| 209 |
+
{
|
| 210 |
+
"type": "text",
|
| 211 |
+
"text": "Figure 2: Frequency of words and phrases associated with Major Depressive Disorder (MDD) and Posttraumatic Stress Disorder (PTSD) associated with MDD and PTSD assessments",
|
| 212 |
+
"bbox": [
|
| 213 |
+
109,
|
| 214 |
+
771,
|
| 215 |
+
857,
|
| 216 |
+
811
|
| 217 |
+
],
|
| 218 |
+
"page_idx": 4
|
| 219 |
+
},
|
| 220 |
+
{
|
| 221 |
+
"type": "image",
|
| 222 |
+
"img_path": "images/b270f773e9f6c4bc475248758919f1ab84014f75f8b239dfa31470e6c74c9b8c.jpg",
|
| 223 |
+
"image_caption": [],
|
| 224 |
+
"image_footnote": [
|
| 225 |
+
"Note: Sleep was removed from analyses because sleep abnormalities are diagnostic of both PTSD and MDD."
|
| 226 |
+
],
|
| 227 |
+
"bbox": [
|
| 228 |
+
196,
|
| 229 |
+
97,
|
| 230 |
+
802,
|
| 231 |
+
444
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 5
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "text",
|
| 237 |
+
"text": "Clinical case study assessment",
|
| 238 |
+
"text_level": 1,
|
| 239 |
+
"bbox": [
|
| 240 |
+
116,
|
| 241 |
+
518,
|
| 242 |
+
359,
|
| 243 |
+
535
|
| 244 |
+
],
|
| 245 |
+
"page_idx": 5
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"text": "Next, to assess Med-PaLM 2's capabilities to provide clinical labels across diverse psychiatric disorders that are commonly encountered in primary care setting, we entered individual de identified training case studies with accompanying diagnoses hidden from the following psychiatric categories: depressive disorders (e.g. dysthymia, MDD, premenstrual dysphoric disorder; $n = 12$ ), anxiety (e.g., specific phobias, Generalized Anxiety Disorder; $n = 6$ ), posttraumatic (e.g., PTSD, acute stress disorder; $n = 8$ ), substance and addiction related (e.g., cocaine dependence; gambling disorder; $n = 7$ ), and psychotic disorders (schizophrenia, schizoaffective disorder; $n = 7$ ) from American Psychiatric Association (APA) training examples.[21] We assessed the accuracy to correctly label the diagnostic category, label the specific diagnosis within each category compared to others, and label additional diagnoses or diagnostic modifiers (See methods for a description of the dataset).",
|
| 250 |
+
"bbox": [
|
| 251 |
+
114,
|
| 252 |
+
537,
|
| 253 |
+
880,
|
| 254 |
+
756
|
| 255 |
+
],
|
| 256 |
+
"page_idx": 5
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"text": "Without providing guidance on diagnoses to choose between, Med-PaLM 2 correctly labeled the diagnostic categories $92.5\\%$ ( $n = 37$ ) of the time and labeled the correct diagnosis $77.5\\%$ ( $n = 31$ ) of the time. The frequency of correct diagnosis within each category was compared using Fisher's Exact Test. Results demonstrated that there were no statistically significant differences between diagnoses, except for Depressive Disorders for which Med-PaLM 2 demonstrated marginally better categorization using Fisher's exact test [Phi (1, 39) = -0.27; $p = .09$ ; O.R. =",
|
| 261 |
+
"bbox": [
|
| 262 |
+
116,
|
| 263 |
+
777,
|
| 264 |
+
875,
|
| 265 |
+
897
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 5
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"text": "0.32]. All other diagnoses within categories did not approach significance (see Supplemental Table 2). Finally, in 24 case studies, an additional diagnosis or diagnostic modifier was provided. Med-PaLM 2 correctly generated additional diagnoses and modifiers in only $20\\%$ ( $n = 5$ ) of cases. Given the small sample of subjects within each category that had additional diagnoses and modifiers, no statistical tests could be employed for comparison.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
114,
|
| 274 |
+
89,
|
| 275 |
+
880,
|
| 276 |
+
188
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 6
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "In summary, we applied Med-PaLM-2, an LLM trained on general medical knowledge, $^{10}$ to predict the results of structured depression and PTSD assessment instruments from de-identified transcripts of standardized research clinical interview databases. Med-PaLM-2 achieved state-of-the-art performance on these tasks, and was additionally able to provide an estimated confidence rating in its assessment, and a written explanation of the rationale for its scoring. Further, when applied to case studies, Med-PaLM-2 demonstrated high accuracy in labeling DSM 5 diagnoses without prior training. Importantly Med-PaLM 2 performed inconsistently in identifying comorbidities and diagnostic modifiers (i.e. MDD with psychotic features) indicating that additional training or prompt tuning may be required to improve models.",
|
| 283 |
+
"bbox": [
|
| 284 |
+
114,
|
| 285 |
+
209,
|
| 286 |
+
880,
|
| 287 |
+
388
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 6
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"text": "In addition to prediction of diagnosis, Med-PaLM 2 provided explanations for the model decisions based on the text. The capability to summarize the reasoning behind modeling decisions is an important advance in machine learning that aims for medical applications. High dimensional models are commonly criticized and limited in their medical applications because they are uninterpretable \"black boxes\". In the current context where psychiatric diagnoses are heterogeneous in their symptom presentation and treatments and linguistic descriptions can be idiosyncratic among non-specialist clinicians,[22] explanations through summarization are equally relevant to assessment outcomes. To formally test the capability for summarization in PTSD and MDD assessment, we conducted an analysis of the frequency of words associated with each diagnostic category revealed a statistically higher frequency of words and phrases associated with each diagnostic category within Med-PaLM 2's explanation of the model result. This indicates that LLMs both provide diagnoses and provide explanatory summarization at an accuracy that is actionable for clinical screening.",
|
| 294 |
+
"bbox": [
|
| 295 |
+
114,
|
| 296 |
+
409,
|
| 297 |
+
877,
|
| 298 |
+
669
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 6
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "Large language models demonstrate the capability to read and understand common psychiatric constructs without explicitly teaching them to perform this function. Further, analyses of word frequencies show that Med-Palm 2 produces content-specific summarization. As such, Med-PaLM 2 demonstrates the capability to assess psychiatric functioning based on both patient and clinician's descriptions while providing explainable summarization. Results demonstrate that Med-PaLM 2 is better able to predict depression than other psychiatric constructs. Depression ratings from Med-PaLM 2 did not differ statistically from human raters while PTSD scores were shown to be statistically discrepant between human and automated scoring. Further, depressive disorder classification based on case studies was marginally better than other disorder categories. This result likely reflects the overall prevalence of depressive disorders and even the use of the",
|
| 305 |
+
"bbox": [
|
| 306 |
+
114,
|
| 307 |
+
690,
|
| 308 |
+
880,
|
| 309 |
+
890
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 6
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "PHQ in clinical screening which are both much more common than other disorders and screenings. Additional model tuning may be required to improve the accuracy of other assessments. We hypothesize that Med-Palm 2 performed best when assessing MDD because it is the most commonly occurring psychiatric condition and psychiatric comorbidity with physical illness. As a result, models most likely had access to the greatest number and diversity of training examples in the general medical corpus. Further, the current results are demonstrated on relatively small datasets and in limited use cases. As such, the performance of these models can not be generalized and we can not make broad claims about the capability to screen or evaluate psychiatric functioning using LLMs. The disorders assessed in this study are in high prevalence globally while the current work is limited to English only using demographically narrow data sources for testing. Results are not intended to serve as a generalized solution, but rather to demonstrate the capabilities that can be harnessed, developed, and validated to improve the scale and access of psychiatric screening and assessment. Additional data sources from diverse populations and formats are required to generalize and apply these results.",
|
| 316 |
+
"bbox": [
|
| 317 |
+
116,
|
| 318 |
+
89,
|
| 319 |
+
880,
|
| 320 |
+
369
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 7
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"text": "Despite these limitations, the current results demonstrate that LLMs trained on general medical knowledge have emergent capabilities to predict psychiatric functioning without being trained to do so. As such, LLMs are likely to find broad applications to standardize screening and assessment across medical contexts that rely on verbal descriptions from patients and clinicians.",
|
| 327 |
+
"bbox": [
|
| 328 |
+
116,
|
| 329 |
+
390,
|
| 330 |
+
875,
|
| 331 |
+
468
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 7
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"text": "Methods",
|
| 338 |
+
"text_level": 1,
|
| 339 |
+
"bbox": [
|
| 340 |
+
116,
|
| 341 |
+
511,
|
| 342 |
+
189,
|
| 343 |
+
527
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 7
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "text",
|
| 349 |
+
"text": "Datasets",
|
| 350 |
+
"text_level": 1,
|
| 351 |
+
"bbox": [
|
| 352 |
+
116,
|
| 353 |
+
551,
|
| 354 |
+
196,
|
| 355 |
+
569
|
| 356 |
+
],
|
| 357 |
+
"page_idx": 7
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "text",
|
| 361 |
+
"text": "Depression and PTSD clinical interview assessments",
|
| 362 |
+
"text_level": 1,
|
| 363 |
+
"bbox": [
|
| 364 |
+
114,
|
| 365 |
+
598,
|
| 366 |
+
532,
|
| 367 |
+
614
|
| 368 |
+
],
|
| 369 |
+
"page_idx": 7
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"type": "text",
|
| 373 |
+
"text": "The Distress Analysis Interview Corpus Wizard of Oz (DAIC-WOZ) and extended corpus $^{15}$ was utilized as a primary data source for analysis. The DAIC-WOZ contains previously de-identified interview transcripts and accompanying expert ratings on the 8 item Patient Health Questionnaire $[(PHQ-8)^{16};\\mathrm{n} = 145]$ and the PTSD Checklist-Civilian version $[(PCL - C^{17});\\mathrm{n} = 115]$ . These data are available to researchers from the original publisher (see $^{15}$ ) and was obtained through an investigator initiated request. Data sources were deemed to not require IRB oversight because they were public and de-identified. Based on the clinical cut-off of 10 on the PHQ-8, $\\mathrm{n} = 69$ subjects could be categorized as meeting criteria for provisional MDD and $\\mathrm{n} = 45$ provisional PTSD based on a PCL-C cut-off of 44 or greater. For the purpose of analyses, interviewee speech content was separated from the interviewer and used for analyses while interviewer speech was omitted from analyses.",
|
| 374 |
+
"bbox": [
|
| 375 |
+
114,
|
| 376 |
+
618,
|
| 377 |
+
880,
|
| 378 |
+
835
|
| 379 |
+
],
|
| 380 |
+
"page_idx": 7
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"type": "text",
|
| 384 |
+
"text": "Clinical case study assessment",
|
| 385 |
+
"text_level": 1,
|
| 386 |
+
"bbox": [
|
| 387 |
+
116,
|
| 388 |
+
859,
|
| 389 |
+
359,
|
| 390 |
+
875
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 7
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"text": "Case studies and accompanying diagnoses were taken from DSM-5 Clinical Cases is a companion book to the Diagnostic and Statistical Manual of Mental Disorders, Fifth Edition (DSM-5), which is the standard classification of mental disorders used by mental health professionals in the United States. The book provides in-depth case studies of patients with a variety of mental disorders, as classified by the DSM-5. The book is divided into 20 chapters, each of which corresponds to a major category of mental disorders in the DSM-5. Each chapter contains a series of case studies with distinct diagnoses within a diagnostic category. The case studies are written by leading experts in the field of psychiatry and provide detailed descriptions of the patients' symptoms, history, and treatment. The areas of psychiatry covered include Neurodevelopmental disorders, Schizophrenia spectrum and other psychotic disorders, Bipolar and related disorders, Depressive disorders, Anxiety disorders, Obsessive-compulsive and related disorders, Trauma- and stressor-related disorders, Dissociative disorders, Somatic symptom and related disorders, Feeding and eating disorders, Elimination disorders, Sleep-wake disorders, Sexual dysfunctions, Gender dysphoria, Disruptive, impulse-control, and conduct disorders, Substance-related and addictive disorders, Neurocognitive disorders",
|
| 397 |
+
"bbox": [
|
| 398 |
+
114,
|
| 399 |
+
88,
|
| 400 |
+
883,
|
| 401 |
+
388
|
| 402 |
+
],
|
| 403 |
+
"page_idx": 8
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"type": "text",
|
| 407 |
+
"text": "The current analyses utilized all case studies from the most common psychiatric disorder categories including Depressive disorders, Anxiety disorders, Trauma- and stressor-related disorders, substance-related and addictive disorders. And psychotic disorders.",
|
| 408 |
+
"bbox": [
|
| 409 |
+
116,
|
| 410 |
+
398,
|
| 411 |
+
831,
|
| 412 |
+
455
|
| 413 |
+
],
|
| 414 |
+
"page_idx": 8
|
| 415 |
+
},
|
| 416 |
+
{
|
| 417 |
+
"type": "text",
|
| 418 |
+
"text": "Outcomes",
|
| 419 |
+
"text_level": 1,
|
| 420 |
+
"bbox": [
|
| 421 |
+
116,
|
| 422 |
+
488,
|
| 423 |
+
210,
|
| 424 |
+
506
|
| 425 |
+
],
|
| 426 |
+
"page_idx": 8
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"type": "text",
|
| 430 |
+
"text": "Depression and PTSD clinical interview assessments",
|
| 431 |
+
"text_level": 1,
|
| 432 |
+
"bbox": [
|
| 433 |
+
114,
|
| 434 |
+
534,
|
| 435 |
+
532,
|
| 436 |
+
550
|
| 437 |
+
],
|
| 438 |
+
"page_idx": 8
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"type": "text",
|
| 442 |
+
"text": "Primary outcome metric of PTSD severity was the PTSD Check-List Civilian Version((PCL-C), a 17-item self-report measure of posttraumatic stress disorder (PTSD) symptoms.[17] The PCL-C is based on the DSM-IV criteria for PTSD and assesses the frequency and severity of symptoms in the past month and validated to assess non-military related symptoms of PTSD including, re-experiencing the traumatic event (e.g., nightmares, flashbacks), avoidance of reminders of the traumatic event, negative alterations in cognitions and mood (e.g., negative thoughts about oneself, difficulty feeling positive emotions), and alterations in arousal and reactivity (e.g., difficulty sleeping, irritability). Each item is rated on a scale of 1 (not at all) to 5 (extremely). The total score ranges from 17 to 85, with higher scores indicating more severe PTSD symptoms. A score of 50 or higher is considered to be indicative of PTSD. A clinical cut-off score of 44 for \"probable PTSD\" was selected based on guidance from the validation literature.[17]",
|
| 443 |
+
"bbox": [
|
| 444 |
+
114,
|
| 445 |
+
553,
|
| 446 |
+
880,
|
| 447 |
+
771
|
| 448 |
+
],
|
| 449 |
+
"page_idx": 8
|
| 450 |
+
},
|
| 451 |
+
{
|
| 452 |
+
"type": "text",
|
| 453 |
+
"text": "The primary outcome metric of depression severity was the Patient Health Questionnaire 8-item version (PHQ-8). The PHQ-8 is a brief, self-report questionnaire that is used to screen for and measure the severity of depressive symptoms. It is based on the nine diagnostic criteria for major depressive disorder (MDD) in the DSM-IV. The PHQ-8 is a widely used and well-validated measure of depression. The scale consists of eight items in which respondents are asked to rate",
|
| 454 |
+
"bbox": [
|
| 455 |
+
116,
|
| 456 |
+
792,
|
| 457 |
+
880,
|
| 458 |
+
891
|
| 459 |
+
],
|
| 460 |
+
"page_idx": 8
|
| 461 |
+
},
|
| 462 |
+
{
|
| 463 |
+
"type": "text",
|
| 464 |
+
"text": "how often they have experienced each symptom over the past two weeks on a scale of 0 (not at all) to 3 (nearly every day). The total score ranges from 0 to 24, with higher scores indicating more severe depressive symptoms. A cut off score of 10 or higher was selected to assign probable depression to interviews.",
|
| 465 |
+
"bbox": [
|
| 466 |
+
116,
|
| 467 |
+
90,
|
| 468 |
+
866,
|
| 469 |
+
167
|
| 470 |
+
],
|
| 471 |
+
"page_idx": 9
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"type": "text",
|
| 475 |
+
"text": "Clinical case study assessment",
|
| 476 |
+
"text_level": 1,
|
| 477 |
+
"bbox": [
|
| 478 |
+
116,
|
| 479 |
+
191,
|
| 480 |
+
361,
|
| 481 |
+
208
|
| 482 |
+
],
|
| 483 |
+
"page_idx": 9
|
| 484 |
+
},
|
| 485 |
+
{
|
| 486 |
+
"type": "text",
|
| 487 |
+
"text": "Outcomes accompanied each case study. Outcomes included 1) diagnostic category (e.g. depressive disorder; 2) specific disorder or diagnosis (e.g. Major Depressive Disorder) along with diagnostic modifiers or comorbidities (e.g with psychotic features).",
|
| 488 |
+
"bbox": [
|
| 489 |
+
116,
|
| 490 |
+
210,
|
| 491 |
+
848,
|
| 492 |
+
268
|
| 493 |
+
],
|
| 494 |
+
"page_idx": 9
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"type": "text",
|
| 498 |
+
"text": "Analysis",
|
| 499 |
+
"text_level": 1,
|
| 500 |
+
"bbox": [
|
| 501 |
+
116,
|
| 502 |
+
311,
|
| 503 |
+
196,
|
| 504 |
+
330
|
| 505 |
+
],
|
| 506 |
+
"page_idx": 9
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"type": "text",
|
| 510 |
+
"text": "Depression and PTSD clinical interview assessments",
|
| 511 |
+
"text_level": 1,
|
| 512 |
+
"bbox": [
|
| 513 |
+
116,
|
| 514 |
+
354,
|
| 515 |
+
535,
|
| 516 |
+
372
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 9
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"type": "text",
|
| 522 |
+
"text": "To assess the accuracy of Med-PaLM 2, we first applied prompts to focus the model's attention through the prompt: \"Are you familiar with the [PHQ-8/PCL-C]?\" Next, Med-PaLM 2 was prompted, \"Based on the following clinical interview, what do you estimate the Participants [PHQ-8/PCL-C] score is?\" Med-PaLM 2's performance with no additional training or data examples was assessed and compared to models that were trained on example data. For each interview, Med-PaLM 2 provided: 1) an estimated clinical score; 2) a model-derived confidence score for each result and; 3) a description of the reasoning behind the score. The confidence score is a log-likelihood estimation that the model would generate the target from the given input. Based on the published literature for each scale, provisional diagnoses of major depressive disorder (MDD; PHQ-8 cutoff $= >10$ ) and posttraumatic stress disorder (PTSD; PCL-C cutoff $= >44$ ). We further compared the accuracy of model-derived diagnoses based on different cutoffs for the confidence scores. Finally, we analyzed the frequency of words and phrases taken from the MDD and PTSD diagnosis to determine if Med-PaLM 2 was more likely to use appropriate descriptors associated with each diagnosis ( e.g. PTSD: flashbacks, intrusive thoughts; MDD: low mood, loss of appetite; see supplemental Table 3 for list of descriptive terms per diagnosis). The term \"sleep\" was removed because abnormal sleep is a symptom of both PTSD and MDD.",
|
| 523 |
+
"bbox": [
|
| 524 |
+
114,
|
| 525 |
+
375,
|
| 526 |
+
874,
|
| 527 |
+
710
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 9
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "Clinical case study assessment",
|
| 534 |
+
"text_level": 1,
|
| 535 |
+
"bbox": [
|
| 536 |
+
116,
|
| 537 |
+
734,
|
| 538 |
+
361,
|
| 539 |
+
752
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 9
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "text",
|
| 545 |
+
"text": "To assess the accuracy of Med-PaLM 2, each case study was entered and prompted \"Take on the expertise of an expert in psychiatric diagnosis using the DSM 5. Read the following case study and apply the most appropriate diagnoses.\" Each case study produced a diagnosis that was used to assess both the diagnostic category and primary diagnosis. Additionally, when appropriate, Med-PaLM 2 produced secondary diagnoses and diagnostic modifiers. These outputs were used for statistical comparisons.",
|
| 546 |
+
"bbox": [
|
| 547 |
+
116,
|
| 548 |
+
755,
|
| 549 |
+
875,
|
| 550 |
+
875
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 9
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "Supplemental figures",
|
| 557 |
+
"text_level": 1,
|
| 558 |
+
"bbox": [
|
| 559 |
+
116,
|
| 560 |
+
90,
|
| 561 |
+
295,
|
| 562 |
+
109
|
| 563 |
+
],
|
| 564 |
+
"page_idx": 10
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"type": "text",
|
| 568 |
+
"text": "Supplemental Figure 1: Model accuracy and sample size across model-derived confidence score cut-offs.",
|
| 569 |
+
"bbox": [
|
| 570 |
+
114,
|
| 571 |
+
128,
|
| 572 |
+
872,
|
| 573 |
+
167
|
| 574 |
+
],
|
| 575 |
+
"page_idx": 10
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"type": "image",
|
| 579 |
+
"img_path": "images/f374c150eca073e1796dc4845efe49158d5ccb047e884a14838f9ebf1d98b9a4.jpg",
|
| 580 |
+
"image_caption": [],
|
| 581 |
+
"image_footnote": [],
|
| 582 |
+
"bbox": [
|
| 583 |
+
259,
|
| 584 |
+
193,
|
| 585 |
+
733,
|
| 586 |
+
523
|
| 587 |
+
],
|
| 588 |
+
"page_idx": 10
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"type": "text",
|
| 592 |
+
"text": "Supplemental Table 1: Frequency of words in Med-PaLM 2's reason for the associated Major Depressive Disorder (MDD) and Posttraumatic Stress Disorder (PTSD) assessment.",
|
| 593 |
+
"bbox": [
|
| 594 |
+
116,
|
| 595 |
+
551,
|
| 596 |
+
849,
|
| 597 |
+
590
|
| 598 |
+
],
|
| 599 |
+
"page_idx": 10
|
| 600 |
+
},
|
| 601 |
+
{
|
| 602 |
+
"type": "table",
|
| 603 |
+
"img_path": "images/b659e55514ef9e8eb2bef9f80946bf60afe07d40c621654d58740b3ccdc57738.jpg",
|
| 604 |
+
"table_caption": [],
|
| 605 |
+
"table_footnote": [],
|
| 606 |
+
"table_body": "<table><tr><td></td><td></td><td colspan=\"3\">Actual</td></tr><tr><td></td><td></td><td>MDD Term</td><td>PTSD Term</td><td>Total</td></tr><tr><td rowspan=\"3\">Predicted</td><td>MDD Assessment</td><td>118</td><td>8</td><td>126</td></tr><tr><td>PTSD Assessment</td><td>26</td><td>101</td><td>127</td></tr><tr><td>Total</td><td>144</td><td>109</td><td></td></tr></table>",
|
| 607 |
+
"bbox": [
|
| 608 |
+
238,
|
| 609 |
+
609,
|
| 610 |
+
759,
|
| 611 |
+
771
|
| 612 |
+
],
|
| 613 |
+
"page_idx": 10
|
| 614 |
+
},
|
| 615 |
+
{
|
| 616 |
+
"type": "text",
|
| 617 |
+
"text": "Supplemental Table 2: Results of Med-PaLM 2 and other best in class models in prediction of PCL-C and PHQ-8 scores and clinical cutoffs.",
|
| 618 |
+
"bbox": [
|
| 619 |
+
116,
|
| 620 |
+
811,
|
| 621 |
+
857,
|
| 622 |
+
849
|
| 623 |
+
],
|
| 624 |
+
"page_idx": 10
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"type": "table",
|
| 628 |
+
"img_path": "images/45b3a65a9a9aafbdd96c13c7993e7e881f13ecdffe5a8817b8ea360cf75af641.jpg",
|
| 629 |
+
"table_caption": [
|
| 630 |
+
"Fisher's Exact Test"
|
| 631 |
+
],
|
| 632 |
+
"table_footnote": [
|
| 633 |
+
"Note: The frequency of correct diagnosis within each category was compared to the frequency of correct diagnosis across all other categories using Fisher's exact test. All significance tests are calculated based on two-tailed significance and odds ratios are calculated regarding the larger class."
|
| 634 |
+
],
|
| 635 |
+
"table_body": "<table><tr><td></td><td>Category</td><td>Diagnosis</td><td>Phi(p - value)</td><td>Odds Ratio</td></tr><tr><td>Depression</td><td>1.00</td><td>0.83</td><td>-0.27( = 0.09)</td><td>0.32</td></tr><tr><td>Anxiety</td><td>1.00</td><td>0.83</td><td>-0.02( >0,99)</td><td>0.09</td></tr><tr><td>Psychosis</td><td>0.86</td><td>0.71</td><td>-0.04( >0,99)</td><td>0.23</td></tr><tr><td>Trauma & Stress</td><td>0.80</td><td>0.60</td><td>0.14( = 0.58)</td><td>0.16</td></tr><tr><td>Addictive disorder</td><td>1.00</td><td>1.00</td><td>-0.16 ( = 0.57)</td><td>0.25</td></tr><tr><td>All</td><td>0.94</td><td>0.71</td><td></td><td></td></tr></table>",
|
| 636 |
+
"bbox": [
|
| 637 |
+
117,
|
| 638 |
+
127,
|
| 639 |
+
879,
|
| 640 |
+
362
|
| 641 |
+
],
|
| 642 |
+
"page_idx": 11
|
| 643 |
+
},
|
| 644 |
+
{
|
| 645 |
+
"type": "table",
|
| 646 |
+
"img_path": "images/2856526264bed05d99d843b34a61d674cfbc4f23753423b8b2822e314de164b3.jpg",
|
| 647 |
+
"table_caption": [
|
| 648 |
+
"Supplemental Table 3: Descriptive terms for Major Depressive Disorder and Posttraumatic Stress Disorder."
|
| 649 |
+
],
|
| 650 |
+
"table_footnote": [],
|
| 651 |
+
"table_body": "<table><tr><td>Depressive Disorders</td><td>Trauma & Stress Disorders</td></tr><tr><td>Phrases</td><td></td></tr><tr><td>Depressed mood</td><td>Reliving the trauma</td></tr><tr><td>Loss of interest</td><td>Negative changes in thinking and mood</td></tr><tr><td>Loss of energy</td><td>Physical reactions</td></tr><tr><td>Weight gain</td><td>Emotional reactions</td></tr><tr><td>Weight loss</td><td>Intrusive thoughts</td></tr><tr><td>Low energy</td><td>Avoid people</td></tr><tr><td>Making decisions</td><td>Avoid places</td></tr><tr><td>Change in sleep</td><td>Sleep disturbances</td></tr><tr><td>Simple tasks</td><td>Traumatic event</td></tr><tr><td>Loss of pleasures</td><td></td></tr><tr><td>Keywords</td><td></td></tr><tr><td>Appetite</td><td>Avoidance</td></tr><tr><td>Weight</td><td>Flashback</td></tr><tr><td>Sad</td><td>Nightmare</td></tr><tr><td>Hopeless</td><td>Startle</td></tr></table>",
|
| 652 |
+
"bbox": [
|
| 653 |
+
114,
|
| 654 |
+
522,
|
| 655 |
+
883,
|
| 656 |
+
897
|
| 657 |
+
],
|
| 658 |
+
"page_idx": 11
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"type": "table",
|
| 662 |
+
"img_path": "images/ae37536ba89d17ed96336410e702636080af60d231649fdf2f9f84422f168d66.jpg",
|
| 663 |
+
"table_caption": [],
|
| 664 |
+
"table_footnote": [
|
| 665 |
+
"Note: Keywords and phrases were taken from symptom descriptions in the Diagnostic and statistical Manual of Mental Disorders - 5th edition (DSM 5)."
|
| 666 |
+
],
|
| 667 |
+
"table_body": "<table><tr><td>Empty</td><td>Hypervigilant</td></tr><tr><td>Helpless</td><td>Numb</td></tr><tr><td>Cry</td><td>Hopeless</td></tr><tr><td>Energy</td><td>Angry</td></tr><tr><td>Overeat</td><td>Concentration</td></tr><tr><td>Think</td><td>Sleep</td></tr><tr><td>Concentrate</td><td>Trauma</td></tr><tr><td>Worthless</td><td></td></tr><tr><td>Guilt</td><td></td></tr><tr><td>Burden</td><td></td></tr><tr><td>Foggy</td><td></td></tr><tr><td>Focus</td><td></td></tr><tr><td>Attention</td><td></td></tr><tr><td>Slow</td><td></td></tr><tr><td>Sleep</td><td></td></tr><tr><td>Oversleep</td><td></td></tr><tr><td>Undersleep</td><td></td></tr><tr><td>Task</td><td></td></tr><tr><td>Fatigue</td><td></td></tr><tr><td>Pleasure</td><td></td></tr><tr><td>Agitation</td><td></td></tr></table>",
|
| 668 |
+
"bbox": [
|
| 669 |
+
116,
|
| 670 |
+
95,
|
| 671 |
+
620,
|
| 672 |
+
468
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 12
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "text",
|
| 678 |
+
"text": "References",
|
| 679 |
+
"text_level": 1,
|
| 680 |
+
"bbox": [
|
| 681 |
+
450,
|
| 682 |
+
573,
|
| 683 |
+
547,
|
| 684 |
+
590
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 12
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "list",
|
| 690 |
+
"sub_type": "ref_text",
|
| 691 |
+
"list_items": [
|
| 692 |
+
"1. Wang, P. S. et al. Twelve-month use of mental health services in the United States: results from the National Comorbidity Survey Replication. Arch. Gen. Psychiatry 62, 629-640 (2005).",
|
| 693 |
+
"2. US Preventive Services Task Force et al. Screening for Anxiety Disorders in Adults: US Preventive Services Task Force Recommendation Statement. JAMA 329, 2163-2170 (2023).",
|
| 694 |
+
"3. Whitman, A. et al. Addressing social determinants of health: Examples of successful evidence-based strategies and current federal efforts. Off Heal Policy.",
|
| 695 |
+
"4. Mulvaney-Day, N. et al. Screening for Behavioral Health Conditions in Primary Care"
|
| 696 |
+
],
|
| 697 |
+
"bbox": [
|
| 698 |
+
111,
|
| 699 |
+
607,
|
| 700 |
+
864,
|
| 701 |
+
907
|
| 702 |
+
],
|
| 703 |
+
"page_idx": 12
|
| 704 |
+
},
|
| 705 |
+
{
|
| 706 |
+
"type": "list",
|
| 707 |
+
"sub_type": "ref_text",
|
| 708 |
+
"list_items": [
|
| 709 |
+
"Settings: A Systematic Review of the Literature. J. Gen. Intern. Med. 33, 335-346 (2018).",
|
| 710 |
+
"5. Cuthbert, B. N. The role of RDoC in future classification of mental disorders. Dialogues Clin. Neurosci. 22, 81-85 (2020).",
|
| 711 |
+
"6. Devlin, J., Chang, M.-W., Lee, K. & Toutanova, K. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv [cs.CL] (2018).",
|
| 712 |
+
"7. Kaplan, J. et al. Scaling Laws for Neural Language Models. arXiv [cs.LG] (2020).",
|
| 713 |
+
"8. Anil, R. et al. PaLM 2 Technical Report. arXiv [cs.CL] (2023).",
|
| 714 |
+
"9. OpenAI. GPT-4 Technical Report. arXiv [cs.CL] (2023).",
|
| 715 |
+
"10. Singhal, K. et al. Large Language Models Encode Clinical Knowledge. arXiv [cs.CL] (2022).",
|
| 716 |
+
"11. Singhal, K. et al. Towards Expert-Level Medical Question Answering with Large Language Models. arXiv [cs.CL] (2023).",
|
| 717 |
+
"12. Le Glaz, A. et al. Machine Learning and Natural Language Processing in Mental Health: Systematic Review. J. Med. Internet Res. 23, e15708 (2021).",
|
| 718 |
+
"13. Ford, D. E. & Kamerow, D. B. Screening for psychiatric and substance abuse disorders in clinical practice. J. Gen. Intern. Med. 5, S37-41 (1990).",
|
| 719 |
+
"14. Gilbert, S., Harvey, H., Melvin, T., Vollebregt, E. & Wicks, P. Large language model AI chatbots require approval as medical devices. Nat. Med. (2023) doi:10.1038/s41591-023-02412-6.",
|
| 720 |
+
"15. Gratch, J. et al. The Distress Analysis Interview Corpus of human and computer interviews. in Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14) 3123-3128 (European Language Resources Association (ELRA), 2014)."
|
| 721 |
+
],
|
| 722 |
+
"bbox": [
|
| 723 |
+
112,
|
| 724 |
+
88,
|
| 725 |
+
879,
|
| 726 |
+
875
|
| 727 |
+
],
|
| 728 |
+
"page_idx": 13
|
| 729 |
+
},
|
| 730 |
+
{
|
| 731 |
+
"type": "list",
|
| 732 |
+
"sub_type": "ref_text",
|
| 733 |
+
"list_items": [
|
| 734 |
+
"16. Kroenke, K. et al. The PHQ-8 as a measure of current depression in the general population. J. Affect. Disord.",
|
| 735 |
+
"17. Wilkins, K. C., Lang, A. J. & Norman, S. B. Synthesis of the psychometric properties of the PTSD checklist (PCL) military, civilian, and specific versions. *Depress. Anxiety* 28, 596-606 (2011).",
|
| 736 |
+
"18. Thombs, B. D. et al. The diagnostic accuracy of the Patient Health Questionnaire-2 (PHQ-2), Patient Health Questionnaire-8 (PHQ-8), and Patient Health Questionnaire-9 (PHQ-9) for detecting major depression: protocol for a systematic review and individual patient data meta-analyses. Syst. Rev. 3, 124 (2014).",
|
| 737 |
+
"19. McDonald, S. D. & Calhoun, P. S. The diagnostic accuracy of the PTSD checklist: a critical review. Clin. Psychol. Rev. 30, 976-987 (2010).",
|
| 738 |
+
"20. McHugh, M. L. Interrater reliability: the kappa statistic. Biochem. Med. 22, 276-282 (2012).",
|
| 739 |
+
"21. Barnhill, J. W. DSM-5 Clinical Cases. (American Psychiatric Pub, 2013).",
|
| 740 |
+
"22. Galatzer-Levy, I. R. & Bryant, R. A. 636,120 Ways to Have Posttraumatic Stress Disorder. Perspect. Psychol. Sci. 8, 651-662 (2013)."
|
| 741 |
+
],
|
| 742 |
+
"bbox": [
|
| 743 |
+
109,
|
| 744 |
+
88,
|
| 745 |
+
879,
|
| 746 |
+
632
|
| 747 |
+
],
|
| 748 |
+
"page_idx": 14
|
| 749 |
+
}
|
| 750 |
+
]
|