MinerU Batch 7ccabdf0-26a9-43a2-95b9-6c7682317003 (Part 5/8)
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +8 -0
- data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_content_list.json +1338 -0
- data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_model.json +0 -0
- data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_origin.pdf +3 -0
- data/2025/2504_03xxx/2504.03600/full.md +266 -0
- data/2025/2504_03xxx/2504.03600/images/1085ef736e68e6396ba4ffec8a770702736c2199e39003abff238711c34f1928.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/130fd5f966b0e7ba408da3d2cf52c7359fbbc84e45fd10032cd51ef78bcf3cf1.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/17fd29cbf2cd19784c0bb0d2cfb0e23354962e03a41bb725e1fbaa214aac2aef.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/3868cdfd0b9dda8f2a8d7a45edb94cedec57782bbe1280e2eb66b7773ca707b2.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/39f85da958d90f259c0b08e48d0cd2f6ee0c6fb10e5f02130280442abe1a26e7.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/4851f885c97704d72c4fa799ec785f4b12a8acc796bb89a31287d7a93e590b58.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/5adc6f2de8fb8211005aa566f50fbc3e799ffb2ddf53b6a0675bab8455dda641.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/60084a3f5a332127b3d5175f91f9e633214ac09405a0bf3c48f56eb35078d71d.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/732aab5a71796f9876fca6ce2f448fd5f7eb67d7afaa475bcf3cdec437b0d556.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/7fdcc8eed4e5e4497f7ac24395973bcf772b520fc3584b58a0307c6576742633.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/85593d1218f423c1e77a2d537f1efc37f4cbe8e0a39d6d6d7e504ef0e701d4f6.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/a6580f62096183a517a5efbbf8dc3cc33516b4fe224c9d73262984951b762cf9.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/b73121f12808a4f15320ee8ab6e137119314a0e4cbc6d98ff9a1dabf4554de6f.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/ba99d36fc4ebe552b5550e833041c34c6c9c450be92d14b1b940279625df93c6.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/c0d5c685a4c1e7652706f3329c06bdb7685a7b0dd30ef73d04cad8f009f5502b.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/images/ebbc9e0f16c0a44fad1a4bf9ae569948bc6d1ef7ebee52b2e64705398d7e9332.jpg +3 -0
- data/2025/2504_03xxx/2504.03600/layout.json +0 -0
- data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_content_list.json +0 -0
- data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_model.json +0 -0
- data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_origin.pdf +3 -0
- data/2025/2504_03xxx/2504.03601/full.md +497 -0
- data/2025/2504_03xxx/2504.03601/images/203ec427caf0475b6de20a1c27e8f5d86efacda5f8e6acae7c169101e65fc728.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/2b8e0d56103bcc42f7476f84e79b2069ef908ea243fa876358afda18c118752d.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/2ef11530635023def2c6bbd7d0095d4770537183662847e5a80478a095b50a2b.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/3199bca066d064c413546149c857fe2287fe8b36fd97a2b514d3efc0fc9c03fe.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/3f64f67378a0b7e6fbf7151b5b45f29a4fe2f91c63b430da9d64017780c1bf53.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/4203782c4a6a25dc3bb32f2d8a1eb074f8aedff6a4e743415259aca7c6bc1c8a.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/62e072e86f40da4411690e9d06828dce77c6e6f95b0fdd8e7b80ea4102d43b8b.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/7152beccd11e88f51abc2a53fae6549e9e5f17e52485b4003e4db45e28116eb6.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/aadc07b5135ebf6aaf609b46288e97e7822c3c262f062f92421cf17449a351d4.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/cd559fb87d18a9d2a2968961d57a1b5b0bc196e8abd6b0ce7684367dc6006373.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/ce6e74d3af5194803e096a95af152e26799e17b4e2a522e5baaefbd2b3266ff7.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/e87359d51cf1bb9b7ca3ead71e055c135bd96e9fe052a91ff53e63f936dd0e2f.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/images/e87c3e3d8cfa887c8723e9d1e30c35a087337b7bff360f8c9c8096b084ddeccd.jpg +3 -0
- data/2025/2504_03xxx/2504.03601/layout.json +0 -0
- data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_content_list.json +0 -0
- data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_model.json +0 -0
- data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_origin.pdf +3 -0
- data/2025/2504_03xxx/2504.03624/full.md +0 -0
- data/2025/2504_03xxx/2504.03624/images/1cc66296e8a29104cfea243cfb83fc6134021779f2af488d4d5e99bd42d5bcc0.jpg +3 -0
- data/2025/2504_03xxx/2504.03624/images/1fecb8a82dcbadd166b9bd03dd08128da3541cc761c11a16af0b819fd4420831.jpg +3 -0
- data/2025/2504_03xxx/2504.03624/images/33ea7131b53f28909fe42f8b25ac0b9c3757da5cc268ff15a33fa8ad908acee5.jpg +3 -0
- data/2025/2504_03xxx/2504.03624/images/3473a2175257e4fce9156c179b984736feb1dc0ccc692489eb89718fd065bd5c.jpg +3 -0
- data/2025/2504_03xxx/2504.03624/images/3639835691b6d953b865fb514ef3de322e2fe0c3e5392ca41115f3dd6ef855f5.jpg +3 -0
- data/2025/2504_03xxx/2504.03624/images/37db35014a6a18bfa0a1e31ed569540424dd14958513957d80b9aa4cd1ded650.jpg +3 -0
.gitattributes
CHANGED
|
@@ -1365,3 +1365,11 @@ data/2025/2504_04xxx/2504.04158/f2103d47-786d-466a-b93a-76660f3198b5_origin.pdf
|
|
| 1365 |
data/2025/2504_04xxx/2504.04259/740753b2-ad81-48a6-b430-9d2ffd0735ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1366 |
data/2025/2504_04xxx/2504.04264/1fcb6f14-6316-4151-b04e-aa0a2fc81413_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1367 |
data/2025/2504_05xxx/2504.05336/ae59f1e2-9358-495a-9fc7-19dc6aef4aed_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1365 |
data/2025/2504_04xxx/2504.04259/740753b2-ad81-48a6-b430-9d2ffd0735ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1366 |
data/2025/2504_04xxx/2504.04264/1fcb6f14-6316-4151-b04e-aa0a2fc81413_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1367 |
data/2025/2504_05xxx/2504.05336/ae59f1e2-9358-495a-9fc7-19dc6aef4aed_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1368 |
+
data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1369 |
+
data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1370 |
+
data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1371 |
+
data/2025/2504_03xxx/2504.03846/f5fc10ed-0805-4b05-9c26-0167a6778545_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1372 |
+
data/2025/2504_03xxx/2504.03888/09546206-0abf-4d10-90c2-36f7036856ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1373 |
+
data/2025/2504_03xxx/2504.03964/aaa072ba-9db7-42de-83f7-25d14235af3f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1374 |
+
data/2025/2504_04xxx/2504.04051/dc50d9f3-81a9-461a-a16b-57310bb49133_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 1375 |
+
data/2025/2504_13xxx/2504.13889/1a0f13f7-5e07-471b-b90a-7f3cd28898ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_content_list.json
ADDED
|
@@ -0,0 +1,1338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "MedSAM2: Segment Anything in 3D Medical Images and Videos",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
111,
|
| 8 |
+
44,
|
| 9 |
+
887,
|
| 10 |
+
114
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Jun Ma*, Zongxin Yang*, Sumin Kim, Bihui Chen, Mohammed Baharoon, Adibvafa Fallahpour, Reza Asakereh, Hongwei Lyu, and Bo Wang†",
|
| 17 |
+
"bbox": [
|
| 18 |
+
120,
|
| 19 |
+
128,
|
| 20 |
+
875,
|
| 21 |
+
165
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract",
|
| 28 |
+
"text_level": 1,
|
| 29 |
+
"bbox": [
|
| 30 |
+
468,
|
| 31 |
+
184,
|
| 32 |
+
527,
|
| 33 |
+
196
|
| 34 |
+
],
|
| 35 |
+
"page_idx": 0
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"type": "text",
|
| 39 |
+
"text": "Medical image and video segmentation is a critical task for precision medicine, which has witnessed considerable progress in developing task or modality-specific and generalist models for 2D images. However, there have been limited studies on building general-purpose models for 3D images and videos with comprehensive user studies. Here, we present MedSAM2, a promptable segmentation foundation model for 3D image and video segmentation. The model is developed by fine-tuning the Segment Anything Model 2 on a large medical dataset with over 455,000 3D image-mask pairs and 76,000 frames, outperforming previous models across a wide range of organs, lesions, and imaging modalities. Furthermore, we implement a human-in-the-loop pipeline to facilitate the creation of large-scale datasets resulting in, to the best of our knowledge, the most extensive user study to date, involving the annotation of 5,000 CT lesions, 3,984 liver MRI lesions, and 251,550 echocardiogram video frames, demonstrating that MedSAM2 can reduce manual costs by more than $85\\%$ . MedSAM2 is also integrated into widely used platforms with user-friendly interfaces for local and cloud deployment, making it a practical tool for supporting efficient, scalable, and high-quality segmentation in both research and healthcare environments.",
|
| 40 |
+
"bbox": [
|
| 41 |
+
135,
|
| 42 |
+
203,
|
| 43 |
+
861,
|
| 44 |
+
349
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "INTRODUCTION",
|
| 51 |
+
"text_level": 1,
|
| 52 |
+
"bbox": [
|
| 53 |
+
73,
|
| 54 |
+
446,
|
| 55 |
+
200,
|
| 56 |
+
460
|
| 57 |
+
],
|
| 58 |
+
"page_idx": 0
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"type": "text",
|
| 62 |
+
"text": "Medical image segmentation plays a pivotal role in numerous clinical applications, including anatomical structure analysis [1], disease diagnosis [2], surgery planning, and treatment monitoring [3]. By delineating the boundaries of organs, lesions, and other relevant anatomies, segmentation algorithms provide clinicians with crucial information for precise disease analysis. Over the past decade, deep learning-based methods have revolutionized this field, delivering unprecedented performance on various segmentation tasks and benchmarks. For example, DeepLab [4] [5] has achieved human-level performance in left ventricle segmentation from echocardiography for ejection fraction assessment [1], which has proven to save time for both sonographers and cardiologists via blinding and randomization clinical trial [6]. U-Net [7] has been employed for accurate cell detection and segmentation in light microscopy images [8] and 3D nnU-Net [9] has been widely used in various anatomy and lesion segmentation, such as heart chamber segmentation in Magnetic Resonance Imaging (MRI) scans [2], pancreas cancer and abdominal organ segmentation in Computed Tomograph (CT) scans [3] [10], and whole-body lesion segmentation in Positron Emission Tomography (PET) scans [11].",
|
| 63 |
+
"bbox": [
|
| 64 |
+
71,
|
| 65 |
+
465,
|
| 66 |
+
924,
|
| 67 |
+
627
|
| 68 |
+
],
|
| 69 |
+
"page_idx": 0
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"type": "text",
|
| 73 |
+
"text": "Driven by advanced network architectures [12] and large-scale datasets [13], recent trends in segmentation present a paradigm shift from specialist models tailored for specific tasks to generalist or foundation models capable of performing segmentation without extensive task-specific model development [14]-[16]. One prominent example is the Segment Anything Model (SAM) [13], a pioneer segmentation foundation model in computer vision that has shown remarkable generalization ability across a wide range of two-dimensional (2D) natural image segmentation tasks. However, due to the substantial domain gap, its performance remains suboptimal in medical images [17] [18]. Despite these limitations, SAM can be effectively adapted to the medical domain through transfer learning. For instance, models such as MedSAM [19] and SAM-Med [20] [21] have demonstrated strong capabilities in segmenting various organs and abnormalities across diverse medical imaging modalities by fine-tuning SAM on large-scale medical datasets.",
|
| 74 |
+
"bbox": [
|
| 75 |
+
71,
|
| 76 |
+
627,
|
| 77 |
+
926,
|
| 78 |
+
760
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "list",
|
| 84 |
+
"sub_type": "text",
|
| 85 |
+
"list_items": [
|
| 86 |
+
"- Jun Ma is with AI Collaborative Centre, University Health Network; Vector Institute, Toronto, Canada (* Equal Contribution).",
|
| 87 |
+
"Zongxin Yang is with Department of Biomedical Informatics, Harvard Medical School, Harvard University, Boston, USA (* Equal Contribution).",
|
| 88 |
+
"- Sumin Kim is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.",
|
| 89 |
+
"- Bihui Chen is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.",
|
| 90 |
+
"- Mohammed Baharoon is with Department of Biomedical Informatics, Harvard Medical School, Harvard University, Boston, USA. Part of this work was done at the University of Toronto, Toronto, Canada.",
|
| 91 |
+
"Adibvaf Fallahpour is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.",
|
| 92 |
+
"- Reza Asakereh participated in this project when he was with Peter Munk Cardiac Centre, University Health Network, Toronto, Canada.",
|
| 93 |
+
"- Hongwei Lyu is with Peter Munk Cardiac Centre, University Health Network, Toronto, Canada.",
|
| 94 |
+
"Bo Wang is with Peter Munk Cardiac Centre and AI Hub, University Health Network; Department of Laboratory Medicine and Pathobiology and Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada(†Corresponding Author). E-mail: bowang@vectorinstitute.ai"
|
| 95 |
+
],
|
| 96 |
+
"bbox": [
|
| 97 |
+
73,
|
| 98 |
+
773,
|
| 99 |
+
921,
|
| 100 |
+
936
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "aside_text",
|
| 106 |
+
"text": "arXiv:2504.03600v1 [eess.IV] 4 Apr 2025",
|
| 107 |
+
"bbox": [
|
| 108 |
+
22,
|
| 109 |
+
268,
|
| 110 |
+
60,
|
| 111 |
+
715
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "page_number",
|
| 117 |
+
"text": "1",
|
| 118 |
+
"bbox": [
|
| 119 |
+
911,
|
| 120 |
+
32,
|
| 121 |
+
919,
|
| 122 |
+
42
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Despite the potential of these foundation models, their application to medical imaging is still limited and faces three main limitations. First, most medical image segmentation foundation models [19] [20] are primarily designed for 2D image data and may not capture the three-dimensional (3D) spatial relationships or temporal information in volumetric and video medical data. Second, although some studies have extended SAM to 3D image segmentation using 3D image encoders [21] and adapters [22]-[24] or developed interactive 3D segmentation models [25]-[27] to incorporate manual corrections, there is still a lack of general models to segment both 3D images and videos, which are frequently necessary in real-world clinical workflows. The state-of-the-art video segmentation model, SAM2 [28], has shown great potential to fill this gap [29]-[32], but adaption on large-scale datasets has been underexplored. Finally, large-scale validation of these models in practical image-labeling scenarios remains notably absent, leaving important questions about their scalability and utility in facilitating high-throughput medical image annotation tasks.",
|
| 129 |
+
"bbox": [
|
| 130 |
+
71,
|
| 131 |
+
51,
|
| 132 |
+
923,
|
| 133 |
+
198
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 1
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "In this work, we address these limitations by presenting MedSAM2, a general model for 3D medical image and video segmentation. Specifically, we first curate a large-scale dataset consisting of more than 455,000 3D image-mask pairs and 76,000 annotated video frames, spanning multiple organs, pathologies, and imaging protocols for model development. Then, we build MedSAM2 by modifying and fine-tuning SAM2 on the large dataset. Extensive experiments show that MedSAM2 is capable of handling both volumetric medical scans and successive video frames, enabling versatile segmentation across diverse medical data. Furthermore, we conduct three user studies to demonstrate that MedSAM2 substantially facilitates annotation workflows for high-throughput and efficient segmentation, substantially reducing the time and effort required for creating large-scale medical datasets in various imaging modalities. MedSAM2 has the potential to transform clinical workflows by enabling more efficient diagnostic processes, treatment planning, and longitudinal monitoring across cardiology, oncology, and surgical specialties, where precise 3D organ and lesion segmentation is critical but traditionally time-consuming.",
|
| 140 |
+
"bbox": [
|
| 141 |
+
71,
|
| 142 |
+
198,
|
| 143 |
+
924,
|
| 144 |
+
363
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 1
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "RESULTS",
|
| 151 |
+
"text_level": 1,
|
| 152 |
+
"bbox": [
|
| 153 |
+
73,
|
| 154 |
+
380,
|
| 155 |
+
153,
|
| 156 |
+
393
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 1
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "text",
|
| 162 |
+
"text": "Dataset and model architecture",
|
| 163 |
+
"text_level": 1,
|
| 164 |
+
"bbox": [
|
| 165 |
+
73,
|
| 166 |
+
398,
|
| 167 |
+
308,
|
| 168 |
+
412
|
| 169 |
+
],
|
| 170 |
+
"page_idx": 1
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"type": "text",
|
| 174 |
+
"text": "A large amount of training data is the foundation for developing generalist segmentation models. We assembled a large-scale and diverse 3D medical image and video dataset based on public datasets, including various normal anatomical structures and pathologies from various medical imaging modalities (Fig 1a, Methods, Supplementary Table 1). In particular, we collected 363,161, 14,818, and 77,154 3D image-mask pairs for CT, PET, and MRI modalities, respectively. In addition, we curated 19,232 and 56,462 annotated frames for ultrasound and endoscopy, respectively.",
|
| 175 |
+
"bbox": [
|
| 176 |
+
71,
|
| 177 |
+
417,
|
| 178 |
+
924,
|
| 179 |
+
491
|
| 180 |
+
],
|
| 181 |
+
"page_idx": 1
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"type": "text",
|
| 185 |
+
"text": "The pre-trained SAM2 model [28] has provided a strong backbone for general feature representations, which was trained on 256 A100 GPUs. To reuse the pre-trained model weights and avoid prohibitive computing costs, MedSAM2 adopted the SAM2 network architecture, including an image encoder, a memory attention module, a prompt encoder, and a mask decoder (Fig 1b). The image encoder extracts multi-scale features from each 2D slice or video frame using the hierarchical vision transformer [33] (Hiera), which achieves faster and more accurate performance than the naïve vision transformer [12] in SAM. The memory attention module employs transformer blocks with self-attention and cross-attention mechanisms to condition current frame features on previous frames' predictions through a streaming memory bank. The prompt encoders convert various user interactions (i.e., points, bounding boxes, and masks) to embedding. We used bounding boxes as the main prompt because they are less ambiguous in specifying the segmentation target, making them suitable for most organs and lesions. Specifically, for 3D images, we applied the bounding box prompt on the middle slice and propagated the segmentation mask bidirectionally toward both ends of the volume data. Finally, the mask decoder incorporates memory-conditioned features and prompt embeddings to produce accurate segmentation masks.",
|
| 186 |
+
"bbox": [
|
| 187 |
+
71,
|
| 188 |
+
491,
|
| 189 |
+
924,
|
| 190 |
+
666
|
| 191 |
+
],
|
| 192 |
+
"page_idx": 1
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"type": "text",
|
| 196 |
+
"text": "Existing studies have demonstrated that fine-tuning all parts of the model yields better performance than only fine-tuning parts of the model, such as the image encoder, the mask decoder, and the prompt encoder [34], [35]. For MedSAM2, we employ a comprehensive full-model fine-tuning approach using the lightweight SAM2.1-Tiny variant, which achieved competitive performance with fewer parameters compared to larger variants. During fine-tuning, we applied lower learning rates for the image encoder to preserve pre-trained feature extraction capabilities and higher learning rates for other model parts. We carefully balanced our training data with different sampling rates across 3D images and videos to ensure optimal performance across diverse modalities (Methods).",
|
| 197 |
+
"bbox": [
|
| 198 |
+
71,
|
| 199 |
+
666,
|
| 200 |
+
924,
|
| 201 |
+
770
|
| 202 |
+
],
|
| 203 |
+
"page_idx": 1
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"type": "text",
|
| 207 |
+
"text": "Performance on various 3D medical image and video segmentation tasks",
|
| 208 |
+
"text_level": 1,
|
| 209 |
+
"bbox": [
|
| 210 |
+
71,
|
| 211 |
+
787,
|
| 212 |
+
617,
|
| 213 |
+
801
|
| 214 |
+
],
|
| 215 |
+
"page_idx": 1
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"type": "text",
|
| 219 |
+
"text": "We first evaluated the trained model on the holdout 3D test set, which contains 40 segmentation tasks from different cohorts across a wide range of organs and lesions in CT, MRI, and PET scans. We also compared the latest SAM2.1 models with different sizes (tiny, small, base, and large) [28] and the current state-of-the-art (SOTA) bounding box-based segmentation foundation model (EfficientMedSAM-Top1) [36], which is the winning solution in the CVPR 2024 Efficient MedSAMs competition [37]. All models were initialized with a bounding box prompt on the middle slice of the segmentation target. Each model first generated a 2D mask at the middle slice and then propagated it bidirectionally to create the full 3D segmentation.",
|
| 220 |
+
"bbox": [
|
| 221 |
+
71,
|
| 222 |
+
805,
|
| 223 |
+
924,
|
| 224 |
+
907
|
| 225 |
+
],
|
| 226 |
+
"page_idx": 1
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"type": "text",
|
| 230 |
+
"text": "Fig. 2a shows the quantitative results on the 3D testing set (Supplementary Table 2-3 and Fig. 1). The SAM2.1 models exhibit similar performance across all categories, with no significant differences in median DSC scores ( $p$ -value",
|
| 231 |
+
"bbox": [
|
| 232 |
+
71,
|
| 233 |
+
907,
|
| 234 |
+
924,
|
| 235 |
+
938
|
| 236 |
+
],
|
| 237 |
+
"page_idx": 1
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"type": "page_number",
|
| 241 |
+
"text": "2",
|
| 242 |
+
"bbox": [
|
| 243 |
+
911,
|
| 244 |
+
32,
|
| 245 |
+
921,
|
| 246 |
+
42
|
| 247 |
+
],
|
| 248 |
+
"page_idx": 1
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"type": "image",
|
| 252 |
+
"img_path": "images/ba99d36fc4ebe552b5550e833041c34c6c9c450be92d14b1b940279625df93c6.jpg",
|
| 253 |
+
"image_caption": [
|
| 254 |
+
"a.",
|
| 255 |
+
"3D CT & PET (378K 3D image-mask pairs)"
|
| 256 |
+
],
|
| 257 |
+
"image_footnote": [],
|
| 258 |
+
"bbox": [
|
| 259 |
+
112,
|
| 260 |
+
133,
|
| 261 |
+
883,
|
| 262 |
+
231
|
| 263 |
+
],
|
| 264 |
+
"page_idx": 2
|
| 265 |
+
},
|
| 266 |
+
{
|
| 267 |
+
"type": "image",
|
| 268 |
+
"img_path": "images/b73121f12808a4f15320ee8ab6e137119314a0e4cbc6d98ff9a1dabf4554de6f.jpg",
|
| 269 |
+
"image_caption": [
|
| 270 |
+
"3D MRI (77K 3D images-mask pairs)",
|
| 271 |
+
"Ultrasound and Endoscopy Videos (76K frames)"
|
| 272 |
+
],
|
| 273 |
+
"image_footnote": [],
|
| 274 |
+
"bbox": [
|
| 275 |
+
112,
|
| 276 |
+
250,
|
| 277 |
+
883,
|
| 278 |
+
334
|
| 279 |
+
],
|
| 280 |
+
"page_idx": 2
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"type": "image",
|
| 284 |
+
"img_path": "images/60084a3f5a332127b3d5175f91f9e633214ac09405a0bf3c48f56eb35078d71d.jpg",
|
| 285 |
+
"image_caption": [],
|
| 286 |
+
"image_footnote": [],
|
| 287 |
+
"bbox": [
|
| 288 |
+
112,
|
| 289 |
+
354,
|
| 290 |
+
883,
|
| 291 |
+
532
|
| 292 |
+
],
|
| 293 |
+
"page_idx": 2
|
| 294 |
+
},
|
| 295 |
+
{
|
| 296 |
+
"type": "image",
|
| 297 |
+
"img_path": "images/ebbc9e0f16c0a44fad1a4bf9ae569948bc6d1ef7ebee52b2e64705398d7e9332.jpg",
|
| 298 |
+
"image_caption": [
|
| 299 |
+
"Fig. 1. Dataset and network architecture for MedSAM2 development. a, The dataset includes diverse 3D CT, PET, MRI images, ultrasound, and endoscopy videos. For each 3D image example, we visualize both 2D slices and 3D structures. For each video example, we visualize frames at different time points. b, MedSAM2 is a promptable segmentation network with an image encoder, a prompt encoder, a memory attention module, and a mask decoder. The image encoder extracts multiscale features from each frame or 2D slice. The memory attention module conditions the current frame features on past frames' features and predictions using streaming memory. The mask decoder generates accurate segmentation masks based on bounding box prompts and memory-conditioned features. This architecture enables MedSAM2 to effectively segment both 3D medical images and videos by exploiting spatial continuity across slices and frames."
|
| 300 |
+
],
|
| 301 |
+
"image_footnote": [],
|
| 302 |
+
"bbox": [
|
| 303 |
+
114,
|
| 304 |
+
535,
|
| 305 |
+
885,
|
| 306 |
+
797
|
| 307 |
+
],
|
| 308 |
+
"page_idx": 2
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"type": "page_number",
|
| 312 |
+
"text": "3",
|
| 313 |
+
"bbox": [
|
| 314 |
+
911,
|
| 315 |
+
32,
|
| 316 |
+
921,
|
| 317 |
+
42
|
| 318 |
+
],
|
| 319 |
+
"page_idx": 2
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"type": "image",
|
| 323 |
+
"img_path": "images/1085ef736e68e6396ba4ffec8a770702736c2199e39003abff238711c34f1928.jpg",
|
| 324 |
+
"image_caption": [
|
| 325 |
+
"Fig. 2. Segmentation performance on hold-out 3D image and video datasets. a, Performance distribution of six models across five typical 3D segmentation tasks in terms of Dice similarity coefficient (DSC) scores: CT organs $(N = 783)$ , CT Lesions $(N = 409)$ , MRI organs $(N = 734)$ , MRI lesions $(N = 318)$ , and PET lesions $(N = 65)$ . The center line within the box represents the median value, with the bottom and top bounds of the box delineating the 25th and 75th percentiles, respectively. Whiskers are chosen to show the 1.5 of the interquartile range. Up-triangles denote the minima and down-triangles denote the maxima. b, Visualized segmentation examples for stomach and liver cancer in computed tomography (CT), and spleen and brain cancer in Magnetic Resonance Imaging (MRI). Blue: initial bounding box prompts; Yellow: reference standards; Blue: best SAM2.1 segmentation results; Green: EfficientMedSAM-Top1 segmentation results; Magenta: MedSAM2 segmentation results. c, Performance distribution of SAM2.1 and MedSAM2 for left ventricle $(N = 100)$ , left ventricle epicardium $(N = 100)$ , and left atrium $(N = 100)$ segmentation in ultrasound videos and easy $(N = 119)$ and hard $(N = 54)$ polyp segmentation in endoscopy videos. d, Visualized segmentation examples for heart chambers and polyps in ultrasound and endoscopy videos, respectively."
|
| 326 |
+
],
|
| 327 |
+
"image_footnote": [],
|
| 328 |
+
"bbox": [
|
| 329 |
+
81,
|
| 330 |
+
83,
|
| 331 |
+
919,
|
| 332 |
+
773
|
| 333 |
+
],
|
| 334 |
+
"page_idx": 3
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"type": "page_number",
|
| 338 |
+
"text": "4",
|
| 339 |
+
"bbox": [
|
| 340 |
+
911,
|
| 341 |
+
32,
|
| 342 |
+
921,
|
| 343 |
+
42
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 3
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "text",
|
| 349 |
+
"text": "$>0.05)$ . This suggests that increasing model size within the SAM2.1 family does not necessarily translate to substantial improvements in segmentation accuracy for 3D medical images. The EfficientMedSAM-Top1 outperforms all SAM2.1 variants in CT Organs, CT Lesions, and MR Lesions, achieving median DSC scores of $83.55\\%$ (interquartile range (IQR): $67.20 - 91.78\\%$ ), $77.95\\%$ (69.15-84.81%), and $82.25\\%$ (68.30-90.53%), respectively. However, its performance is not consistently superior to SAM2.1 models in MRI organ and PET lesion tasks, which is $9.22\\%$ and $2.74\\%$ lower than the best SAM2.1 model, respectively. One possible reason could be that the MRI Organs dataset includes images from unseen MRI sequences that introduce variations in image characteristics.",
|
| 350 |
+
"bbox": [
|
| 351 |
+
71,
|
| 352 |
+
53,
|
| 353 |
+
924,
|
| 354 |
+
155
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 4
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "The comparable performance of different SAM2.1 model sizes motivated us to build MedSAM2 by fine-tuning the lightweight SAM2.1-Tiny model, aiming to improve segmentation performance for medical image datasets without relying on immense computational resources. MedSAM2 consistently achieves the highest DSC scores across all targets (CT organs: $88.84\\%$ (80.03-94.03%), CT lesions: $86.68\\%$ (74.32-91.14%), MRI organs: $87.06\\%$ (82.96-90.04%), MRI lesions: $88.37\\%$ (79.91-93.26%), PET lesions $87.22\\%$ (79.07-90.45)), indicating that transfer learning is an effective way to adapt general domain foundation models to the medical image domain. First, for simple and well-defined anatomical structures, such as the kidneys and Lungs, all methods, including SAM2.1 variants, achieve high DSC scores (often above $95\\%$ ), indicating that even general-purpose models can segment these targets accurately due to their clear boundaries and consistent appearances. However, for more challenging targets with heterogeneous appearances and complex shapes, such as the kidney lesions and pancreas, MedSAM2 shows substantial performance improvements, highlighting MedSAM2's enhanced ability to handle greater anatomical variability. Qualitative results (Fig. 2b) show that MedSAM2 produces more accurate and robust boundaries than other methods during propagation, owing to its memory design that effectively models temporal information across slices.",
|
| 361 |
+
"bbox": [
|
| 362 |
+
71,
|
| 363 |
+
155,
|
| 364 |
+
924,
|
| 365 |
+
343
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 4
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "text",
|
| 371 |
+
"text": "Next, we evaluated video segmentation performance for heart chambers and polyp segmentation in cardiac ultrasound (Echocardiography) and endoscopy videos on the widely used CAMUS [38] and SUN [39], [40] datasets, respectively (Fig. 2c, Supplementary Table 3). The heart chamber dataset focuses on delineating three structures: left ventricle, left ventricle epicardium, and left atrium. All SAM2.1 models perform similarly for left ventricle and atrium segmentation with high DSC scores, but have greater variance for the left ventricle epicardium in DSC score because of the heterogeneous appearances and diverse shape changes. MedSAM2 achieves better performance across the three tasks, with the highest DSC scores of $96.13\\%$ (95.09-97.15%), $93.10\\%$ (91.07-94.11%), $95.79\\%$ (94.38-96.96%) and less spread DSC distributions for the left ventricle, left ventricle epicardium, and left atrium, respectively, indicating better robustness in segmenting dynamic structures.",
|
| 372 |
+
"bbox": [
|
| 373 |
+
71,
|
| 374 |
+
345,
|
| 375 |
+
924,
|
| 376 |
+
474
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 4
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "text",
|
| 382 |
+
"text": "The polyp test set contains an easy and a hard subset. On the easy polyp subset, SAM2.1 models achieve comparable results, with similar median DSC scores ranging from $92.11\\%$ (75.74-96.47%) to $93.87\\%$ (77.48-96.64%) across different model sizes. MedSAM2 obtains a similar median DSC score of $92.24\\%$ (85.15-96.11%), but exhibits a much more compact distribution with a smaller interquartile range and fewer outliers. On the hard polyp subset, SAM2.1 models show a clear DSC score drop of $6.29\\%$ to $10.33\\%$ with wider variability and some outliers with low DSC scores. In contrast, MedSAM2 outperforms SAM2.1 with a noticeable gap and more consistent DSC scores of $92.22\\%$ (83.37-95.88%).",
|
| 383 |
+
"bbox": [
|
| 384 |
+
71,
|
| 385 |
+
476,
|
| 386 |
+
924,
|
| 387 |
+
563
|
| 388 |
+
],
|
| 389 |
+
"page_idx": 4
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"type": "text",
|
| 393 |
+
"text": "Qualitative segmentation results (Fig. 2d) show that SAM2.1 models struggle to capture fine structural boundaries, especially in regions with diverse contrast or complex tissue transitions. For example, the contours of SAM2.1 align with the anatomical boundaries of the left ventricle and atrium for most frames, but the segmentation quality deteriorates remarkably for the left ventricle epicardium, where the contours exhibit irregular boundaries, fragmented edges, and deviations from the true anatomical shape. MedSAM2 appears to produce smoother and more accurate segmentation results with fewer misaligned contours. For the polyp segmentation, while all models successfully track the polyp, SAM2.1 exhibits over-segmentation by including surrounding tissues in some frames. This suggests that SAM2.1 models have difficulty maintaining spatial coherence for medical video segmentation. MedSAM2 provides a more refined and closely fitting contour, indicating its superior capability in distinguishing polyps from the background, particularly in challenging lighting and texture variations.",
|
| 394 |
+
"bbox": [
|
| 395 |
+
71,
|
| 396 |
+
564,
|
| 397 |
+
924,
|
| 398 |
+
708
|
| 399 |
+
],
|
| 400 |
+
"page_idx": 4
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"type": "text",
|
| 404 |
+
"text": "Altogether, SAM2.1 models perform well in simpler cases but exhibit higher variability and lower accuracy in difficult segmentation tasks. MedSAM2 consistently outperforms SAM2.1 across all tasks and produces more consistent and reliable segmentation results with reduced variability across different tasks, particularly in challenging cases, highlighting the importance of domain-specific fine-tuning for foundation models in medical image and video segmentation.",
|
| 405 |
+
"bbox": [
|
| 406 |
+
71,
|
| 407 |
+
709,
|
| 408 |
+
924,
|
| 409 |
+
768
|
| 410 |
+
],
|
| 411 |
+
"page_idx": 4
|
| 412 |
+
},
|
| 413 |
+
{
|
| 414 |
+
"type": "text",
|
| 415 |
+
"text": "MedSAM2 enables efficient 3D lesion annotation for large 3D CT and MRI datasets",
|
| 416 |
+
"text_level": 1,
|
| 417 |
+
"bbox": [
|
| 418 |
+
71,
|
| 419 |
+
786,
|
| 420 |
+
684,
|
| 421 |
+
801
|
| 422 |
+
],
|
| 423 |
+
"page_idx": 4
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"text": "Beyond evaluating the segmentation accuracy of MedSAM2, we assess its practical value in assisting annotations of large-scale 3D lesion datasets. Accurate and efficient lesion segmentation in 3D medical images represents one of the most critical tasks for quantitative assessment of disease progression, treatment planning, and response evaluation. However, the heterogeneity of lesions (such as size, shape, texture, and contrast) and the noise and artifacts inherent in medical images make manual segmentation a time-consuming and labor-intensive task.",
|
| 428 |
+
"bbox": [
|
| 429 |
+
71,
|
| 430 |
+
805,
|
| 431 |
+
924,
|
| 432 |
+
878
|
| 433 |
+
],
|
| 434 |
+
"page_idx": 4
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"text": "To address this limitation, we developed a human-in-the-loop pipeline with MedSAM2 to assist in 3D lesion annotation (Fig. 3a). Human annotators first draw a 2D bounding box, specifying the lesion at the middle slice, where the lesion usually has the longest diameter. Lesion diameter is commonly used in RECIST (Response Evaluation Criteria in Solid Tumors) [41] to measure the lesion burden in cancer therapeutics. The 2D image and the lesion bounding box are fed into",
|
| 439 |
+
"bbox": [
|
| 440 |
+
71,
|
| 441 |
+
878,
|
| 442 |
+
924,
|
| 443 |
+
938
|
| 444 |
+
],
|
| 445 |
+
"page_idx": 4
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "page_number",
|
| 449 |
+
"text": "5",
|
| 450 |
+
"bbox": [
|
| 451 |
+
911,
|
| 452 |
+
32,
|
| 453 |
+
921,
|
| 454 |
+
42
|
| 455 |
+
],
|
| 456 |
+
"page_idx": 4
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "image",
|
| 460 |
+
"img_path": "images/3868cdfd0b9dda8f2a8d7a45edb94cedec57782bbe1280e2eb66b7773ca707b2.jpg",
|
| 461 |
+
"image_caption": [
|
| 462 |
+
"a.",
|
| 463 |
+
"d."
|
| 464 |
+
],
|
| 465 |
+
"image_footnote": [],
|
| 466 |
+
"bbox": [
|
| 467 |
+
200,
|
| 468 |
+
130,
|
| 469 |
+
803,
|
| 470 |
+
422
|
| 471 |
+
],
|
| 472 |
+
"page_idx": 5
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"type": "image",
|
| 476 |
+
"img_path": "images/4851f885c97704d72c4fa799ec785f4b12a8acc796bb89a31287d7a93e590b58.jpg",
|
| 477 |
+
"image_caption": [
|
| 478 |
+
"b."
|
| 479 |
+
],
|
| 480 |
+
"image_footnote": [],
|
| 481 |
+
"bbox": [
|
| 482 |
+
81,
|
| 483 |
+
436,
|
| 484 |
+
269,
|
| 485 |
+
529
|
| 486 |
+
],
|
| 487 |
+
"page_idx": 5
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"type": "image",
|
| 491 |
+
"img_path": "images/7fdcc8eed4e5e4497f7ac24395973bcf772b520fc3584b58a0307c6576742633.jpg",
|
| 492 |
+
"image_caption": [
|
| 493 |
+
"c."
|
| 494 |
+
],
|
| 495 |
+
"image_footnote": [],
|
| 496 |
+
"bbox": [
|
| 497 |
+
282,
|
| 498 |
+
436,
|
| 499 |
+
419,
|
| 500 |
+
529
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 5
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "image",
|
| 506 |
+
"img_path": "images/a6580f62096183a517a5efbbf8dc3cc33516b4fe224c9d73262984951b762cf9.jpg",
|
| 507 |
+
"image_caption": [],
|
| 508 |
+
"image_footnote": [],
|
| 509 |
+
"bbox": [
|
| 510 |
+
428,
|
| 511 |
+
446,
|
| 512 |
+
915,
|
| 513 |
+
520
|
| 514 |
+
],
|
| 515 |
+
"page_idx": 5
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "image",
|
| 519 |
+
"img_path": "images/85593d1218f423c1e77a2d537f1efc37f4cbe8e0a39d6d6d7e504ef0e701d4f6.jpg",
|
| 520 |
+
"image_caption": [
|
| 521 |
+
"e.",
|
| 522 |
+
"h."
|
| 523 |
+
],
|
| 524 |
+
"image_footnote": [],
|
| 525 |
+
"bbox": [
|
| 526 |
+
83,
|
| 527 |
+
546,
|
| 528 |
+
267,
|
| 529 |
+
641
|
| 530 |
+
],
|
| 531 |
+
"page_idx": 5
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"type": "image",
|
| 535 |
+
"img_path": "images/130fd5f966b0e7ba408da3d2cf52c7359fbbc84e45fd10032cd51ef78bcf3cf1.jpg",
|
| 536 |
+
"image_caption": [
|
| 537 |
+
"f.",
|
| 538 |
+
"i.",
|
| 539 |
+
"j."
|
| 540 |
+
],
|
| 541 |
+
"image_footnote": [],
|
| 542 |
+
"bbox": [
|
| 543 |
+
282,
|
| 544 |
+
545,
|
| 545 |
+
419,
|
| 546 |
+
641
|
| 547 |
+
],
|
| 548 |
+
"page_idx": 5
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "image",
|
| 552 |
+
"img_path": "images/732aab5a71796f9876fca6ce2f448fd5f7eb67d7afaa475bcf3cdec437b0d556.jpg",
|
| 553 |
+
"image_caption": [
|
| 554 |
+
"g."
|
| 555 |
+
],
|
| 556 |
+
"image_footnote": [],
|
| 557 |
+
"bbox": [
|
| 558 |
+
428,
|
| 559 |
+
550,
|
| 560 |
+
915,
|
| 561 |
+
630
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 5
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "image",
|
| 567 |
+
"img_path": "images/c0d5c685a4c1e7652706f3329c06bdb7685a7b0dd30ef73d04cad8f009f5502b.jpg",
|
| 568 |
+
"image_caption": [
|
| 569 |
+
"Fig. 3. MedSAM2 for efficient lesion annotation in 3D CT and MRI scans. a, A human-in-the-loop pipeline for 3D lesion segmentation. b, Annotation time per CT lesion and c, the number of generated CT lesions during the iterative annotation process. d, Visualized segmentation examples of the liver lesion and femoral osteosarcoma in CT scans. e, Annotation time per liver MRI lesion and f, the number of generated MRI lesions during the iterative annotation process. g, Visualized segmentation examples of hepatocellular carcinoma and hepatic abscess in venous contrast-enhanced phase and T2-weighted MRI scans, respectively. f, Average annotation time (seconds) per frame and g, the number of annotated frames during the iterative annotation process. h, Visualized segmentation examples of the left ventricle (red), myocardium (green), left atrium (blue), right ventricle (yellow), and right atrium (cyan)."
|
| 570 |
+
],
|
| 571 |
+
"image_footnote": [],
|
| 572 |
+
"bbox": [
|
| 573 |
+
84,
|
| 574 |
+
660,
|
| 575 |
+
272,
|
| 576 |
+
770
|
| 577 |
+
],
|
| 578 |
+
"page_idx": 5
|
| 579 |
+
},
|
| 580 |
+
{
|
| 581 |
+
"type": "image",
|
| 582 |
+
"img_path": "images/39f85da958d90f259c0b08e48d0cd2f6ee0c6fb10e5f02130280442abe1a26e7.jpg",
|
| 583 |
+
"image_caption": [],
|
| 584 |
+
"image_footnote": [],
|
| 585 |
+
"bbox": [
|
| 586 |
+
282,
|
| 587 |
+
660,
|
| 588 |
+
419,
|
| 589 |
+
770
|
| 590 |
+
],
|
| 591 |
+
"page_idx": 5
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"type": "image",
|
| 595 |
+
"img_path": "images/17fd29cbf2cd19784c0bb0d2cfb0e23354962e03a41bb725e1fbaa214aac2aef.jpg",
|
| 596 |
+
"image_caption": [],
|
| 597 |
+
"image_footnote": [],
|
| 598 |
+
"bbox": [
|
| 599 |
+
429,
|
| 600 |
+
667,
|
| 601 |
+
915,
|
| 602 |
+
758
|
| 603 |
+
],
|
| 604 |
+
"page_idx": 5
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"type": "page_number",
|
| 608 |
+
"text": "6",
|
| 609 |
+
"bbox": [
|
| 610 |
+
911,
|
| 611 |
+
32,
|
| 612 |
+
921,
|
| 613 |
+
42
|
| 614 |
+
],
|
| 615 |
+
"page_idx": 5
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"type": "text",
|
| 619 |
+
"text": "MedSAM2 to generate a 2D segmentation mask followed by human revision to get the refined 2D mask. At this step, the human annotator also specifies the top and bottom slices of the lesion. Then, MedSAM2 is executed again to generate a complete 3D lesion segmentation mask by forward and backward propagating the refined mask to the top slice and bottom slice, respectively. Finally, the human annotator manually refines the 3D segmentation to obtain the accurate 3D lesion mask. When dozens of new annotations are completed, we fine-tune MedSAM2 six to fifteen epochs to get a new model with improved performance. This pipeline is iterated multiple times to generate large-scale annotations gradually.",
|
| 620 |
+
"bbox": [
|
| 621 |
+
76,
|
| 622 |
+
53,
|
| 623 |
+
921,
|
| 624 |
+
140
|
| 625 |
+
],
|
| 626 |
+
"page_idx": 6
|
| 627 |
+
},
|
| 628 |
+
{
|
| 629 |
+
"type": "text",
|
| 630 |
+
"text": "We first applied the annotation pipeline to lesion segmentation in CT scans. DeepLesion [42], the largest lesion CT dataset, was used in this study, containing a wide range of lesion types (Methods). This dataset provided 2D bounding box annotations on the key slice where the lesion reaches its maximum 2D diameter. These bounding boxes followed the RECIST guideline, which defined the lesion size with long-axis and short-axis diameter markers on the key slice. Our annotation pipeline runs for three iterative rounds to refine segmentation accuracy and efficiency. Fig. 3b-c present the average annotation time per lesion and the increasing number of annotated lesions across these rounds. In the first round, we selected 500 lesions of various sizes and used the trained MedSAM2 model in the annotation pipeline. Compared to manual annotation, requiring an average of 525.9 seconds per lesion, the first round reduced the annotation time by $45\\%$ , bringing it down to 289.2 seconds per lesion. Then we fine-tuned MedSAM2 by combining the annotated dataset and existing CT lesion cases to derive a CT lesion-specific segmentation model, which was used in the second-round annotation. Using this improved model, we annotated 1,500 additional cases, further reducing the average annotation time to 185.3 seconds per lesion. For the third round, we updated the model again and annotated 3,000 unlabeled cases, achieving a remarkable reduction in annotation time to 74.3 seconds per lesion. Fig. 3d shows the segmentation results of two large lesions on the liver and femur. Notably, the femoral osteosarcoma was not presented in the training set, but the model was still able to generate good results, highlighting the model's capacity to generalize to unseen lesion types.",
|
| 631 |
+
"bbox": [
|
| 632 |
+
76,
|
| 633 |
+
141,
|
| 634 |
+
921,
|
| 635 |
+
358
|
| 636 |
+
],
|
| 637 |
+
"page_idx": 6
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"type": "text",
|
| 641 |
+
"text": "In addition, we used the pipeline to annotate the largest multi-phase MRI liver lesion LLD-MMRI2023 dataset [43]. This dataset consists of seven liver lesion types across eight MRI phases and each lesion has pre-defined bounding box prompts (Methods). Manual annotation required an average of 520.3 seconds per lesion, making it a time-intensive process. We conducted a three-round iterative annotation process similar to the CT experiments, progressively refining the segmentation model with annotated data. As shown in Fig. 3e-f, in the first round, MedSAM2 substantially reduced the annotation time by $54\\%$ to 240.5 seconds per lesion while successfully segmenting 498 lesions. To further enhance segmentation performance, we incorporated first-round annotations into the training set and fine-tuned MedSAM2, leading to a more efficient second-round annotation process that reduced the time to 150.7 seconds per lesion and expanded the dataset to additional 996 lesions. Building on this iterative improvement, we fine-tuned MedSAM2 once more for the third round to annotate the remaining 2,490 lesions, achieving an average annotation time of 65.2 seconds per lesion. Fig. 3g visualizes two segmentation examples of different lesion types: hepatocellular carcinoma in venous contrast-enhanced MRI and hepatic abscess in T2-weighted MRI, demonstrating that the annotation pipeline effectively handles diverse lesion appearances and generalizes to multi-phase MRI images with different characteristics. Across all rounds, this iterative process enabled the annotation of 3,984 liver lesions in approximately the time it would have taken to manually annotate only 500 cases.",
|
| 642 |
+
"bbox": [
|
| 643 |
+
76,
|
| 644 |
+
359,
|
| 645 |
+
921,
|
| 646 |
+
563
|
| 647 |
+
],
|
| 648 |
+
"page_idx": 6
|
| 649 |
+
},
|
| 650 |
+
{
|
| 651 |
+
"type": "text",
|
| 652 |
+
"text": "MedSAM2 enables high-throughput video annotation",
|
| 653 |
+
"text_level": 1,
|
| 654 |
+
"bbox": [
|
| 655 |
+
76,
|
| 656 |
+
582,
|
| 657 |
+
465,
|
| 658 |
+
597
|
| 659 |
+
],
|
| 660 |
+
"page_idx": 6
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"type": "text",
|
| 664 |
+
"text": "Medical video annotation is a particularly resource-intensive and demanding task [44] because it requires frame-by-frame labeling of anatomical structures and pathological regions, making it much more complex than static 2D image segmentation. The dynamic nature of medical videos introduces additional challenges such as motion artifacts, varying illumination, and temporal consistency. Manual annotation in such cases is tedious and expensive, making it difficult to generate sufficient labeled data for deep learning model training or large-scale studies.",
|
| 665 |
+
"bbox": [
|
| 666 |
+
76,
|
| 667 |
+
601,
|
| 668 |
+
921,
|
| 669 |
+
674
|
| 670 |
+
],
|
| 671 |
+
"page_idx": 6
|
| 672 |
+
},
|
| 673 |
+
{
|
| 674 |
+
"type": "text",
|
| 675 |
+
"text": "We adapted our annotation strategy for video data by leveraging MedSAM2's ability to process sequential frames with spatial and temporal coherence (Supplementary Fig.2). Unlike the 3D pipeline which uses mid-slice prompting, the video pipeline begins with users adding prompts to the segmentation targets on the first frame of the video. These prompts are then passed to the pre-trained MedSAM2 model to generate initial 2D segmentation masks for each target. The human annotators then review and refine these masks to ensure high quality, followed by feeding them back into MedSAM2 for propagation, where the model extends the refined segmentation across the remaining frames. After that, users further refine the video masks as needed, ensuring an accurate delineation of the anatomical structures throughout the sequence. The annotated dataset is then added to the training set, allowing further fine-tuning of MedSAM2 to improve its performance on future video annotation.",
|
| 676 |
+
"bbox": [
|
| 677 |
+
76,
|
| 678 |
+
674,
|
| 679 |
+
921,
|
| 680 |
+
804
|
| 681 |
+
],
|
| 682 |
+
"page_idx": 6
|
| 683 |
+
},
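The first-frame-prompt, propagate, then refine loop described above can be summarized in a short sketch. Everything model-specific here is a placeholder: `segment_frame`, `propagate`, and `review` stand in for MedSAM2 inference and the human correction step; they are not the released API.

```python
import numpy as np

def annotate_video(frames, first_frame_boxes, segment_frame, propagate, review):
    """Human-in-the-loop video annotation loop (sketch of the workflow described above).

    frames: list of (H, W, 3) uint8 arrays.
    first_frame_boxes: [x1, y1, x2, y2] prompts for each target on the first frame.
    segment_frame / propagate: hypothetical callables wrapping MedSAM2 inference.
    review: callable that lets a human annotator correct a mask and returns it.
    """
    # 1) Prompt-based 2D segmentation on the first frame only.
    first_masks = [segment_frame(frames[0], box) for box in first_frame_boxes]
    # 2) Human review of the first-frame masks before propagation.
    first_masks = [review(frames[0], m) for m in first_masks]
    # 3) Propagate the refined masks across the remaining frames.
    video_masks = propagate(frames, first_masks)  # (n_frames, n_targets, H, W)
    # 4) Final human pass over the propagated masks.
    return np.stack([[review(frame, m) for m in frame_masks]
                     for frame, frame_masks in zip(frames, video_masks)])
```

The corrected videos are then appended to the training set so the next fine-tuning round starts from better propagations.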
|
| 684 |
+
{
|
| 685 |
+
"type": "text",
|
| 686 |
+
"text": "We studied the annotation pipeline for heart chamber annotation based on the right ventricular ejection (RVENet) dataset [45], [46], which contains apical four-chamber view cardiac ultrasound (Echocardiography) videos of 831 patients with varying image quality and heart conditions. Echocardiography is a widely used, non-invasive imaging modality for assessing cardiac function [1], [6], offering real-time visualization of heart chambers, valve motion, and blood flow. We applied a three-round annotation pipeline. Fig. 3h shows the annotation time per ultrasound (US) frame, demonstrating a substantial reduction across iterations. Manual annotation initially required 102.3 seconds per frame, whereas the first round of the pipeline reduced this time to 65.7 seconds, marking a $46\\%$ decrease. With further refinements in the second round, annotation time dropped to 23.1 seconds, and by the third round, it reached 8.4 seconds per frame, achieving a $92\\%$ reduction compared to manual annotation.",
|
| 687 |
+
"bbox": [
|
| 688 |
+
76,
|
| 689 |
+
806,
|
| 690 |
+
921,
|
| 691 |
+
936
|
| 692 |
+
],
|
| 693 |
+
"page_idx": 6
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"type": "page_number",
|
| 697 |
+
"text": "7",
|
| 698 |
+
"bbox": [
|
| 699 |
+
911,
|
| 700 |
+
32,
|
| 701 |
+
921,
|
| 702 |
+
42
|
| 703 |
+
],
|
| 704 |
+
"page_idx": 6
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"type": "text",
|
| 708 |
+
"text": "Fig. 3i highlights the expanding dataset size as the annotation process scales up. The first round processed 44,165 frames across 300 videos. In the second round, with the improved model, the dataset increased to 72,794 frames from 500 videos. Finally, in the third round, the pipeline annotated 134,591 frames from 1,000 videos, demonstrating its scalability and robustness. This represents a throughput increase of over $12\\mathrm{x}$ compared to manual annotation methods. Visualized segmentation examples are presented in Fig. 3j, showing that MedSAM2 accurately delineates both ventricles and atrium with consistent boundary tracking even during cardiac contraction phases.",
|
| 709 |
+
"bbox": [
|
| 710 |
+
71,
|
| 711 |
+
53,
|
| 712 |
+
923,
|
| 713 |
+
142
|
| 714 |
+
],
|
| 715 |
+
"page_idx": 7
|
| 716 |
+
},
|
| 717 |
+
{
|
| 718 |
+
"type": "image",
|
| 719 |
+
"img_path": "images/5adc6f2de8fb8211005aa566f50fbc3e799ffb2ddf53b6a0675bab8455dda641.jpg",
|
| 720 |
+
"image_caption": [
|
| 721 |
+
"Fig. 4. MedSAM2 can be deployed on local desktops and remote clusters with commonly used platforms: 3D Slicer, terminal, JupyterLab, Gradio, and Google Colab."
|
| 722 |
+
],
|
| 723 |
+
"image_footnote": [],
|
| 724 |
+
"bbox": [
|
| 725 |
+
205,
|
| 726 |
+
157,
|
| 727 |
+
785,
|
| 728 |
+
518
|
| 729 |
+
],
|
| 730 |
+
"page_idx": 7
|
| 731 |
+
},
|
| 732 |
+
{
|
| 733 |
+
"type": "text",
|
| 734 |
+
"text": "MedSAM2 supports community-wide deployment",
|
| 735 |
+
"text_level": 1,
|
| 736 |
+
"bbox": [
|
| 737 |
+
73,
|
| 738 |
+
599,
|
| 739 |
+
442,
|
| 740 |
+
614
|
| 741 |
+
],
|
| 742 |
+
"page_idx": 7
|
| 743 |
+
},
|
| 744 |
+
{
|
| 745 |
+
"type": "text",
|
| 746 |
+
"text": "To bridge the gap between advanced segmentation models and real-world applications, we have integrated MedSAM2 into several commonly used platforms across the medical imaging and data science communities, such as 3D Slicer [47], terminal, JupyterLab, Colab, and Gradio [48] (Fig. 4). This multi-platform integration enables users to flexibly deploy and interact with MedSAM2 on both local desktops and remote computing environments, adapting to diverse workflows and computational resources.",
|
| 747 |
+
"bbox": [
|
| 748 |
+
71,
|
| 749 |
+
617,
|
| 750 |
+
923,
|
| 751 |
+
690
|
| 752 |
+
],
|
| 753 |
+
"page_idx": 7
|
| 754 |
+
},
|
| 755 |
+
{
|
| 756 |
+
"type": "text",
|
| 757 |
+
"text": "3D Slicer is one of the most widely used open-source medical image analysis platforms. We implemented MedSAM2 as a plug-and-play plugin (Methods), enabling users to seamlessly apply MedSAM2 for interactive lesion and organ segmentation, visualization, and analysis in a familiar environment (Supplementary Fig. 3). This integration facilitates fast annotation and refinement of segmentation results, making it a practical tool for clinicians and biomedical researchers working with diverse 3D medical imaging modalities.",
|
| 758 |
+
"bbox": [
|
| 759 |
+
71,
|
| 760 |
+
691,
|
| 761 |
+
923,
|
| 762 |
+
763
|
| 763 |
+
],
|
| 764 |
+
"page_idx": 7
|
| 765 |
+
},
|
| 766 |
+
{
|
| 767 |
+
"type": "text",
|
| 768 |
+
"text": "For high-throughput processing, the command-line terminal interface provides an efficient and scriptable way to process large datasets in batch mode. JupyterLab and Colab cater to researchers and developers who prefer an interactive, code-centric environment for experimentation. These platforms support notebook-based workflows, making it easy to visualize intermediate outputs, adjust model parameters, and document the segmentation process. In particular, Colab enables cloud-based access to free GPUs, allowing users without local hardware to test and deploy MedSAM2 with minimal setup.",
|
| 769 |
+
"bbox": [
|
| 770 |
+
71,
|
| 771 |
+
763,
|
| 772 |
+
921,
|
| 773 |
+
837
|
| 774 |
+
],
|
| 775 |
+
"page_idx": 7
|
| 776 |
+
},
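As an illustration of the scriptable batch mode described above, the following is a minimal driver one could write around the model; the flag names, the `.npz` file layout, and the `run_medsam2` stub are assumptions rather than the official command-line interface.

```python
import argparse
import glob
import os

import numpy as np


def run_medsam2(volume_path: str) -> np.ndarray:
    """Placeholder for the actual MedSAM2 inference call (not the official API)."""
    vol = np.load(volume_path)["imgs"]          # assumed .npz key
    return np.zeros(vol.shape, dtype=np.uint8)  # dummy mask so the script runs end-to-end


def main() -> None:
    parser = argparse.ArgumentParser(description="Illustrative batch segmentation driver")
    parser.add_argument("--input_dir", required=True, help="folder of pre-processed .npz volumes")
    parser.add_argument("--output_dir", required=True, help="where segmentation masks are written")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    for path in sorted(glob.glob(os.path.join(args.input_dir, "*.npz"))):
        mask = run_medsam2(path)
        out = os.path.join(args.output_dir, os.path.basename(path))
        np.savez_compressed(out, segs=mask)  # assumed output key


if __name__ == "__main__":
    main()
```

A typical invocation would be along the lines of `python batch_segment.py --input_dir data/npz --output_dir results`.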
|
| 777 |
+
{
|
| 778 |
+
"type": "text",
|
| 779 |
+
"text": "Additionally, we incorporated MedSAM2 into Gradio, a lightweight and web-based interface that allows users to interact with the model without requiring extensive technical expertise or complex installations. This web-based deployment is particularly beneficial for video segmentation, allowing users to upload and process video frames without requiring extensive computational resources. The user-friendly design enables quick previews and adjustments of segmentations, allowing human annotators to refine results as needed. Moreover, Gradio supports seamless deployment in both local and cloud-based environments, which is essential for multi-institutional collaborations and remote research settings.",
|
| 780 |
+
"bbox": [
|
| 781 |
+
71,
|
| 782 |
+
837,
|
| 783 |
+
923,
|
| 784 |
+
926
|
| 785 |
+
],
|
| 786 |
+
"page_idx": 7
|
| 787 |
+
},
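For the web interface, a minimal Gradio wrapper looks like the sketch below; the `segment` function is a stub, and the actual demo additionally exposes prompt inputs and video upload.

```python
import gradio as gr
import numpy as np


def segment(image: np.ndarray) -> np.ndarray:
    # Placeholder: swap in the actual MedSAM2 inference call here.
    # Returning the input keeps the demo runnable without model weights.
    return image


# A minimal image-in/image-out interface for quick previews of segmentation results.
demo = gr.Interface(
    fn=segment,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="numpy"),
    title="MedSAM2 demo (illustrative sketch)",
)

if __name__ == "__main__":
    demo.launch()  # share=True would expose a temporary public link for remote collaborators
```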
|
| 788 |
+
{
|
| 789 |
+
"type": "page_number",
|
| 790 |
+
"text": "8",
|
| 791 |
+
"bbox": [
|
| 792 |
+
911,
|
| 793 |
+
32,
|
| 794 |
+
921,
|
| 795 |
+
42
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 7
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "text",
|
| 801 |
+
"text": "DISCUSSION",
|
| 802 |
+
"text_level": 1,
|
| 803 |
+
"bbox": [
|
| 804 |
+
75,
|
| 805 |
+
51,
|
| 806 |
+
181,
|
| 807 |
+
66
|
| 808 |
+
],
|
| 809 |
+
"page_idx": 8
|
| 810 |
+
},
|
| 811 |
+
{
|
| 812 |
+
"type": "text",
|
| 813 |
+
"text": "General segmentation foundation models, such as SAM2.1, are pre-trained on large-scale natural image and video datasets, providing strong general segmentation capabilities but typically lack the fine-grained domain knowledge required for precise medical image segmentation. Our results demonstrate that transfer learning is an effective strategy for adapting general-domain segmentation foundation models to medical imaging applications, enabling substantial improvements in segmentation accuracy and robustness across diverse medical imaging modalities.",
|
| 814 |
+
"bbox": [
|
| 815 |
+
71,
|
| 816 |
+
71,
|
| 817 |
+
923,
|
| 818 |
+
145
|
| 819 |
+
],
|
| 820 |
+
"page_idx": 8
|
| 821 |
+
},
|
| 822 |
+
{
|
| 823 |
+
"type": "text",
|
| 824 |
+
"text": "Medical imaging datasets often suffer from limited annotated samples due to the high cost, time demand, and expertise required for manual annotation. Lesion segmentation is one of the most challenging tasks as they vary in size, shape, location, and contrast across different imaging modalities and patients [42]. The scarcity of labeled data can hinder the development and generalization of general lesion detection and quantification models, limiting their clinical applicability. Our iterative annotation pipeline with transfer learning reduced annotation time by up to $92\\%$ while enabling dataset expansion by more than four times.",
|
| 825 |
+
"bbox": [
|
| 826 |
+
71,
|
| 827 |
+
145,
|
| 828 |
+
924,
|
| 829 |
+
232
|
| 830 |
+
],
|
| 831 |
+
"page_idx": 8
|
| 832 |
+
},
|
| 833 |
+
{
|
| 834 |
+
"type": "text",
|
| 835 |
+
"text": "Our first user study demonstrates that fine-tuning MedSAM2 on domain-specific CT and MRI lesion datasets leads to progressive improvements in annotation efficiency and segmentation quality. The iterative annotation pipeline enhances the model accuracy by continuously learning from newly annotated data, reducing manual correction efforts and overall annotation time. This progressive adaptation is particularly valuable for heterogeneous datasets, such as those containing a mix of common and rare lesion types.",
|
| 836 |
+
"bbox": [
|
| 837 |
+
71,
|
| 838 |
+
232,
|
| 839 |
+
924,
|
| 840 |
+
305
|
| 841 |
+
],
|
| 842 |
+
"page_idx": 8
|
| 843 |
+
},
|
| 844 |
+
{
|
| 845 |
+
"type": "text",
|
| 846 |
+
"text": "Video modalities, such as Echocardiography, present unique challenges compared to CT and MRI due to the dynamic nature of the heart. Unlike static medical images, ultrasound videos capture continuous motion with typical acquisition rates of 30-60 frames per second, making frame-by-frame manual annotation by experts highly impractical. This inherent complexity limits the availability of large, well-annotated segmentation datasets. Our video annotation study demonstrates that these challenges can be effectively mitigated using an iterative annotation pipeline combined with transfer learning, achieving substantial reductions in annotation time while progressively improving segmentation quality. The model's ability to generalize across different patient demographics and ultrasound systems further highlights its scalability. This could further facilitate the development of cardiac assessment tools that support early disease detection and quantitative cardiac research.",
|
| 847 |
+
"bbox": [
|
| 848 |
+
71,
|
| 849 |
+
305,
|
| 850 |
+
924,
|
| 851 |
+
435
|
| 852 |
+
],
|
| 853 |
+
"page_idx": 8
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"type": "text",
|
| 857 |
+
"text": "Our implementation of MedSAM2 as plug-ins and packages for standard medical imaging platforms reduces adoption barriers toward translating deep learning-based segmentation models into practical tools. By supporting deployment in 3D Slicer, terminal, JupyterLab, Colab, and Gradio, we provide both graphical interfaces and programmatic APIs for flexible access tailored to a wide range of users, from clinicians and radiologists to data scientists and algorithm developers, in both clinical research and translational settings.",
|
| 858 |
+
"bbox": [
|
| 859 |
+
71,
|
| 860 |
+
436,
|
| 861 |
+
924,
|
| 862 |
+
508
|
| 863 |
+
],
|
| 864 |
+
"page_idx": 8
|
| 865 |
+
},
|
| 866 |
+
{
|
| 867 |
+
"type": "text",
|
| 868 |
+
"text": "This work also has several limitations. One key limitation of MedSAM2 is its reliance on bounding boxes as the main prompts. This design choice reduces object selection ambiguity and enables efficient mask propagation, allowing the model to process and track multiple masks simultaneously. However, this approach inherently limits its ability to segment highly complex anatomical structures, such as vessels with thin and branching structures. Since the model does not explicitly consider 3D spatial continuity, it may struggle to accurately capture highly elongated and curved 3D objects. One promising direction is the incorporation of a 4D image encoder (3D + time), which would allow the model to jointly process spatial and temporal information. Moreover, supporting other prompts, such as point [25], [26], text [49], [50], scribble and lasso [27] would enable more flexible corrections.",
|
| 869 |
+
"bbox": [
|
| 870 |
+
71,
|
| 871 |
+
508,
|
| 872 |
+
924,
|
| 873 |
+
626
|
| 874 |
+
],
|
| 875 |
+
"page_idx": 8
|
| 876 |
+
},
|
| 877 |
+
{
|
| 878 |
+
"type": "text",
|
| 879 |
+
"text": "Another limitation stems from the fixed memory design, where the model maintains an eight-frame memory bank for all segmentation tasks. While this memory size is sufficient for the majority of cases with moderate object motion, it may lead to inferior tracking performance when dealing with rapid or large target movements. For example, in colonoscopy videos, the camera continuously moves through the gastrointestinal tract, and polyps may appear, disappear, or change shape as the viewpoint shifts. Tracking failures may occur when the polyp moves out of the current memory range and then reappears in later frames. Future work will focus on implementing an adaptive memory system to replace the fixed memory bank to allocate longer memory retention for rapidly moving or intermittently visible targets.",
|
| 880 |
+
"bbox": [
|
| 881 |
+
71,
|
| 882 |
+
626,
|
| 883 |
+
924,
|
| 884 |
+
728
|
| 885 |
+
],
|
| 886 |
+
"page_idx": 8
|
| 887 |
+
},
|
| 888 |
+
{
|
| 889 |
+
"type": "text",
|
| 890 |
+
"text": "In addition, MedSAM2 is built on the SAM2.1 tiny model with reduced input image size to optimize efficiency, but the inference process still requires GPU computation, limiting its applicability in resource-constrained environments, such as edge devices, point-of-care ultrasound machines, or low-power medical imaging workstations. Future optimizations, such as lightweight image encoder, model compression, quantization, or distillation techniques, will be necessary to enable efficient CPU-based inference, making MedSAM2 more practical in real-time and low-resource medical settings.",
|
| 891 |
+
"bbox": [
|
| 892 |
+
71,
|
| 893 |
+
728,
|
| 894 |
+
924,
|
| 895 |
+
800
|
| 896 |
+
],
|
| 897 |
+
"page_idx": 8
|
| 898 |
+
},
|
| 899 |
+
{
|
| 900 |
+
"type": "text",
|
| 901 |
+
"text": "In conclusion, this work presents a foundation model for 3D medical image and video segmentation. We also provide, to the best of our knowledge, the most extensive user study to annotate large-scale medical datasets. MedSAM2 not only achieves better performance across various organs and lesions compared to existing SAM variants, but also substantially reduces annotation costs for creating large-scale segmentation datasets. As annotation processes become more efficient, the potential for scaling up large, high-quality labeled datasets increases, which in turn benefits future diagnostic model development and clinical deployment. Our open-source implementations across multiple platforms will facilitate adoption and further community-driven improvements to medical image and video segmentation tools.",
|
| 902 |
+
"bbox": [
|
| 903 |
+
71,
|
| 904 |
+
800,
|
| 905 |
+
924,
|
| 906 |
+
905
|
| 907 |
+
],
|
| 908 |
+
"page_idx": 8
|
| 909 |
+
},
|
| 910 |
+
{
|
| 911 |
+
"type": "page_number",
|
| 912 |
+
"text": "9",
|
| 913 |
+
"bbox": [
|
| 914 |
+
911,
|
| 915 |
+
32,
|
| 916 |
+
921,
|
| 917 |
+
42
|
| 918 |
+
],
|
| 919 |
+
"page_idx": 8
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"type": "text",
|
| 923 |
+
"text": "METHODS",
|
| 924 |
+
"text_level": 1,
|
| 925 |
+
"bbox": [
|
| 926 |
+
76,
|
| 927 |
+
51,
|
| 928 |
+
158,
|
| 929 |
+
66
|
| 930 |
+
],
|
| 931 |
+
"page_idx": 9
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "text",
|
| 935 |
+
"text": "Dataset curation and pre-processing",
|
| 936 |
+
"text_level": 1,
|
| 937 |
+
"bbox": [
|
| 938 |
+
75,
|
| 939 |
+
71,
|
| 940 |
+
346,
|
| 941 |
+
85
|
| 942 |
+
],
|
| 943 |
+
"page_idx": 9
|
| 944 |
+
},
|
| 945 |
+
{
|
| 946 |
+
"type": "text",
|
| 947 |
+
"text": "All training images and videos were curated from publicly available datasets with license permission for research purposes (Supplementary Table 1). The 3D test images were based on the recent 3D multi-phase liver tumor CT dataset [51] and the CVPR 2024 MedSAM on Laptop testing set [37], including 20, 7, 7, 5, and 1 tasks for CT organs, CT lesions, MRI organs, MRI lesions, and PET lesions, respectively. The pre-processing followed common practice [9], [19], [50]. Specifically, CT image intensities were adjusted to the proper window width and level (brain: 80/40, abdomen: 400/40, bone: 1800/400, lung: 1500/-600, mediastinum: 400/40) followed by rescaling to [0, 255]. For the remaining 3D images (MRI and PET), we applied an intensity cut-off with a lower-bound and upper-bound of $0.5\\%$ and $99.5\\%$ percentile of foreground intensity and then rescaled the intensity to [0, 255]. No intensity normalization was applied for videos.",
|
| 948 |
+
"bbox": [
|
| 949 |
+
73,
|
| 950 |
+
89,
|
| 951 |
+
924,
|
| 952 |
+
207
|
| 953 |
+
],
|
| 954 |
+
"page_idx": 9
|
| 955 |
+
},
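The intensity handling described above maps directly to a few lines of NumPy. One assumption in the sketch is that "foreground" for the MRI/PET percentile cut-off means all non-zero voxels; the paper does not spell out that detail.

```python
import numpy as np

# CT window width/level pairs quoted in the text (width, level).
CT_WINDOWS = {"brain": (80, 40), "abdomen": (400, 40), "bone": (1800, 400),
              "lung": (1500, -600), "mediastinum": (400, 40)}


def preprocess_ct(volume_hu: np.ndarray, window: str = "abdomen") -> np.ndarray:
    """Clip a CT volume (in HU) to the chosen window and rescale to [0, 255]."""
    width, level = CT_WINDOWS[window]
    lo, hi = level - width / 2.0, level + width / 2.0
    vol = np.clip(volume_hu, lo, hi)
    return ((vol - lo) / (hi - lo) * 255.0).astype(np.uint8)


def preprocess_mr_pet(volume: np.ndarray) -> np.ndarray:
    """Percentile cut-off (0.5%/99.5% of foreground) followed by rescaling to [0, 255]."""
    fg = volume[volume > 0]  # assumption: non-zero voxels are treated as foreground
    lo, hi = np.percentile(fg, 0.5), np.percentile(fg, 99.5)
    vol = np.clip(volume, lo, hi)
    return ((vol - lo) / (hi - lo + 1e-8) * 255.0).astype(np.uint8)
```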
|
| 956 |
+
{
|
| 957 |
+
"type": "text",
|
| 958 |
+
"text": "Lesion CT dataset and annotation pipeline",
|
| 959 |
+
"text_level": 1,
|
| 960 |
+
"bbox": [
|
| 961 |
+
75,
|
| 962 |
+
219,
|
| 963 |
+
390,
|
| 964 |
+
234
|
| 965 |
+
],
|
| 966 |
+
"page_idx": 9
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "text",
|
| 970 |
+
"text": "DeepLesion dataset [42] contains 32,735 diverse lesions in 32,120 CT slices from 10,594 studies of 4,427 unique patients. Each lesion has a bounding box annotation on the key slice, which is derived from the longest diameter and longest perpendicular diameter. We prioritized lesions with a minimal diameter of $25mm$ because larger lesions are more time-consuming during manual annotation. A senior radiologist with more than 10 years of experience manually annotated five cases to get the manual annotation time. In the human-in-the-loop experiment, we first generated the 2D segmentation mask on the key slice by MedSAM2 and then two radiology students manually revised the mask and specified the top slice and bottom slice of the lesion. To improve the efficiency, we concatenated eight preprocessed lesion images along the axial plane as one 3D volume. In this way, human annotators can open eight lesion images at once for manual revision and reduce the time costs to adjust the window level and width for each lesion. All lesion images and masks were resampled to $512 \\times 512$ on the axial plane in the concatenation with third-order spline interpolation and nearest-neighbor interpolation, respectively. Images with out-of-the-plane spacing less than $3mm$ were resampled to $3mm$ . After manual revisions, we separated the merged eight-lesion scan into single images and resampled them to the original shape. We excluded images without measurable lesions or an out-of-plane spacing of more than $5mm$ . Finally, all annotations were checked and revised by the senior radiologist.",
|
| 971 |
+
"bbox": [
|
| 972 |
+
73,
|
| 973 |
+
237,
|
| 974 |
+
924,
|
| 975 |
+
443
|
| 976 |
+
],
|
| 977 |
+
"page_idx": 9
|
| 978 |
+
},
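A sketch of the per-lesion resampling done before merging lesions for review, assuming volumes are stored as (slices, height, width) arrays; `scipy.ndimage.zoom` with order 3 and order 0 provides the third-order spline and nearest-neighbor interpolation mentioned above.

```python
import numpy as np
from scipy.ndimage import zoom


def resample_for_review(image: np.ndarray, mask: np.ndarray, spacing_z_mm: float):
    """Resample one lesion volume (slices, H, W) for the batched review step."""
    # In-plane: resize to 512 x 512 (spline for the image, nearest-neighbor for the mask).
    zf_xy = (512 / image.shape[1], 512 / image.shape[2])
    # Out-of-plane: volumes with spacing below 3 mm are resampled to 3 mm spacing.
    zf_z = spacing_z_mm / 3.0 if spacing_z_mm < 3.0 else 1.0
    img_rs = zoom(image, (zf_z, *zf_xy), order=3)
    msk_rs = zoom(mask, (zf_z, *zf_xy), order=0)
    return img_rs, msk_rs


# Eight pre-processed lesions are then stacked along the axial axis so the annotator
# can review them as a single volume, e.g. np.concatenate(lesion_volumes, axis=0).
```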
|
| 979 |
+
{
|
| 980 |
+
"type": "text",
|
| 981 |
+
"text": "Liver lesion MRI dataset and annotation pipeline",
|
| 982 |
+
"text_level": 1,
|
| 983 |
+
"bbox": [
|
| 984 |
+
75,
|
| 985 |
+
454,
|
| 986 |
+
433,
|
| 987 |
+
469
|
| 988 |
+
],
|
| 989 |
+
"page_idx": 9
|
| 990 |
+
},
|
| 991 |
+
{
|
| 992 |
+
"type": "text",
|
| 993 |
+
"text": "LLD-MMRI dataset [43] contains diverse liver lesions from 498 unique patients, including hepatocellular carcinoma, intrahepatic cholangiocarcinoma, liver metastases (HM), hepatic cysts (HC), hepatic hemangioma, focal nodular hyperplasia, and hepatic abscess. Each lesion has eight MRI scans: non-contrast, arterial, venous, delay, T2-weighted imaging, diffusion-weighted imaging, T1 in-phase, and T1 out-of-phase, resulting in 3984 cases in total. Each liver lesion has both 3D and slice-wise 2D bounding boxes. We ran MedSAM2 with the two types of bounding boxes separately and got two groups of segmentation results. For the 3D bounding box prompt, we first generated a 2D segmentation mask on the median slice followed by propagating the mask to the remaining slices until it reached the top and bottom slices. For the 2D bounding box prompt, we ran MedSAM2 for each slice with the corresponding box prompts. After that, we computed the DSC score between the two groups of segmentation results. We hypothesized that hard cases have larger disagreements between the two segmentation masks. For each patient, we selected the case with the lowest DSC score among the eight MRI scans as the first-round revision candidates, aiming to achieve a trade-off between data diversity and difficulty. The same selection criteria were also used in the second-round iteration. A senior radiologist manually annotated five cases to get the manual annotation time. Two radiology students participated in the manual revision process. Different from the CT lesion annotation, slice-wise 2D bounding box-based segmentation results were used in the revision because we found the segmentation accuracy was better than the 3D bounding box-based results. During the revision process, we resampled the images to $352 \\times 352$ and merged five preprocessed lesion images as one volume for better efficiency. Finally, all annotations were checked and revised by the senior radiologist.",
|
| 994 |
+
"bbox": [
|
| 995 |
+
73,
|
| 996 |
+
473,
|
| 997 |
+
924,
|
| 998 |
+
722
|
| 999 |
+
],
|
| 1000 |
+
"page_idx": 9
|
| 1001 |
+
},
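The disagreement-based candidate selection can be expressed compactly; the dictionary keys naming the eight MRI phases below are illustrative only.

```python
import numpy as np


def dsc(mask_a: np.ndarray, mask_b: np.ndarray) -> float:
    """Dice Similarity Coefficient between two binary masks."""
    a, b = mask_a.astype(bool), mask_b.astype(bool)
    denom = a.sum() + b.sum()
    return 1.0 if denom == 0 else 2.0 * np.logical_and(a, b).sum() / denom


def pick_revision_candidate(results_3d_box: dict, results_2d_box: dict) -> str:
    """Return the MRI phase with the largest disagreement (lowest DSC) for one patient.

    Both inputs map phase name -> binary mask volume, e.g. {"T2WI": mask, "DWI": mask, ...};
    the keys are assumptions for illustration.
    """
    scores = {phase: dsc(results_3d_box[phase], results_2d_box[phase])
              for phase in results_3d_box}
    return min(scores, key=scores.get)
```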
|
| 1002 |
+
{
|
| 1003 |
+
"type": "text",
|
| 1004 |
+
"text": "Cardiac ultrasound (echocardiography) video dataset and annotation pipeline",
|
| 1005 |
+
"text_level": 1,
|
| 1006 |
+
"bbox": [
|
| 1007 |
+
75,
|
| 1008 |
+
734,
|
| 1009 |
+
648,
|
| 1010 |
+
750
|
| 1011 |
+
],
|
| 1012 |
+
"page_idx": 9
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "text",
|
| 1016 |
+
"text": "RVENet dataset [45], [46] consists of 3583 echocardiography videos from 831 unique patients. The same annotation protocol in CAMUS dataset [38] was followed to delineate the left ventricle, myocardium, and left atrium. Since the videos were acquired in the apical four-chamber view, the right ventricle and atrium were also annotated to provide a more comprehensive cardiac analysis. Videos with low image quality or incomplete ventricles and atrium were excluded. The raw videos have a high resolution of $1016 \\times 708$ and $800 \\times 600$ . We downsampled the videos by a factor of two to reduce the annotation workload while essential structure details were preserved to differentiate the heart chambers. We first annotated the first frame of 200 videos from different patients with the bounding box or point prompts followed by manual refinement by three radiology students. The corrected first-frame mask was then propagated across subsequent frames using MedSAM2. To enhance segmentation accuracy, human annotators manually refined three to ten frames at approximately uniform intervals before inferencing MedSAM2 to update the segmentation results. Finally, all frames underwent manual adjustments where necessary, and the annotations were rigorously verified by the senior radiologist before being used to fine-tune MedSAM2 for the next iteration. To compare with manual annotation efficiency, a senior radiologist annotated 10 frames as a reference for manual annotation time cost.",
|
| 1017 |
+
"bbox": [
|
| 1018 |
+
73,
|
| 1019 |
+
752,
|
| 1020 |
+
924,
|
| 1021 |
+
941
|
| 1022 |
+
],
|
| 1023 |
+
"page_idx": 9
|
| 1024 |
+
},
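Two small helpers illustrating the workload-reduction choices above. The striding-based downsampling and the exact placement of the refinement frames are assumptions on our part, since the text only states a factor of two and "approximately uniform intervals".

```python
import numpy as np


def downsample_frame(frame: np.ndarray) -> np.ndarray:
    # Downsample by a factor of two in both spatial dimensions (simple striding here;
    # the actual resizing method used in the study is not specified).
    return frame[::2, ::2]


def refinement_frame_indices(n_frames: int, n_refine: int = 5) -> list:
    # Pick 3-10 frames (5 assumed here) at approximately uniform intervals for manual refinement.
    return np.linspace(0, n_frames - 1, num=n_refine, dtype=int).tolist()
```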
|
| 1025 |
+
{
|
| 1026 |
+
"type": "page_number",
|
| 1027 |
+
"text": "10",
|
| 1028 |
+
"bbox": [
|
| 1029 |
+
906,
|
| 1030 |
+
32,
|
| 1031 |
+
923,
|
| 1032 |
+
42
|
| 1033 |
+
],
|
| 1034 |
+
"page_idx": 9
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "text",
|
| 1038 |
+
"text": "Network architecture",
|
| 1039 |
+
"text_level": 1,
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
73,
|
| 1042 |
+
53,
|
| 1043 |
+
232,
|
| 1044 |
+
66
|
| 1045 |
+
],
|
| 1046 |
+
"page_idx": 10
|
| 1047 |
+
},
|
| 1048 |
+
{
|
| 1049 |
+
"type": "text",
|
| 1050 |
+
"text": "MedSAM2 was built upon SAM2 [28] with four main components: an image encoder, a prompt encoder, a memory attention module, and a mask decoder. First, we modified the image encoder by downsizing the input image size from $3 \\times 1024 \\times 1024$ to $3 \\times 512 \\times 512$ , which not only fitted better for typical medical image size but also reduced computational burden. The image encoder employs a hierarchical vision transformer (Hiera) [33] with a four-stage architecture (layers=\\{1,2,7,2\\}). We incorporated global attention blocks at the 5th, 7th and 9th layers to capture long-range dependencies critical for medical image analysis. A feature pyramid network (FPN) [52] neck extracts multi-scale features from the backbone, enabling detailed segmentation at various resolutions. Second, the memory attention module contains 4 transformer layers with both self-attention and cross-attention mechanisms. Each layer employs Rotary Position Embedding (RoPE) [53] with 2D spatial encoding (feature size $32 \\times 32$ ) to maintain spatial awareness across slices or frames. This module conditions the current frame features on a memory bank storing information from previously processed frames, effectively exploiting the spatial continuity in volumetric data and temporal coherence in videos. Third, the prompt encoder transforms coordinates into embeddings that guide the segmentation process, allowing clinicians to specify regions of interest efficiently. Finally, the mask decoder integrates features from multiple scales of the image encoder through skip connections and produces segmentation masks at $128 \\times 128$ resolution, which are then upsampled to the original $512 \\times 512$ input size using bilinear interpolation.",
|
| 1051 |
+
"bbox": [
|
| 1052 |
+
71,
|
| 1053 |
+
71,
|
| 1054 |
+
924,
|
| 1055 |
+
292
|
| 1056 |
+
],
|
| 1057 |
+
"page_idx": 10
|
| 1058 |
+
},
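The quoted hyper-parameters can be collected into a small configuration sketch. The field names below are illustrative and do not match the actual SAM2/MedSAM2 config keys; the upsampling helper simply mirrors the stated 128-to-512 bilinear step.

```python
import torch
import torch.nn.functional as F

# Architecture hyper-parameters quoted in the text (illustrative field names).
medsam2_config = {
    "input_size": 512,                     # reduced from SAM2's 1024 x 1024 input
    "hiera_stage_layers": [1, 2, 7, 2],    # four-stage Hiera backbone
    "global_attention_layers": [5, 7, 9],  # layers with global attention blocks
    "memory_attention_layers": 4,          # self- and cross-attention transformer layers
    "rope_feature_size": (32, 32),         # 2D RoPE spatial encoding
    "mask_decoder_output": 128,            # low-resolution mask logits
}


def upsample_mask(low_res_logits: torch.Tensor, size: int = 512) -> torch.Tensor:
    """Upsample (B, C, 128, 128) decoder logits back to the 512 x 512 input resolution."""
    return F.interpolate(low_res_logits, size=(size, size), mode="bilinear", align_corners=False)
```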
|
| 1059 |
+
{
|
| 1060 |
+
"type": "text",
|
| 1061 |
+
"text": "Training protocol",
|
| 1062 |
+
"text_level": 1,
|
| 1063 |
+
"bbox": [
|
| 1064 |
+
73,
|
| 1065 |
+
309,
|
| 1066 |
+
205,
|
| 1067 |
+
324
|
| 1068 |
+
],
|
| 1069 |
+
"page_idx": 10
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"type": "text",
|
| 1073 |
+
"text": "The model was initialized from the pre-trained SAM2.1-Tiny model checkpoint. During training, we used a full model fine-tuning strategy with two different learning rates: a lower learning rate $(3.0 \\times 10^{-5})$ for the image encoder (28M parameters) to preserve learned features, and a higher rate $(5.0 \\times 10^{-5})$ for other components (10.9M parameters) to adapt to the characteristics of the medical domains. The training utilized a combination of 3D images and videos with a batch size of eight per GPU, where each training sample consisted of eight consecutive slices or video frames. In the human-in-the-loop annotation study, we halved the learning rate and fine-tuned the trained MedSAM2 model 6 and 15 epochs in the second and third round iterations, respectively.",
|
| 1074 |
+
"bbox": [
|
| 1075 |
+
71,
|
| 1076 |
+
328,
|
| 1077 |
+
924,
|
| 1078 |
+
430
|
| 1079 |
+
],
|
| 1080 |
+
"page_idx": 10
|
| 1081 |
+
},
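In PyTorch, the two-learning-rate scheme corresponds to two optimizer parameter groups; the `image_encoder` attribute prefix below is an assumption about the model's module naming.

```python
import torch


def build_optimizer(model: torch.nn.Module) -> torch.optim.AdamW:
    """Two-learning-rate fine-tuning sketch with the rates and AdamW settings quoted in the text."""
    encoder_params, other_params = [], []
    for name, param in model.named_parameters():
        # Assumption: the image encoder lives under a module named "image_encoder".
        (encoder_params if name.startswith("image_encoder") else other_params).append(param)
    return torch.optim.AdamW(
        [
            {"params": encoder_params, "lr": 3.0e-5},  # lower rate to preserve pre-trained features
            {"params": other_params, "lr": 5.0e-5},    # higher rate for the remaining components
        ],
        betas=(0.9, 0.999),
        weight_decay=0.01,
    )
```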
|
| 1082 |
+
{
|
| 1083 |
+
"type": "text",
|
| 1084 |
+
"text": "The data augmentations included random horizontal flipping, affine transformations, color jittering, and random grayscale conversion. For videos, we also augmented the frame sample rate by a factor of 2 and 4. Since the training set was imbalanced between different modalities, we increased the sampling frequency of MRI, PET, and video data by a factor of 3, 40, and 40, respectively. The bounding box prompts were simulated from expert annotations with random perturbations of 0-10 pixels. The loss function combined focal loss and dice loss for mask prediction with weights of 20:1. We used the AdamW optimizer [54] with $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ , and weight decay of 0.01. The model was trained for 70 epochs on three compute nodes, each equipped with four H100 GPUs, with a total training time of four days. External validation was performed on held-out datasets to assess the model's generalization capability across different tasks and modalities.",
|
| 1085 |
+
"bbox": [
|
| 1086 |
+
71,
|
| 1087 |
+
430,
|
| 1088 |
+
924,
|
| 1089 |
+
561
|
| 1090 |
+
],
|
| 1091 |
+
"page_idx": 10
|
| 1092 |
+
},
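A sketch of the combined loss with the stated 20:1 weighting; the focal-loss alpha and gamma values are common defaults and are not specified in the text.

```python
import torch
import torch.nn.functional as F


def focal_loss(logits: torch.Tensor, targets: torch.Tensor, alpha=0.25, gamma=2.0) -> torch.Tensor:
    """Sigmoid focal loss; logits/targets are float tensors of shape (B, 1, H, W)."""
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()


def dice_loss(logits: torch.Tensor, targets: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    p = torch.sigmoid(logits).flatten(1)
    t = targets.flatten(1)
    inter = (p * t).sum(-1)
    return 1 - ((2 * inter + eps) / (p.sum(-1) + t.sum(-1) + eps)).mean()


def segmentation_loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    # Focal and Dice terms combined with the 20:1 weighting stated in the text.
    return 20.0 * focal_loss(logits, targets) + 1.0 * dice_loss(logits, targets)
```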
|
| 1093 |
+
{
|
| 1094 |
+
"type": "text",
|
| 1095 |
+
"text": "3D Slicer Integration",
|
| 1096 |
+
"text_level": 1,
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
73,
|
| 1099 |
+
580,
|
| 1100 |
+
230,
|
| 1101 |
+
595
|
| 1102 |
+
],
|
| 1103 |
+
"page_idx": 10
|
| 1104 |
+
},
|
| 1105 |
+
{
|
| 1106 |
+
"type": "text",
|
| 1107 |
+
"text": "We implemented MedSAM2 as a plugin (extension) in 3D Slicer to reuse the built-in modules for essential operations, such as loading diverse medical imaging formats (e.g., DICOM, NiFiTI), drawing prompts, refining masks, and visualizing both 2D slices and 3D segmentation results. The plugin is built on a client-server architecture, offering users the flexibility to perform inference either locally on personal machines or remotely on high-performance computing clusters. The interface contains three clear sections:",
|
| 1108 |
+
"bbox": [
|
| 1109 |
+
71,
|
| 1110 |
+
599,
|
| 1111 |
+
924,
|
| 1112 |
+
672
|
| 1113 |
+
],
|
| 1114 |
+
"page_idx": 10
|
| 1115 |
+
},
|
| 1116 |
+
{
|
| 1117 |
+
"type": "list",
|
| 1118 |
+
"sub_type": "text",
|
| 1119 |
+
"list_items": [
|
| 1120 |
+
"- Preprocessing panel: users can select predefined pre-processing options (e.g., CT, MRI) to normalize the input image intensity before segmentation.",
|
| 1121 |
+
"- Region-Of-Interest (ROI) selection: users can define the ROI directly by choosing start and end slices and draw bounding boxes prompts on the key slice.",
|
| 1122 |
+
"- Segmentation controls: users can choose the model variant and initiate segmentation for the middle slice and full volume. Moreover, users can load their own customized models for specific imaging modalities or segmentation targets."
|
| 1123 |
+
],
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
73,
|
| 1126 |
+
675,
|
| 1127 |
+
921,
|
| 1128 |
+
758
|
| 1129 |
+
],
|
| 1130 |
+
"page_idx": 10
|
| 1131 |
+
},
|
| 1132 |
+
{
|
| 1133 |
+
"type": "text",
|
| 1134 |
+
"text": "For the server component, we implemented a Flask API server to provide the necessary arguments and inputs to the local API offered by MedSAM2. The server also features a temporary most recently used (MRU)-style cache to facilitate refinement of the most recent segmentation.",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
71,
|
| 1137 |
+
765,
|
| 1138 |
+
924,
|
| 1139 |
+
809
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 10
|
| 1142 |
+
},
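A minimal Flask endpoint in the spirit of the described client-server design; the route name, payload schema, and cache handling are illustrative and are not the released plugin's actual API.

```python
import base64

import numpy as np
from flask import Flask, jsonify, request

app = Flask(__name__)
_cache = {}  # most-recently-used style cache of the latest segmentation (sketch)


@app.route("/segment", methods=["POST"])
def segment():
    """Illustrative endpoint: receives a volume plus a box prompt and returns a mask."""
    payload = request.get_json()
    # Assumption: the client sends the volume as base64-encoded uint8 bytes plus its shape.
    volume = np.frombuffer(base64.b64decode(payload["volume"]), dtype=np.uint8)
    volume = volume.reshape(payload["shape"])  # (slices, H, W)
    box = payload["bbox"]                       # [x1, y1, x2, y2] on the key slice
    mask = np.zeros(volume.shape, dtype=np.uint8)  # placeholder for the MedSAM2 call
    _cache["last"] = mask                           # kept so follow-up refinement requests can reuse it
    return jsonify({"mask": base64.b64encode(mask.tobytes()).decode()})


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```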
|
| 1143 |
+
{
|
| 1144 |
+
"type": "text",
|
| 1145 |
+
"text": "Evaluation metrics and platform",
|
| 1146 |
+
"text_level": 1,
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
73,
|
| 1149 |
+
828,
|
| 1150 |
+
315,
|
| 1151 |
+
843
|
| 1152 |
+
],
|
| 1153 |
+
"page_idx": 10
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "text",
|
| 1157 |
+
"text": "We followed the recommendations in Metrics Reloaded [55] to evaluate the segmentation accuracy. Specifically, we used Dice Similarity Coefficient (DSC) and Normalized Surface Distance (NSD) with a boundary tolerance of $2mm$ to quantitatively evaluate the region overlap and boundary similarity, respectively. For CT, MRI, and PET images, we compute the metrics in 3D while for video datasets, we first compute the frame-wise metric scores followed by averaging them to obtain the video-level metric scores. Wilcoxon signed-rank test was used for statistical significance analysis. Results were considered statistically significant if the $p$ -value was less than 0.05.",
|
| 1158 |
+
"bbox": [
|
| 1159 |
+
71,
|
| 1160 |
+
845,
|
| 1161 |
+
924,
|
| 1162 |
+
936
|
| 1163 |
+
],
|
| 1164 |
+
"page_idx": 10
|
| 1165 |
+
},
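DSC and the frame-wise averaging for videos are straightforward to compute. For NSD with a 2 mm tolerance, one option (our assumption, not necessarily the authors' implementation) is the open-source surface-distance package referenced in the comment.

```python
import numpy as np


def dsc(pred: np.ndarray, gt: np.ndarray) -> float:
    """Dice Similarity Coefficient for binary masks (3D volumes or 2D frames)."""
    p, g = pred.astype(bool), gt.astype(bool)
    denom = p.sum() + g.sum()
    return 1.0 if denom == 0 else 2.0 * np.logical_and(p, g).sum() / denom


def video_dsc(pred_frames, gt_frames) -> float:
    # Videos are scored frame by frame and the frame scores are averaged.
    return float(np.mean([dsc(p, g) for p, g in zip(pred_frames, gt_frames)]))


# NSD with a 2 mm tolerance can be computed with the `surface-distance` package
# (assumed available), e.g.:
#   from surface_distance import compute_surface_distances, compute_surface_dice_at_tolerance
#   sd = compute_surface_distances(gt.astype(bool), pred.astype(bool), spacing_mm)
#   nsd = compute_surface_dice_at_tolerance(sd, tolerance_mm=2.0)
```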
|
| 1166 |
+
{
|
| 1167 |
+
"type": "page_number",
|
| 1168 |
+
"text": "11",
|
| 1169 |
+
"bbox": [
|
| 1170 |
+
906,
|
| 1171 |
+
32,
|
| 1172 |
+
919,
|
| 1173 |
+
42
|
| 1174 |
+
],
|
| 1175 |
+
"page_idx": 10
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "text",
|
| 1179 |
+
"text": "Data availability",
|
| 1180 |
+
"text_level": 1,
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
73,
|
| 1183 |
+
53,
|
| 1184 |
+
196,
|
| 1185 |
+
68
|
| 1186 |
+
],
|
| 1187 |
+
"page_idx": 11
|
| 1188 |
+
},
|
| 1189 |
+
{
|
| 1190 |
+
"type": "text",
|
| 1191 |
+
"text": "All data used in the study are from public datasets, and detailed references are provided in Supplementary Table 1. We also create a dedicated website, accessible at https://medsam-datasetlist.github.io, to provide a comprehensive and continuously updated repository of medical image segmentation datasets. This resource is intended for long-term maintenance and accessibility to the research community.",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
71,
|
| 1194 |
+
71,
|
| 1195 |
+
924,
|
| 1196 |
+
131
|
| 1197 |
+
],
|
| 1198 |
+
"page_idx": 11
|
| 1199 |
+
},
|
| 1200 |
+
{
|
| 1201 |
+
"type": "text",
|
| 1202 |
+
"text": "Code availability",
|
| 1203 |
+
"text_level": 1,
|
| 1204 |
+
"bbox": [
|
| 1205 |
+
73,
|
| 1206 |
+
147,
|
| 1207 |
+
200,
|
| 1208 |
+
162
|
| 1209 |
+
],
|
| 1210 |
+
"page_idx": 11
|
| 1211 |
+
},
|
| 1212 |
+
{
|
| 1213 |
+
"type": "text",
|
| 1214 |
+
"text": "The code, model weights, and annotated datasets are publicly available at https://github.com/bowang-lab/MedSAM2. 3D slicer plugin can be accessed at https://github.com/bowang-lab/MedSAMSlicer.",
|
| 1215 |
+
"bbox": [
|
| 1216 |
+
71,
|
| 1217 |
+
165,
|
| 1218 |
+
923,
|
| 1219 |
+
196
|
| 1220 |
+
],
|
| 1221 |
+
"page_idx": 11
|
| 1222 |
+
},
|
| 1223 |
+
{
|
| 1224 |
+
"type": "text",
|
| 1225 |
+
"text": "REFERENCES",
|
| 1226 |
+
"text_level": 1,
|
| 1227 |
+
"bbox": [
|
| 1228 |
+
73,
|
| 1229 |
+
214,
|
| 1230 |
+
187,
|
| 1231 |
+
228
|
| 1232 |
+
],
|
| 1233 |
+
"page_idx": 11
|
| 1234 |
+
},
|
| 1235 |
+
{
|
| 1236 |
+
"type": "list",
|
| 1237 |
+
"sub_type": "ref_text",
|
| 1238 |
+
"list_items": [
|
| 1239 |
+
"[1] D. Ouyang, B. He, A. Ghorbani, N. Yuan, J. Ebinger, C. P. Langlotz, P. A. Heidenreich, R. A. Harrington, D. H. Liang, E. A. Ashley, and J. Y. Zou, \"Video-based ai for beat-to-beat assessment of cardiac function,\" Nature, vol. 580, no. 7802, pp. 252-256, 2020.",
|
| 1240 |
+
"[2] Y.-R. Wang, K. Yang, Y. Wen, P. Wang, Y. Hu, Y. Lai, Y. Wang, K. Zhao, S. Tang, A. Zhang, H. Zhan, M. Lu, X. Chen, S. Yang, Z. Dong, Y. Wang, H. Liu, L. Zhao, L. Huang, Y. Li, L. Wu, Z. Chen, Y. Luo, D. Liu, P. Zhao, K. Lin, J. C. Wu, and S. Zhao, \"Screening and diagnosis of cardiovascular disease using artificial intelligence-enabled cardiac magnetic resonance imaging,\" Nature Medicine, vol. 30, no. 5, p. 1471-1480, 2024.",
|
| 1241 |
+
"[3] K. Cao, Y. Xia, J. Yao, X. Han, L. Lambert, T. Zhang, W. Tang, G. Jin, H. Jiang, X. Fang et al., \"Large-scale pancreatic cancer detection via non-contrast ct and deep learning,\" Nature medicine, vol. 29, no. 12, pp. 3033-3043, 2023.",
|
| 1242 |
+
"[4] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille, \"Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 4, pp. 834-848, 2018.",
|
| 1243 |
+
"[5] L.-C. Chen, Y. Zhu, G. Papandreou, F. Schroff, and H. Adam, \"Encoder-decoder with atrous separable convolution for semantic image segmentation,\" in Proceedings of the European Conference on Computer Vision, 2018, pp. 801-818.",
|
| 1244 |
+
"[6] B. He, A. C. Kwan, J. H. Cho, N. Yuan, C. Pollick, T. Shiota, J. Ebinger, N. A. Bello, J. Wei, K. Josan, G. Duffy, M. Jujavarapu, R. Siegel, S. Cheng, J. Y. Zou, and D. Ouyang, \"Blinded, randomized trial of sonographer versus AI cardiac function assessment,\" Nature, vol. 616, no. 7957, pp. 520-524, 2023.",
|
| 1245 |
+
"[7] O. Ronneberger, P. Fischer, and T. Brox, \"U-net: Convolutional networks for biomedical image segmentation,\" in Medical Image Computing and Computer-Assisted Intervention, 2015, pp. 234-241.",
|
| 1246 |
+
"[8] T. Falk, D. Mai, R. Bensch, O. undefiniedicek, A. Abdulkadir, Y. Marrakchi, A. Böhm, J. Deubner, Z. Jäckel, K. Seiwald, A. Dovzhenko, O. Tietz, C. Dal Bosco, S. Walsh, D. Saltukoglu, T. L. Tay, M. Prinz, K. Palme, M. Simons, I. Diester, T. Brox, and O. Ronneberger, “U-net: deep learning for cell counting, detection, and morphometry,” Nature Methods, vol. 16, no. 1, p. 67-70, 2018.",
|
| 1247 |
+
"[9] F. Isensee, P. F. Jaeger, S. A. Kohl, J. Petersen, and K. H. Maier-Hein, \"nnu-net: a self-configuring method for deep learning-based biomedical image segmentation,\" Nature Methods, vol. 18, no. 2, pp. 203-211, 2021.",
|
| 1248 |
+
"[10] J. Ma, Y. Zhang, S. Gu, C. Ge, S. Mae, A. Young, C. Zhu, X. Yang, K. Meng, Z. Huang, F. Zhang, Y. Pan, S. Huang, J. Wang, M. Sun, R. Zhang, D. Jia, J. W. Choi, N. Alves, B. de Wilde, G. Koehler, H. Lai, E. Wang, M. Wiesenfarth, Q. Zhu, G. Dong, J. He, J. He, H. Yang, B. Huang, M. Lyu, Y. Ma, H. Guo, W. Xu, K. Maier-Hein, Y. Wu, and B. Wang, \"Unleashing the strengths of unlabelled data in deep learning-assisted pan-cancer abdominal organ quantification: the flare22 challenge,\" The Lancet Digital Health, vol. 6, no. 11, p. e815-e826, 2024.",
|
| 1249 |
+
"[11] S. Gatidis, M. Früh, M. P. Fabritius, S. Gu, K. Nikolaou, C. L. Fougère, J. Ye, J. He, Y. Peng, L. Bi, J. Ma, B. Wang, J. Zhang, Y. Huang, L. Heiliger, Z. Marinov, R. Stiefelhagen, J. Egger, J. Kleesiek, L. Sibille, L. Xiang, S. Bendazzoli, M. Astaraki, M. Ingrisch, C. C. Cyran, and T. Küstner, \"Results from the autopet challenge on fully automated lesion segmentation in oncologic pet/ct imaging,\" Nature Machine Intelligence, vol. 6, no. 11, p. 1396-1405, 2024.",
|
| 1250 |
+
"[12] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" in International Conference on Learning Representations, 2020.",
|
| 1251 |
+
"[13] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dollar, and R. Girshick, \"Segment anything,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 4015-4026.",
|
| 1252 |
+
"[14] M. Moor, O. Banerjee, Z. S. H. Abad, H. M. Krumholz, J. Leskovec, E. J. Topol, and P. Rajpurkar, \"Foundation models for generalist medical artificial intelligence,\" Nature, vol. 616, no. 7956, pp. 259-265, 2023.",
|
| 1253 |
+
"[15] Y. He, F. Huang, X. Jiang, Y. Nie, M. Wang, J. Wang, and H. Chen, \"Foundation model for advancing healthcare: Challenges, opportunities and future directions,\" IEEE Reviews in Biomedical Engineering, pp. 1-20, 2024.",
|
| 1254 |
+
"[16] W. Khan, S. Leem, K. B. See, J. K. Wong, S. Zhang, and R. Fang, \"A comprehensive survey of foundation models in medicine,\" IEEE Reviews in Biomedical Engineering, 2025.",
|
| 1255 |
+
"[17] M. A. Mazurowski, H. Dong, H. Gu, J. Yang, N. Konz, and Y. Zhang, \"Segment anything model for medical image analysis: an experimental study,\" Medical Image Analysis, vol. 89, p. 102918, 2023.",
|
| 1256 |
+
"[18] Y. Huang, X. Yang, L. Liu, H. Zhou, A. Chang, X. Zhou, R. Chen, J. Yu, J. Chen, C. Chen et al., \"Segment anything model for medical images?\" Medical Image Analysis, vol. 92, p. 103061, 2024.",
|
| 1257 |
+
"[19] J. Ma, Y. He, F. Li, L. Han, C. You, and B. Wang, \"Segment anything in medical images,\" Nature Communications, vol. 15, p. 654, 2024.",
|
| 1258 |
+
"[20] J. Cheng, J. Ye, Z. Deng, J. Chen, T. Li, H. Wang, Y. Su, Z. Huang, J. Chen, L. Jiang, H. Sun, J. He, S. Zhang, M. Zhu, and Y. Qiao, \"Sam-med2d,\" arXiv preprint arXiv:2308.16184, 2023.",
|
| 1259 |
+
"[21] H. Wang, S. Guo, J. Ye, Z. Deng, J. Cheng, T. Li, J. Chen, Y. Su, Z. Huang, Y. Shen, B. Fu, S. Zhang, J. He, and Y. Qiao, \"Sam-med3d: Towards general-purpose segmentation models for volumetric medical images,\" arXiv preprint arXiv:2310.15161, 2024.",
|
| 1260 |
+
"[22] C. Chen, J. Miao, D. Wu, A. Zhong, Z. Yan, S. Kim, J. Hu, Z. Liu, L. Sun, X. Li, T. Liu, P.-A. Heng, and Q. Li, \"Ma-sam: Modality-agnostic sam adaptation for 3d medical image segmentation,\" Medical Image Analysis, vol. 98, p. 103310, 2024.",
|
| 1261 |
+
"[23] S. Gong, Y. Zhong, W. Ma, J. Li, Z. Wang, J. Zhang, P.-A. Heng, and Q. Dou, \"3dsam-adapter: Holistic adaptation of sam from 2d to 3d for promptable tumor segmentation,\" Medical Image Analysis, vol. 98, p. 103324, 2024.",
|
| 1262 |
+
"[24] J. Wu, Z. Wang, M. Hong, W. Ji, H. Fu, Y. Xu, M. Xu, and Y. Jin, \"Medical sam adapter: Adapting segment anything model for medical image segmentation,\" Medical Image Analysis, p. 103547, 2025.",
|
| 1263 |
+
"[25] Y. Du, F. Bai, T. Huang, and B. Zhao, \"Segvol: Universal and interactive volumetric medical image segmentation,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 110746-110783.",
|
| 1264 |
+
"[26] Y. He, P. Guo, Y. Tang, A. Myronenko, V. Nath, Z. Xu, D. Yang, C. Zhao, B. Simon, M. Belue, S. Harmon, B. Turkbey, D. Xu, and W. Li, \"VISTA3D: A unified segmentation foundation model for 3D medical imaging,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, 2024.",
|
| 1265 |
+
"[27] I. Fabian, R. Maximilian, K. Lars, D. Stefan, R. Ashis, S. Florian, H. Benjamin, W. Tassilo, L. Moritz, U. Constantin, D. Jonathan, F. Ralf, and M.-H. Klaus, \"nninteractive: Redefining 3D promptable segmentation,\" arXiv preprint arXiv:2503.08373, 2025."
|
| 1266 |
+
],
|
| 1267 |
+
"bbox": [
|
| 1268 |
+
73,
|
| 1269 |
+
234,
|
| 1270 |
+
924,
|
| 1271 |
+
941
|
| 1272 |
+
],
|
| 1273 |
+
"page_idx": 11
|
| 1274 |
+
},
|
| 1275 |
+
{
|
| 1276 |
+
"type": "page_number",
|
| 1277 |
+
"text": "12",
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
906,
|
| 1280 |
+
32,
|
| 1281 |
+
921,
|
| 1282 |
+
42
|
| 1283 |
+
],
|
| 1284 |
+
"page_idx": 11
|
| 1285 |
+
},
|
| 1286 |
+
{
|
| 1287 |
+
"type": "list",
|
| 1288 |
+
"sub_type": "ref_text",
|
| 1289 |
+
"list_items": [
|
| 1290 |
+
"[28] N. Ravi, V. Gabeur, Y.-T. Hu, R. Hu, C. Ryali, T. Ma, H. Khedr, R. Rädle, C. Rolland, L. Gustafson, E. Mintun, J. Pan, K. V. Alwala, N. Carion, C.-Y. Wu, R. Girshick, P. Dollar, and C. Feichtenhofer, \"Sam 2: Segment anything in images and videos,\" in International Conference on Learning Representations, 2025.",
|
| 1291 |
+
"[29] Y. Zhang and Z. Shen, \"Unleashing the potential of sam2 for biomedical images and videos: A survey,\" arXiv preprint arXiv:2408.12889, 2024.",
|
| 1292 |
+
"[30] J. Zhu, A. Hamdi, Y. Qi, Y. Jin, and J. Wu, \"Medical sam 2: Segment medical images as video via segment anything model 2,\" arXiv preprint arXiv:2408.00874, 2024.",
|
| 1293 |
+
"[31] Z. Yan, W. Sun, R. Zhou, Z. Yuan, K. Zhang, Y. Li, T. Liu, Q. Li, X. Li, L. He, and L. Sun, \"Biomedical sam 2: Segment anything in biomedical images and videos,\" 2024.",
|
| 1294 |
+
"[32] J. Ma, S. Kim, F. Li, M. Baharoon, R. Asakereh, H. Lyu, and B. Wang, \"Segment anything in medical images and videos: Benchmark and deployment,\" arXiv preprint arXiv:2408.03322, 2024.",
|
| 1295 |
+
"[33] C. Ryali, Y.-T. Hu, D. Bolya, C. Wei, H. Fan, P.-Y. Huang, V. Aggarwal, A. Chowdhury, O. Poursaeed, J. Hoffman et al., \"Hiera: A hierarchical vision transformer without the bells-and-whistles,\" in International conference on machine learning. PMLR, 2023, pp. 29441-29454.",
|
| 1296 |
+
"[34] A. Archit, L. Freckmann, S. Nair, N. Khalid, P. Hilt, V. Rajashekar, M. Freitag, C. Teuber, G. Buckley, S. von Haaren, S. Gupta, A. Dengel, S. Ahmed, and C. Pape, \"Segment anything for microscopy,\" Nature Methods, 2025.",
|
| 1297 |
+
"[35] L. Ke, M. Ye, M. Danelljan, Y. liu, Y.-W. Tai, C.-K. Tang, and F. Yu, \"Segment anything in high quality,\" in Advances in Neural Information Processing Systems, vol. 36, 2023, pp. 29914-29934.",
|
| 1298 |
+
"[36] A. Pfefferle, L. Purucker, and F. Hutter, \"Daft: Data-aware fine-tuning of foundation models for efficient and effective medical image segmentation,\" in CVPR 2024: Segment Anything In Medical Images On Laptop, 2024.",
|
| 1299 |
+
"[37] J. Ma, F. Li, S. Kim, R. Asakereh, B.-H. Le, D.-K. Nguyen-Vu, A. Pfefferle, M. Wei, R. Gao, D. Lyu, S. Yang, L. Purucker, Z. Marinov, M. Staring, H. Lu, T. T. Dao, X. Ye, Z. Li, G. Brugnara, P. Vollmuth, M. Foltyn-Dumitru, J. Cho, M. A. Mahmutoglu, M. Bendszus, I. Pflüger, A. Rastogi, D. Ni, X. Yang, G.-Q. Zhou, K. Wang, N. Heller, N. Papanikolopoulos, C. Weight, Y. Tong, J. K. Udupa, C. J. Patrick, Y. Wang, Y. Zhang, F. Contijoch, E. McVeigh, X. Ye, S. He, R. Haase, T. Pinetz, A. Radbruch, I. Krause, E. Kobler, J. He, Y. Tang, H. Yang, Y. Huo, G. Luo, K. Kushibar, J. Amankulov, D. Toleshbayev, A. Mukhamejan, J. Egger, A. Pepe, C. Gsaxner, G. Luijten, S. Fujita, T. Kikuchi, B. Wiestler, J. S. Kirschke, E. de la Rosa, F. Bolelli, L. Lumetti, C. Grana, K. Xie, G. Wu, B. Puladi, C. Martin-Isla, K. Lekadir, V. M. Campello, W. Shao, W. Brisbane, H. Jiang, H. Wei, W. Yuan, S. Li, Y. Zhou, and B. Wang, \"Efficient medsams: Segment anything in medical images on laptop,\" arXiv:2412.16085, 2024.",
|
| 1300 |
+
"[38] S. Leclerc, E. Smistad, J. Pedrosa, A. Østvik, F. Cervenansky, F. Espinosa, T. Espeland, E. A. R. Berg, P-M. Jodoin, T. Grenier et al., \"Deep learning for segmentation using an open large-scale dataset in 2d echocardiography,\" IEEE Transactions on Medical Imaging, vol. 38, no. 9, pp. 2198-2210, 2019.",
|
| 1301 |
+
"[39] M. Misawa, S.-e. Kudo, Y. Mori, K. Hotta, K. Ohtsuka, T. Matsuda, S. Saito, T. Kudo, T. Baba, F. Ishida, H. Itoh, M. Oda, and K. Mori, \"Development of a computer-aided detection system for colonoscopy and a publicly accessible large colonoscopy video database (with video),\" Gastrointestinal Endoscopy, vol. 93, no. 4, pp. 960-967.e3, 2021.",
|
| 1302 |
+
"[40] G.-P. Ji, G. Xiao, Y.-C. Chou, D.-P. Fan, K. Zhao, G. Chen, and L. Van Gool, \"Video polyp segmentation: A deep learning perspective,\" Machine Intelligence Research, vol. 19, no. 6, pp. 531-549, 2022.",
|
| 1303 |
+
"[41] E. A. Eisenhauer, P. Therasse, J. Bogaerts, L. H. Schwartz, D. Sargent, R. Ford, J. Dancey, S. Arbuck, S. Gwyther, M. Mooney et al., \"New response evaluation criteria in solid tumours: revised recist guideline (version 1.1),\" European Journal of Cancer, vol. 45, no. 2, pp. 228-247, 2009.",
|
| 1304 |
+
"[42] K. Yan, X. Wang, L. Lu, and R. M. Summers, \"Deeplion: automated mining of large-scale lesion annotations and universal lesion detection with deep learning,\" Journal of Medical Imaging, vol. 5, no. 3, pp. 036-036-036-036, 2018.",
|
| 1305 |
+
"[43] M. Lou, H. Ying, X. Liu, H.-Y. Zhou, Y. Zhang, and Y. Yu, \"Sdr-former: A siamese dual-resolution transformer for liver lesion classification using 3d multi-phase imaging,\" Neural Networks, p. 107228, 2025.",
|
| 1306 |
+
"[44] C. Varghese, E. M. Harrison, G. O'Grady, and E. J. Topol, \"Artificial intelligence in surgery,\" Nature Medicine, vol. 30, no. 5, pp. 1257-1268, 2024.",
|
| 1307 |
+
"[45] B. Magyar, M. Tokodi, A. Soos, M. Tolvaj, B. K. Lakatos, A. Fabian, E. Surkova, B. Merkely, A. Kovacs, and A. Horvath, \"Rvenet: A large echocardiographic dataset for the deep learning-based assessment of right ventricular function,\" in Computer Vision - ECCV 2022 Workshops. Springer Nature Switzerland, 2023, p. 569-583.",
|
| 1308 |
+
"[46] M. Tokodi, B. Magyar, A. Soos, M. Takeuchi, M. Tolvaj, B. K. Lakatos, T. Kitano, Y. Nabeshima, A. Fábian, M. B. Szigeti et al., \"Deep learning-based prediction of right ventricular ejection fraction using 2d echocardiograms,\" Cardiovascular Imaging, vol. 16, no. 8, pp. 1005-1018, 2023.",
|
| 1309 |
+
"[47] R. Kikinis, S. D. Pieper, and K. G. Vosburgh, \"3d slicer: a platform for subject-specific image analysis, visualization, and clinical support,\" in Intraoperative imaging and image-guided therapy. Springer, 2013, pp. 277-289.",
|
| 1310 |
+
"[48] A. Abid, A. Abdalla, A. Abid, D. Khan, A. Alfozan, and J. Zou, \"Gradio: Hassle-free sharing and testing of ml models in the wild,\" arXiv preprint arXiv:1906.02569, 2019.",
|
| 1311 |
+
"[49] Z. Zhao, Y. Zhang, C. Wu, X. Zhang, Y. Zhang, Y. Wang, and W. Xie, \"One model to rule them all: Towards universal segmentation for medical images with text prompts,\" arXiv preprint arXiv:2312.17183, 2023.",
|
| 1312 |
+
"[50] T. Zhao, Y. Gu, J. Yang, N. Usuyama, H. H. Lee, S. Kiblawi, T. Naumann, J. Gao, A. Crabtree, J. Abel, C. Moung-Wen, B. Piening, C. Bifulco, M. Wei, H. Poon, and S. Wang, \"A foundation model for joint segmentation, detection and recognition of biomedical objects across nine modalities,\" Nature Methods, 2024.",
|
| 1313 |
+
"[51] K. Bartnik, T. Bartczak, M. Krzyzinski, K. Korzeniowski, K. Lamparski, P. Wegrzyn, E. Lam, M. Bartkowiak, T. Wroblewski, K. Mech, M. Januszewicz, and P. Biecek, \"Waw-tace: A hepatocellular carcinoma multiphase ct dataset with segmentations, radiomics features, and clinical data,\" Radiology: Artificial Intelligence, vol. 6, no. 6, p. e240296, 2024.",
|
| 1314 |
+
"[52] T.-Y. Lin, P. Dólar, R. Girshick, K. He, B. Hariharan, and S. Belongie, “Feature pyramid networks for object detection,” in Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, 2017, pp. 2117-2125.",
|
| 1315 |
+
"[53] J. Su, M. Ahmed, Y. Lu, S. Pan, W. Bo, and Y. Liu, \"Rofomer: Enhanced transformer with rotary position embedding,\" Neurocomputing, vol. 568, p. 127063, 2024.",
|
| 1316 |
+
"[54] I. Loshchilov and F. Hutter, \"Decoupled weight decay regularization,\" in International Conference on Learning Representations, 2019.",
|
| 1317 |
+
"[55] L. Maier-Hein, A. Reinke, P. Godau, M. D. Tizabi, F. Buettner, E. Christodoulou, B. Glocker, F. Isensee, J. Kleesiek, M. Kozubek, M. Reyes, M. A. Riegler, M. Wiesenfarth, A. E. Kavur, C. H. Sudre, M. Baumgartner, M. Eisenmann, D. Heckmann-Nötzel, T. Rädsch, L. Acion, M. Antonelli, T. Arbel, S. Bakas, A. Benis, M. B. Blaschko, M. J. Cardoso, V. Cheplygina, B. A. Cimini, G. S. Collins, K. Farahani, L. Ferrer, A. Galdran, B. van Ginneken, R. Haase, D. A. Hashimoto, M. M. Hoffman, M. Huisman, P. Jannin, C. E. Kahn, D. Kainmueller, B. Kainz, A. Karargyris, A. Karthikesalingam, F. Kofler, A. Kopp-Schneider, A. Kreshuk, T. Kurc, B. A. Landman, G. Litjens, A. Madani, K. Maier-Hein, A. L. Martel, P. Mattson, E. Meijering, B. Menze, K. G. M. Moons, H. Müller, B. Nichyporuk, F. Nickel, J. Petersen, N. Rajpoot, N. Rieke, J. Saez-Rodriguez, C. I. Sánchez, S. Shetty, M. van Smeden, R. M. Summers, A. A. Taha, A. Tiulpin, S. A. Tsaftaris, B. Van Calster, G. Varoquaux, and P. F. Jäger, \"Metrics reloaded: recommendations for image analysis validation,\" Nature Methods, vol. 21, no. 2, p. 195-212, 2024."
|
| 1318 |
+
],
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
76,
|
| 1321 |
+
54,
|
| 1322 |
+
921,
|
| 1323 |
+
886
|
| 1324 |
+
],
|
| 1325 |
+
"page_idx": 12
|
| 1326 |
+
},
|
| 1327 |
+
{
|
| 1328 |
+
"type": "page_number",
|
| 1329 |
+
"text": "13",
|
| 1330 |
+
"bbox": [
|
| 1331 |
+
906,
|
| 1332 |
+
32,
|
| 1333 |
+
921,
|
| 1334 |
+
42
|
| 1335 |
+
],
|
| 1336 |
+
"page_idx": 12
|
| 1337 |
+
}
|
| 1338 |
+
]
|
data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_03xxx/2504.03600/e5e6f2c9-b520-45ba-a6d8-f048ec675c39_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e256b32fdb7b6cdc13d7b9c496afab9921f6fd4d7b156715ffcd8ed1bacec3da
|
| 3 |
+
size 9428793
|
data/2025/2504_03xxx/2504.03600/full.md
ADDED
|
@@ -0,0 +1,266 @@
|
| 1 |
+
# MedSAM2: Segment Anything in 3D Medical Images and Videos
|
| 2 |
+
|
| 3 |
+
Jun Ma*, Zongxin Yang*, Sumin Kim, Bihui Chen, Mohammed Baharoon, Adibvafa Fallahpour, Reza Asakereh, Hongwei Lyu, and Bo Wang†
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Medical image and video segmentation is a critical task for precision medicine, which has witnessed considerable progress in developing task or modality-specific and generalist models for 2D images. However, there have been limited studies on building general-purpose models for 3D images and videos with comprehensive user studies. Here, we present MedSAM2, a promptable segmentation foundation model for 3D image and video segmentation. The model is developed by fine-tuning the Segment Anything Model 2 on a large medical dataset with over 455,000 3D image-mask pairs and 76,000 frames, outperforming previous models across a wide range of organs, lesions, and imaging modalities. Furthermore, we implement a human-in-the-loop pipeline to facilitate the creation of large-scale datasets resulting in, to the best of our knowledge, the most extensive user study to date, involving the annotation of 5,000 CT lesions, 3,984 liver MRI lesions, and 251,550 echocardiogram video frames, demonstrating that MedSAM2 can reduce manual costs by more than $85\%$ . MedSAM2 is also integrated into widely used platforms with user-friendly interfaces for local and cloud deployment, making it a practical tool for supporting efficient, scalable, and high-quality segmentation in both research and healthcare environments.
|
| 8 |
+
|
| 9 |
+
# INTRODUCTION
|
| 10 |
+
|
| 11 |
+
Medical image segmentation plays a pivotal role in numerous clinical applications, including anatomical structure analysis [1], disease diagnosis [2], surgery planning, and treatment monitoring [3]. By delineating the boundaries of organs, lesions, and other relevant anatomies, segmentation algorithms provide clinicians with crucial information for precise disease analysis. Over the past decade, deep learning-based methods have revolutionized this field, delivering unprecedented performance on various segmentation tasks and benchmarks. For example, DeepLab [4], [5] has achieved human-level performance in left ventricle segmentation from echocardiography for ejection fraction assessment [1], which has been shown to save time for both sonographers and cardiologists in a blinded, randomized clinical trial [6]. U-Net [7] has been employed for accurate cell detection and segmentation in light microscopy images [8], and 3D nnU-Net [9] has been widely used for various anatomy and lesion segmentation tasks, such as heart chamber segmentation in Magnetic Resonance Imaging (MRI) scans [2], pancreatic cancer and abdominal organ segmentation in Computed Tomography (CT) scans [3], [10], and whole-body lesion segmentation in Positron Emission Tomography (PET) scans [11].
|
| 12 |
+
|
| 13 |
+
Driven by advanced network architectures [12] and large-scale datasets [13], recent trends in segmentation present a paradigm shift from specialist models tailored for specific tasks to generalist or foundation models capable of performing segmentation without extensive task-specific model development [14]-[16]. One prominent example is the Segment Anything Model (SAM) [13], a pioneering segmentation foundation model in computer vision that has shown remarkable generalization ability across a wide range of two-dimensional (2D) natural image segmentation tasks. However, due to the substantial domain gap, its performance remains suboptimal on medical images [17], [18]. Despite these limitations, SAM can be effectively adapted to the medical domain through transfer learning. For instance, models such as MedSAM [19] and SAM-Med [20], [21] have demonstrated strong capabilities in segmenting various organs and abnormalities across diverse medical imaging modalities by fine-tuning SAM on large-scale medical datasets.
- Jun Ma is with AI Collaborative Centre, University Health Network; Vector Institute, Toronto, Canada (* Equal Contribution).
- Zongxin Yang is with Department of Biomedical Informatics, Harvard Medical School, Harvard University, Boston, USA (* Equal Contribution).
- Sumin Kim is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.
- Bihui Chen is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.
- Mohammed Baharoon is with Department of Biomedical Informatics, Harvard Medical School, Harvard University, Boston, USA. Part of this work was done at the University of Toronto, Toronto, Canada.
- Adibvafa Fallahpour is with Peter Munk Cardiac Centre, University Health Network; Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada.
- Reza Asakereh participated in this project when he was with Peter Munk Cardiac Centre, University Health Network, Toronto, Canada.
- Hongwei Lyu is with Peter Munk Cardiac Centre, University Health Network, Toronto, Canada.
- Bo Wang is with Peter Munk Cardiac Centre and AI Hub, University Health Network; Department of Laboratory Medicine and Pathobiology and Department of Computer Science, University of Toronto; Vector Institute, Toronto, Canada (†Corresponding Author). E-mail: bowang@vectorinstitute.ai
Despite the potential of these foundation models, their application to medical imaging still faces three main limitations. First, most medical image segmentation foundation models [19], [20] are primarily designed for 2D image data and may not capture the three-dimensional (3D) spatial relationships or temporal information in volumetric and video medical data. Second, although some studies have extended SAM to 3D image segmentation using 3D image encoders [21] and adapters [22]-[24] or developed interactive 3D segmentation models [25]-[27] to incorporate manual corrections, there is still a lack of general models that segment both 3D images and videos, which are frequently required in real-world clinical workflows. The state-of-the-art video segmentation model, SAM2 [28], has shown great potential to fill this gap [29]-[32], but its adaptation on large-scale medical datasets has been underexplored. Finally, large-scale validation of these models in practical image-labeling scenarios remains notably absent, leaving important questions about their scalability and utility in facilitating high-throughput medical image annotation tasks.
In this work, we address these limitations by presenting MedSAM2, a general model for 3D medical image and video segmentation. Specifically, we first curate a large-scale dataset consisting of more than 455,000 3D image-mask pairs and 76,000 annotated video frames, spanning multiple organs, pathologies, and imaging protocols, for model development. Then, we build MedSAM2 by modifying and fine-tuning SAM2 on this large dataset. Extensive experiments show that MedSAM2 is capable of handling both volumetric medical scans and successive video frames, enabling versatile segmentation across diverse medical data. Furthermore, we conduct three user studies to demonstrate that MedSAM2 facilitates high-throughput annotation workflows, substantially reducing the time and effort required to create large-scale medical datasets in various imaging modalities. MedSAM2 has the potential to transform clinical workflows by enabling more efficient diagnostic processes, treatment planning, and longitudinal monitoring across cardiology, oncology, and surgical specialties, where precise 3D organ and lesion segmentation is critical but traditionally time-consuming.
# RESULTS
# Dataset and model architecture
A large amount of training data is the foundation for developing generalist segmentation models. We assembled a large-scale and diverse 3D medical image and video dataset from public datasets, covering a wide range of normal anatomical structures and pathologies across medical imaging modalities (Fig. 1a, Methods, Supplementary Table 1). In particular, we collected 363,161, 14,818, and 77,154 3D image-mask pairs for CT, PET, and MRI modalities, respectively. In addition, we curated 19,232 and 56,462 annotated frames for ultrasound and endoscopy, respectively.
The pre-trained SAM2 model [28], which was trained on 256 A100 GPUs, provides a strong backbone for general feature representations. To reuse the pre-trained model weights and avoid prohibitive computing costs, MedSAM2 adopts the SAM2 network architecture, including an image encoder, a memory attention module, a prompt encoder, and a mask decoder (Fig. 1b). The image encoder extracts multi-scale features from each 2D slice or video frame using the hierarchical vision transformer (Hiera) [33], which is faster and more accurate than the naïve vision transformer [12] used in SAM. The memory attention module employs transformer blocks with self-attention and cross-attention mechanisms to condition the current frame features on previous frames' predictions through a streaming memory bank. The prompt encoder converts various user interactions (i.e., points, bounding boxes, and masks) into embeddings. We used bounding boxes as the main prompt because they are less ambiguous in specifying the segmentation target, making them suitable for most organs and lesions. Specifically, for 3D images, we applied the bounding box prompt on the middle slice and propagated the segmentation mask bidirectionally toward both ends of the volume. Finally, the mask decoder combines memory-conditioned features and prompt embeddings to produce accurate segmentation masks.
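To make the propagation scheme concrete, the following is a minimal sketch of mid-slice prompting with bidirectional propagation over a 3D volume. The callables `segment_slice_with_box` and `propagate_mask` are hypothetical placeholders standing in for the promptable model and its memory-conditioned propagation step; they are not part of the released MedSAM2 API.

```python
import numpy as np

def segment_volume_from_midslice(volume, box, segment_slice_with_box, propagate_mask):
    """Segment a 3D volume from a single bounding-box prompt on the middle slice.

    volume: (Z, H, W) array of preprocessed slices.
    box: (x_min, y_min, x_max, y_max) prompt on the middle slice.
    segment_slice_with_box / propagate_mask: placeholder callables standing in
    for the promptable model and its memory-conditioned propagation step.
    """
    z_mid = volume.shape[0] // 2
    masks = np.zeros(volume.shape, dtype=np.uint8)

    # 1) Prompt the model with a bounding box on the middle slice.
    masks[z_mid] = segment_slice_with_box(volume[z_mid], box)

    # 2) Propagate the mask toward the top of the volume, conditioning each
    #    slice on the previous prediction (memory attention in the real model).
    prev = masks[z_mid]
    for z in range(z_mid + 1, volume.shape[0]):
        prev = propagate_mask(volume[z], prev)
        masks[z] = prev

    # 3) Propagate toward the bottom of the volume in the same way.
    prev = masks[z_mid]
    for z in range(z_mid - 1, -1, -1):
        prev = propagate_mask(volume[z], prev)
        masks[z] = prev

    return masks
```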
Existing studies have demonstrated that fine-tuning the whole model yields better performance than fine-tuning only individual components, such as the image encoder, the mask decoder, or the prompt encoder [34], [35]. For MedSAM2, we therefore employ full-model fine-tuning on the lightweight SAM2.1-Tiny variant, which achieves competitive performance with fewer parameters than larger variants. During fine-tuning, we applied a lower learning rate to the image encoder to preserve its pre-trained feature extraction capabilities and a higher learning rate to the other model parts. We also balanced the training data with different sampling rates across 3D images and videos to ensure strong performance across diverse modalities (Methods).
# Performance on various 3D medical image and video segmentation tasks
We first evaluated the trained model on the holdout 3D test set, which contains 40 segmentation tasks from different cohorts across a wide range of organs and lesions in CT, MRI, and PET scans. We also compared the latest SAM2.1 models with different sizes (tiny, small, base, and large) [28] and the current state-of-the-art (SOTA) bounding box-based segmentation foundation model (EfficientMedSAM-Top1) [36], which is the winning solution in the CVPR 2024 Efficient MedSAMs competition [37]. All models were initialized with a bounding box prompt on the middle slice of the segmentation target. Each model first generated a 2D mask at the middle slice and then propagated it bidirectionally to create the full 3D segmentation.
Fig. 2a shows the quantitative results on the 3D testing set (Supplementary Tables 2-3 and Supplementary Fig. 1). The SAM2.1 models exhibit similar performance across all categories, with no significant differences in median DSC scores ( $p$ -value $> 0.05$ ).
Fig. 1a panel titles: 3D CT & PET (378K 3D image-mask pairs); 3D MRI (77K 3D image-mask pairs); Ultrasound and Endoscopy Videos (76K frames).

Fig. 1. Dataset and network architecture for MedSAM2 development. a, The dataset includes diverse 3D CT, PET, and MRI images as well as ultrasound and endoscopy videos. For each 3D image example, we visualize both 2D slices and 3D structures. For each video example, we visualize frames at different time points. b, MedSAM2 is a promptable segmentation network with an image encoder, a prompt encoder, a memory attention module, and a mask decoder. The image encoder extracts multiscale features from each frame or 2D slice. The memory attention module conditions the current frame features on past frames' features and predictions using streaming memory. The mask decoder generates accurate segmentation masks based on bounding box prompts and memory-conditioned features. This architecture enables MedSAM2 to effectively segment both 3D medical images and videos by exploiting spatial continuity across slices and frames.

Fig. 2. Segmentation performance on hold-out 3D image and video datasets. a, Performance distribution of six models across five typical 3D segmentation tasks in terms of Dice similarity coefficient (DSC) scores: CT organs $(N = 783)$ , CT lesions $(N = 409)$ , MRI organs $(N = 734)$ , MRI lesions $(N = 318)$ , and PET lesions $(N = 65)$ . The center line within the box represents the median value, with the bottom and top bounds of the box delineating the 25th and 75th percentiles, respectively. Whiskers extend to 1.5 times the interquartile range. Up-triangles denote the minima and down-triangles denote the maxima. b, Visualized segmentation examples for stomach and liver cancer in computed tomography (CT), and spleen and brain cancer in Magnetic Resonance Imaging (MRI). Blue: initial bounding box prompts; Yellow: reference standards; Blue: best SAM2.1 segmentation results; Green: EfficientMedSAM-Top1 segmentation results; Magenta: MedSAM2 segmentation results. c, Performance distribution of SAM2.1 and MedSAM2 for left ventricle $(N = 100)$ , left ventricle epicardium $(N = 100)$ , and left atrium $(N = 100)$ segmentation in ultrasound videos and easy $(N = 119)$ and hard $(N = 54)$ polyp segmentation in endoscopy videos. d, Visualized segmentation examples for heart chambers and polyps in ultrasound and endoscopy videos, respectively.
This suggests that increasing model size within the SAM2.1 family does not necessarily translate into substantial improvements in segmentation accuracy for 3D medical images. EfficientMedSAM-Top1 outperforms all SAM2.1 variants on CT organs, CT lesions, and MRI lesions, achieving median DSC scores of $83.55\%$ (interquartile range (IQR): $67.20 - 91.78\%$ ), $77.95\%$ (69.15-84.81%), and $82.25\%$ (68.30-90.53%), respectively. However, its performance is not consistently superior to the SAM2.1 models on the MRI organ and PET lesion tasks, where its median DSC is $9.22\%$ and $2.74\%$ lower than that of the best SAM2.1 model, respectively. One possible reason is that the MRI organs dataset includes images from unseen MRI sequences that introduce variations in image characteristics.
The comparable performance of different SAM2.1 model sizes motivated us to build MedSAM2 by fine-tuning the lightweight SAM2.1-Tiny model, aiming to improve segmentation performance on medical image datasets without relying on immense computational resources. MedSAM2 consistently achieves the highest DSC scores across all targets (CT organs: $88.84\%$ (80.03-94.03%), CT lesions: $86.68\%$ (74.32-91.14%), MRI organs: $87.06\%$ (82.96-90.04%), MRI lesions: $88.37\%$ (79.91-93.26%), PET lesions: $87.22\%$ (79.07-90.45%)), indicating that transfer learning is an effective way to adapt general-domain foundation models to the medical image domain. For simple and well-defined anatomical structures, such as the kidneys and lungs, all methods, including the SAM2.1 variants, achieve high DSC scores (often above $95\%$ ), indicating that even general-purpose models can segment these targets accurately owing to their clear boundaries and consistent appearance. However, for more challenging targets with heterogeneous appearances and complex shapes, such as kidney lesions and the pancreas, MedSAM2 shows substantial performance improvements, highlighting its enhanced ability to handle greater anatomical variability. Qualitative results (Fig. 2b) show that MedSAM2 produces more accurate and robust boundaries than the other methods during propagation, owing to its memory design that effectively models temporal information across slices.
Next, we evaluated video segmentation performance for heart chamber and polyp segmentation in cardiac ultrasound (echocardiography) and endoscopy videos on the widely used CAMUS [38] and SUN [39], [40] datasets, respectively (Fig. 2c, Supplementary Table 3). The heart chamber dataset focuses on delineating three structures: the left ventricle, left ventricle epicardium, and left atrium. All SAM2.1 models perform similarly for left ventricle and left atrium segmentation with high DSC scores, but show greater DSC variance for the left ventricle epicardium because of its heterogeneous appearance and diverse shape changes. MedSAM2 achieves better performance across the three tasks, with the highest DSC scores of $96.13\%$ (95.09-97.15%), $93.10\%$ (91.07-94.11%), and $95.79\%$ (94.38-96.96%) and tighter DSC distributions for the left ventricle, left ventricle epicardium, and left atrium, respectively, indicating better robustness in segmenting dynamic structures.
The polyp test set contains an easy and a hard subset. On the easy polyp subset, SAM2.1 models achieve comparable results, with similar median DSC scores ranging from $92.11\%$ (75.74-96.47%) to $93.87\%$ (77.48-96.64%) across different model sizes. MedSAM2 obtains a similar median DSC score of $92.24\%$ (85.15-96.11%), but exhibits a much more compact distribution with a smaller interquartile range and fewer outliers. On the hard polyp subset, SAM2.1 models show a clear DSC score drop of $6.29\%$ to $10.33\%$ with wider variability and some outliers with low DSC scores. In contrast, MedSAM2 outperforms SAM2.1 with a noticeable gap and more consistent DSC scores of $92.22\%$ (83.37-95.88%).
Qualitative segmentation results (Fig. 2d) show that the SAM2.1 models struggle to capture fine structural boundaries, especially in regions with varying contrast or complex tissue transitions. For example, the SAM2.1 contours align with the anatomical boundaries of the left ventricle and atrium for most frames, but the segmentation quality deteriorates markedly for the left ventricle epicardium, where the contours exhibit irregular boundaries, fragmented edges, and deviations from the true anatomical shape. MedSAM2 produces smoother and more accurate segmentation results with fewer misaligned contours. For polyp segmentation, while all models successfully track the polyp, SAM2.1 exhibits over-segmentation by including surrounding tissues in some frames, suggesting that SAM2.1 models have difficulty maintaining spatial coherence in medical video segmentation. MedSAM2 provides a more refined and closely fitting contour, indicating a superior capability to distinguish polyps from the background, particularly under challenging lighting and texture variations.
Altogether, the SAM2.1 models perform well in simpler cases but exhibit higher variability and lower accuracy in difficult segmentation tasks. MedSAM2 consistently outperforms SAM2.1 and produces more reliable segmentation results with reduced variability, particularly in challenging cases, highlighting the importance of domain-specific fine-tuning for foundation models in medical image and video segmentation.
# MedSAM2 enables efficient 3D lesion annotation for large 3D CT and MRI datasets
Beyond evaluating the segmentation accuracy of MedSAM2, we assessed its practical value in assisting the annotation of large-scale 3D lesion datasets. Accurate and efficient lesion segmentation in 3D medical images is one of the most critical tasks for quantitative assessment of disease progression, treatment planning, and response evaluation. However, the heterogeneity of lesions (in size, shape, texture, and contrast) and the noise and artifacts inherent in medical images make manual segmentation a time-consuming and labor-intensive task.
To address this limitation, we developed a human-in-the-loop pipeline with MedSAM2 to assist 3D lesion annotation (Fig. 3a). Human annotators first draw a 2D bounding box specifying the lesion on the middle slice, where the lesion usually has its longest diameter. Lesion diameter is commonly used in RECIST (Response Evaluation Criteria in Solid Tumors) [41] to measure lesion burden in cancer therapeutics. The 2D image and the lesion bounding box are fed into MedSAM2 to generate a 2D segmentation mask.
Fig. 3. MedSAM2 for efficient lesion annotation in 3D CT and MRI scans. a, A human-in-the-loop pipeline for 3D lesion segmentation. b, Annotation time per CT lesion and c, the number of generated CT lesions during the iterative annotation process. d, Visualized segmentation examples of a liver lesion and a femoral osteosarcoma in CT scans. e, Annotation time per liver MRI lesion and f, the number of generated MRI lesions during the iterative annotation process. g, Visualized segmentation examples of hepatocellular carcinoma and hepatic abscess in venous contrast-enhanced phase and T2-weighted MRI scans, respectively. h, Average annotation time (seconds) per frame and i, the number of annotated frames during the iterative annotation process. j, Visualized segmentation examples of the left ventricle (red), myocardium (green), left atrium (blue), right ventricle (yellow), and right atrium (cyan).
The human annotator then revises this mask to obtain a refined 2D mask and also specifies the top and bottom slices of the lesion. MedSAM2 is then executed again to generate a complete 3D lesion segmentation mask by propagating the refined mask forward and backward to the top and bottom slices, respectively. Finally, the human annotator manually refines the 3D segmentation to obtain an accurate 3D lesion mask. When dozens of new annotations are completed, we fine-tune MedSAM2 for six to fifteen epochs to obtain a new model with improved performance. This pipeline is iterated multiple times to gradually generate large-scale annotations.
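The loop below summarizes the iterative pipeline described above; `medsam2_segment_2d`, `medsam2_propagate_3d`, `human_refine`, and `finetune` are hypothetical placeholders for the interactive segmentation, propagation, manual revision, and fine-tuning steps, so this is a sketch of the workflow rather than the released tooling.

```python
def annotate_lesions_iteratively(model, rounds, medsam2_segment_2d, medsam2_propagate_3d,
                                 human_refine, finetune):
    """Sketch of the iterative human-in-the-loop 3D lesion annotation pipeline.

    Each round: prompt on the middle slice, propagate in 3D between the
    user-specified top and bottom slices, let annotators refine, then
    fine-tune the model on the newly labeled cases. All callables are
    hypothetical placeholders for the steps described in the text.
    """
    labeled = []
    for cases in rounds:  # e.g. batches of 500, 1,500, and 3,000 CT lesions
        for image, box, z_top, z_bottom in cases:
            mask_2d = medsam2_segment_2d(model, image, box)          # middle-slice mask
            mask_2d = human_refine(mask_2d)                          # manual 2D revision
            mask_3d = medsam2_propagate_3d(model, image, mask_2d,
                                           z_top, z_bottom)          # bidirectional propagation
            labeled.append(human_refine(mask_3d))                    # final 3D revision
        model = finetune(model, labeled, epochs=6)  # 6-15 epochs per round in the text
    return model, labeled
```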
We first applied the annotation pipeline to lesion segmentation in CT scans, using DeepLesion [42], the largest lesion CT dataset, which contains a wide range of lesion types (Methods). This dataset provides 2D bounding box annotations on the key slice where the lesion reaches its maximum 2D diameter. These bounding boxes follow the RECIST guideline, which defines the lesion size with long-axis and short-axis diameter markers on the key slice. Our annotation pipeline ran for three iterative rounds to improve segmentation accuracy and efficiency. Fig. 3b-c present the average annotation time per lesion and the increasing number of annotated lesions across these rounds. In the first round, we selected 500 lesions of various sizes and used the trained MedSAM2 model in the annotation pipeline. Compared to manual annotation, which required an average of 525.9 seconds per lesion, the first round reduced the annotation time by $45\%$ , bringing it down to 289.2 seconds per lesion. We then fine-tuned MedSAM2 by combining the annotated dataset and existing CT lesion cases to derive a CT lesion-specific segmentation model, which was used in the second-round annotation. Using this improved model, we annotated 1,500 additional cases, further reducing the average annotation time to 185.3 seconds per lesion. For the third round, we updated the model again and annotated 3,000 unlabeled cases, achieving a remarkable reduction in annotation time to 74.3 seconds per lesion. Fig. 3d shows the segmentation results of two large lesions in the liver and femur. Notably, femoral osteosarcoma was not present in the training set, but the model was still able to generate good results, highlighting its capacity to generalize to unseen lesion types.
In addition, we used the pipeline to annotate the largest multi-phase liver lesion MRI dataset, LLD-MMRI2023 [43]. This dataset consists of seven liver lesion types across eight MRI phases, and each lesion has pre-defined bounding box prompts (Methods). Manual annotation required an average of 520.3 seconds per lesion, making it a time-intensive process. We conducted a three-round iterative annotation process similar to the CT experiments, progressively refining the segmentation model with the annotated data. As shown in Fig. 3e-f, in the first round, MedSAM2 substantially reduced the annotation time by $54\%$ to 240.5 seconds per lesion while successfully segmenting 498 lesions. To further enhance segmentation performance, we incorporated the first-round annotations into the training set and fine-tuned MedSAM2, leading to a more efficient second-round annotation process that reduced the time to 150.7 seconds per lesion and expanded the dataset by an additional 996 lesions. Building on this iterative improvement, we fine-tuned MedSAM2 once more for the third round to annotate the remaining 2,490 lesions, achieving an average annotation time of 65.2 seconds per lesion. Fig. 3g visualizes two segmentation examples of different lesion types: hepatocellular carcinoma in venous contrast-enhanced MRI and hepatic abscess in T2-weighted MRI, demonstrating that the annotation pipeline effectively handles diverse lesion appearances and generalizes to multi-phase MRI images with different characteristics. Across all rounds, this iterative process enabled the annotation of 3,984 liver lesions in approximately the time it would have taken to manually annotate only 500 cases.
# MedSAM2 enables high-throughput video annotation
Medical video annotation is a particularly resource-intensive and demanding task [44] because it requires frame-by-frame labeling of anatomical structures and pathological regions, making it much more complex than static 2D image segmentation. The dynamic nature of medical videos introduces additional challenges such as motion artifacts, varying illumination, and temporal consistency. Manual annotation in such cases is tedious and expensive, making it difficult to generate sufficient labeled data for deep learning model training or large-scale studies.
We adapted our annotation strategy for video data by leveraging MedSAM2's ability to process sequential frames with spatial and temporal coherence (Supplementary Fig. 2); a sketch of this workflow is shown below. Unlike the 3D pipeline, which uses mid-slice prompting, the video pipeline begins with users adding prompts to the segmentation targets on the first frame of the video. These prompts are then passed to the pre-trained MedSAM2 model to generate initial 2D segmentation masks for each target. Human annotators then review and refine these masks to ensure high quality, before feeding them back into MedSAM2 for propagation, where the model extends the refined segmentation across the remaining frames. After that, users further refine the video masks as needed, ensuring accurate delineation of the anatomical structures throughout the sequence. The annotated dataset is then added to the training set, allowing further fine-tuning of MedSAM2 to improve its performance on future video annotation.
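The compact sketch below mirrors the described video workflow, assuming the same kind of placeholder callables as before (`segment_first_frame`, `propagate_through_frames`, `human_refine`); it is an illustration of the steps, not the actual implementation.

```python
def annotate_video(model, frames, first_frame_prompts,
                   segment_first_frame, propagate_through_frames, human_refine):
    """Sketch of the video annotation workflow: prompt the first frame, refine,
    propagate, then refine the propagated masks where needed. All callables are
    hypothetical stand-ins for the steps described in the text."""
    # 1) Prompt-based segmentation of every target on the first frame.
    first_masks = [human_refine(segment_first_frame(model, frames[0], p))
                   for p in first_frame_prompts]

    # 2) Propagate the refined first-frame masks across the remaining frames.
    video_masks = propagate_through_frames(model, frames, first_masks)

    # 3) Final pass: annotators correct frames that still need adjustment.
    return [human_refine(m) for m in video_masks]
```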
We studied the annotation pipeline for heart chamber annotation based on the right ventricular ejection (RVENet) dataset [45], [46], which contains apical four-chamber view cardiac ultrasound (echocardiography) videos of 831 patients with varying image quality and heart conditions. Echocardiography is a widely used, non-invasive imaging modality for assessing cardiac function [1], [6], offering real-time visualization of heart chambers, valve motion, and blood flow. We applied a three-round annotation pipeline. Fig. 3h shows the annotation time per ultrasound (US) frame, demonstrating a substantial reduction across iterations. Manual annotation initially required 102.3 seconds per frame, whereas the first round of the pipeline reduced this time to 65.7 seconds, an approximately $36\%$ decrease. With further refinements in the second round, annotation time dropped to 23.1 seconds, and by the third round, it reached 8.4 seconds per frame, a $92\%$ reduction compared to manual annotation.
Fig. 3i highlights the expanding dataset size as the annotation process scales up. The first round processed 44,165 frames across 300 videos. In the second round, with the improved model, the dataset increased to 72,794 frames from 500 videos. Finally, in the third round, the pipeline annotated 134,591 frames from 1,000 videos, demonstrating its scalability and robustness. This represents a throughput increase of over $12\times$ compared to manual annotation. Visualized segmentation examples are presented in Fig. 3j, showing that MedSAM2 accurately delineates both ventricles and atria with consistent boundary tracking, even during cardiac contraction phases.
Fig. 4. MedSAM2 can be deployed on local desktops and remote clusters with commonly used platforms: 3D Slicer, terminal, JupyterLab, Gradio, and Google Colab.
# MedSAM2 supports community-wide deployment
To bridge the gap between advanced segmentation models and real-world applications, we have integrated MedSAM2 into several commonly used platforms across the medical imaging and data science communities, such as 3D Slicer [47], terminal, JupyterLab, Colab, and Gradio [48] (Fig. 4). This multi-platform integration enables users to flexibly deploy and interact with MedSAM2 on both local desktops and remote computing environments, adapting to diverse workflows and computational resources.
3D Slicer is one of the most widely used open-source medical image analysis platforms. We implemented MedSAM2 as a plug-and-play plugin (Methods), enabling users to seamlessly apply MedSAM2 for interactive lesion and organ segmentation, visualization, and analysis in a familiar environment (Supplementary Fig. 3). This integration facilitates fast annotation and refinement of segmentation results, making it a practical tool for clinicians and biomedical researchers working with diverse 3D medical imaging modalities.
For high-throughput processing, the command-line terminal interface provides an efficient and scriptable way to process large datasets in batch mode. JupyterLab and Colab cater to researchers and developers who prefer an interactive, code-centric environment for experimentation. These platforms support notebook-based workflows, making it easy to visualize intermediate outputs, adjust model parameters, and document the segmentation process. In particular, Colab enables cloud-based access to free GPUs, allowing users without local hardware to test and deploy MedSAM2 with minimal setup.
Additionally, we incorporated MedSAM2 into Gradio, a lightweight and web-based interface that allows users to interact with the model without requiring extensive technical expertise or complex installations. This web-based deployment is particularly beneficial for video segmentation, allowing users to upload and process video frames without requiring extensive computational resources. The user-friendly design enables quick previews and adjustments of segmentations, allowing human annotators to refine results as needed. Moreover, Gradio supports seamless deployment in both local and cloud-based environments, which is essential for multi-institutional collaborations and remote research settings.
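As a rough illustration of how such a lightweight web demo can be assembled with Gradio, the snippet below wires a video input and a text prompt to a placeholder inference function; `run_medsam2_on_video` is hypothetical and simply echoes the input here, so this does not reflect the released interface.

```python
import gradio as gr

def run_medsam2_on_video(video_path, box_text):
    # Hypothetical placeholder: a real demo would parse the box prompt,
    # run MedSAM2 on the frames, and return a rendered overlay video.
    return video_path

demo = gr.Interface(
    fn=run_medsam2_on_video,
    inputs=[
        gr.Video(label="Input video"),
        gr.Textbox(label="Bounding box prompt (x_min, y_min, x_max, y_max)"),
    ],
    outputs=gr.Video(label="Segmentation overlay"),
    title="MedSAM2 video segmentation (illustrative demo)",
)

if __name__ == "__main__":
    demo.launch()  # set share=True for a temporary public link
```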
# DISCUSSION
General segmentation foundation models, such as SAM2.1, are pre-trained on large-scale natural image and video datasets, providing strong general segmentation capabilities but typically lacking the fine-grained domain knowledge required for precise medical image segmentation. Our results demonstrate that transfer learning is an effective strategy for adapting general-domain segmentation foundation models to medical imaging applications, enabling substantial improvements in segmentation accuracy and robustness across diverse medical imaging modalities.
Medical imaging datasets often suffer from limited annotated samples due to the high cost, time demand, and expertise required for manual annotation. Lesion segmentation is one of the most challenging tasks because lesions vary in size, shape, location, and contrast across imaging modalities and patients [42]. The scarcity of labeled data can hinder the development and generalization of general lesion detection and quantification models, limiting their clinical applicability. Our iterative annotation pipeline with transfer learning reduced annotation time by up to $92\%$ while enabling dataset expansion by more than four times.
Our first user study demonstrates that fine-tuning MedSAM2 on domain-specific CT and MRI lesion datasets leads to progressive improvements in annotation efficiency and segmentation quality. The iterative annotation pipeline enhances the model accuracy by continuously learning from newly annotated data, reducing manual correction efforts and overall annotation time. This progressive adaptation is particularly valuable for heterogeneous datasets, such as those containing a mix of common and rare lesion types.
Video modalities, such as Echocardiography, present unique challenges compared to CT and MRI due to the dynamic nature of the heart. Unlike static medical images, ultrasound videos capture continuous motion with typical acquisition rates of 30-60 frames per second, making frame-by-frame manual annotation by experts highly impractical. This inherent complexity limits the availability of large, well-annotated segmentation datasets. Our video annotation study demonstrates that these challenges can be effectively mitigated using an iterative annotation pipeline combined with transfer learning, achieving substantial reductions in annotation time while progressively improving segmentation quality. The model's ability to generalize across different patient demographics and ultrasound systems further highlights its scalability. This could further facilitate the development of cardiac assessment tools that support early disease detection and quantitative cardiac research.
Our implementation of MedSAM2 as plug-ins and packages for standard medical imaging platforms reduces adoption barriers toward translating deep learning-based segmentation models into practical tools. By supporting deployment in 3D Slicer, terminal, JupyterLab, Colab, and Gradio, we provide both graphical interfaces and programmatic APIs for flexible access tailored to a wide range of users, from clinicians and radiologists to data scientists and algorithm developers, in both clinical research and translational settings.
This work also has several limitations. One key limitation of MedSAM2 is its reliance on bounding boxes as the main prompt. This design choice reduces object selection ambiguity and enables efficient mask propagation, allowing the model to process and track multiple masks simultaneously. However, it inherently limits the model's ability to segment highly complex anatomical structures, such as vessels with thin and branching structures. Since the model does not explicitly consider 3D spatial continuity, it may struggle to accurately capture highly elongated and curved 3D objects. One promising direction is the incorporation of a 4D image encoder (3D + time), which would allow the model to jointly process spatial and temporal information. Moreover, supporting other prompts, such as points [25], [26], text [49], [50], scribbles, and lassos [27], would enable more flexible corrections.
Another limitation stems from the fixed memory design, where the model maintains an eight-frame memory bank for all segmentation tasks. While this memory size is sufficient for the majority of cases with moderate object motion, it may lead to inferior tracking performance when dealing with rapid or large target movements. For example, in colonoscopy videos, the camera continuously moves through the gastrointestinal tract, and polyps may appear, disappear, or change shape as the viewpoint shifts. Tracking failures may occur when the polyp moves out of the current memory range and then reappears in later frames. Future work will focus on implementing an adaptive memory system to replace the fixed memory bank to allocate longer memory retention for rapidly moving or intermittently visible targets.
In addition, MedSAM2 is built on the SAM2.1-Tiny model with a reduced input image size to optimize efficiency, but inference still requires GPU computation, limiting its applicability in resource-constrained environments, such as edge devices, point-of-care ultrasound machines, or low-power medical imaging workstations. Future optimizations, such as a lighter image encoder, model compression, quantization, or distillation, will be necessary to enable efficient CPU-based inference, making MedSAM2 more practical in real-time and low-resource medical settings.
In conclusion, this work presents a foundation model for 3D medical image and video segmentation. We also provide, to the best of our knowledge, the most extensive user study to annotate large-scale medical datasets. MedSAM2 not only achieves better performance across various organs and lesions compared to existing SAM variants, but also substantially reduces annotation costs for creating large-scale segmentation datasets. As annotation processes become more efficient, the potential for scaling up large, high-quality labeled datasets increases, which in turn benefits future diagnostic model development and clinical deployment. Our open-source implementations across multiple platforms will facilitate adoption and further community-driven improvements to medical image and video segmentation tools.
# METHODS
# Dataset curation and pre-processing
All training images and videos were curated from publicly available datasets with licenses permitting research use (Supplementary Table 1). The 3D test images were based on the recent 3D multi-phase liver tumor CT dataset [51] and the CVPR 2024 MedSAM on Laptop testing set [37], including 20, 7, 7, 5, and 1 tasks for CT organs, CT lesions, MRI organs, MRI lesions, and PET lesions, respectively. The pre-processing followed common practice [9], [19], [50]. Specifically, CT image intensities were adjusted to the appropriate window width and level (brain: 80/40, abdomen: 400/40, bone: 1800/400, lung: 1500/-600, mediastinum: 400/40), followed by rescaling to [0, 255]. For the remaining 3D images (MRI and PET), we clipped intensities to the 0.5th and 99.5th percentiles of the foreground intensity and then rescaled them to [0, 255]. No intensity normalization was applied to videos.
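The intensity normalization described above can be sketched as follows; the window width/level values come from the text, while the helper names and the uint8 output type are implementation choices made only for illustration.

```python
import numpy as np

# Window (width, level) pairs stated in the text, in Hounsfield units.
CT_WINDOWS = {"brain": (80, 40), "abdomen": (400, 40), "bone": (1800, 400),
              "lung": (1500, -600), "mediastinum": (400, 40)}

def preprocess_ct(image_hu, window="abdomen"):
    """Clip a CT volume to the given window and rescale to [0, 255]."""
    width, level = CT_WINDOWS[window]
    lo, hi = level - width / 2, level + width / 2
    clipped = np.clip(image_hu, lo, hi)
    return ((clipped - lo) / (hi - lo) * 255.0).astype(np.uint8)

def preprocess_mr_pet(image, foreground_mask=None):
    """Clip MRI/PET intensities to the 0.5-99.5 percentile range of the
    foreground and rescale to [0, 255]."""
    values = image[foreground_mask] if foreground_mask is not None else image
    lo, hi = np.percentile(values, [0.5, 99.5])
    clipped = np.clip(image, lo, hi)
    return ((clipped - lo) / max(hi - lo, 1e-8) * 255.0).astype(np.uint8)
```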
# Lesion CT dataset and annotation pipeline
The DeepLesion dataset [42] contains 32,735 diverse lesions in 32,120 CT slices from 10,594 studies of 4,427 unique patients. Each lesion has a bounding box annotation on the key slice, derived from the longest diameter and the longest perpendicular diameter. We prioritized lesions with a minimum diameter of $25mm$ because larger lesions are more time-consuming to annotate manually. A senior radiologist with more than 10 years of experience manually annotated five cases to estimate the manual annotation time. In the human-in-the-loop experiment, we first generated the 2D segmentation mask on the key slice with MedSAM2, and then two radiology students manually revised the mask and specified the top and bottom slices of the lesion. To improve efficiency, we concatenated eight preprocessed lesion images along the axial plane into one 3D volume. In this way, human annotators could open eight lesion images at once for manual revision, reducing the time spent adjusting the window level and width for each lesion. All lesion images and masks were resampled to $512 \times 512$ in the axial plane before concatenation, using third-order spline interpolation for images and nearest-neighbor interpolation for masks. Images with an out-of-plane spacing of less than $3mm$ were resampled to $3mm$ . After manual revision, we separated the merged eight-lesion scan into single images and resampled them back to the original shape. We excluded images without measurable lesions or with an out-of-plane spacing of more than $5mm$ . Finally, all annotations were checked and revised by the senior radiologist.
# Liver lesion MRI dataset and annotation pipeline
The LLD-MMRI dataset [43] contains diverse liver lesions from 498 unique patients, including hepatocellular carcinoma, intrahepatic cholangiocarcinoma, liver metastases (HM), hepatic cysts (HC), hepatic hemangioma, focal nodular hyperplasia, and hepatic abscess. Each lesion has eight MRI scans: non-contrast, arterial, venous, delay, T2-weighted imaging, diffusion-weighted imaging, T1 in-phase, and T1 out-of-phase, resulting in 3,984 cases in total. Each liver lesion has both 3D and slice-wise 2D bounding boxes. We ran MedSAM2 with the two types of bounding boxes separately and obtained two groups of segmentation results. For the 3D bounding box prompt, we first generated a 2D segmentation mask on the middle slice and then propagated it to the remaining slices until it reached the top and bottom slices. For the 2D bounding box prompt, we ran MedSAM2 on each slice with the corresponding box prompt. We then computed the DSC score between the two groups of segmentation results, hypothesizing that hard cases have larger disagreement between the two segmentation masks. For each patient, we selected the case with the lowest DSC score among the eight MRI scans as a first-round revision candidate, aiming for a trade-off between data diversity and difficulty; the same selection criterion was used in the second-round iteration. A senior radiologist manually annotated five cases to estimate the manual annotation time, and two radiology students participated in the manual revision process. In contrast to the CT lesion annotation, the slice-wise 2D bounding box-based segmentation results were used for revision because their accuracy was better than that of the 3D bounding box-based results. During the revision process, we resampled the images to $352 \times 352$ and merged five preprocessed lesion images into one volume for better efficiency. Finally, all annotations were checked and revised by the senior radiologist.
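The agreement-based selection of revision candidates can be sketched as below; masks are assumed to be binary NumPy arrays, and the function names are illustrative rather than part of any released code.

```python
import numpy as np

def dice(a, b, eps=1e-8):
    """Dice similarity coefficient between two binary masks."""
    a, b = a.astype(bool), b.astype(bool)
    return (2.0 * np.logical_and(a, b).sum() + eps) / (a.sum() + b.sum() + eps)

def pick_revision_candidate(masks_3d_box, masks_2d_box):
    """Given per-phase segmentations from the 3D-box and slice-wise 2D-box
    prompts, return the index of the phase whose two results disagree the most
    (lowest DSC), treated as the hardest case for manual revision."""
    scores = [dice(m3, m2) for m3, m2 in zip(masks_3d_box, masks_2d_box)]
    return int(np.argmin(scores)), scores
```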
# Cardiac ultrasound (echocardiography) video dataset and annotation pipeline
The RVENet dataset [45], [46] consists of 3,583 echocardiography videos from 831 unique patients. The annotation protocol of the CAMUS dataset [38] was followed to delineate the left ventricle, myocardium, and left atrium. Since the videos were acquired in the apical four-chamber view, the right ventricle and right atrium were also annotated to provide a more comprehensive cardiac analysis. Videos with low image quality or incomplete ventricles and atria were excluded. The raw videos have resolutions of $1016 \times 708$ or $800 \times 600$ pixels. We downsampled the videos by a factor of two to reduce the annotation workload while preserving the structural detail needed to differentiate the heart chambers. We first annotated the first frame of 200 videos from different patients with bounding box or point prompts, followed by manual refinement by three radiology students. The corrected first-frame mask was then propagated across subsequent frames using MedSAM2. To enhance segmentation accuracy, human annotators manually refined three to ten frames at approximately uniform intervals before re-running MedSAM2 to update the segmentation results. Finally, all frames underwent manual adjustment where necessary, and the annotations were rigorously verified by the senior radiologist before being used to fine-tune MedSAM2 for the next iteration. For comparison with manual annotation efficiency, a senior radiologist annotated 10 frames as a reference for the manual annotation time.
# Network architecture
MedSAM2 is built upon SAM2 [28] with four main components: an image encoder, a prompt encoder, a memory attention module, and a mask decoder. First, we modified the image encoder by reducing the input image size from $3 \times 1024 \times 1024$ to $3 \times 512 \times 512$ , which not only better matches typical medical image sizes but also reduces the computational burden. The image encoder employs a hierarchical vision transformer (Hiera) [33] with a four-stage architecture (layers=\{1,2,7,2\}). We incorporated global attention blocks at the 5th, 7th, and 9th layers to capture the long-range dependencies critical for medical image analysis. A feature pyramid network (FPN) [52] neck extracts multi-scale features from the backbone, enabling detailed segmentation at various resolutions. Second, the memory attention module contains four transformer layers with both self-attention and cross-attention mechanisms. Each layer employs Rotary Position Embedding (RoPE) [53] with 2D spatial encoding (feature size $32 \times 32$ ) to maintain spatial awareness across slices or frames. This module conditions the current frame features on a memory bank storing information from previously processed frames, effectively exploiting the spatial continuity in volumetric data and the temporal coherence in videos. Third, the prompt encoder transforms coordinates into embeddings that guide the segmentation process, allowing clinicians to specify regions of interest efficiently. Finally, the mask decoder integrates features from multiple scales of the image encoder through skip connections and produces segmentation masks at $128 \times 128$ resolution, which are then upsampled to the original $512 \times 512$ input size using bilinear interpolation.
# Training protocol
The model was initialized from the pre-trained SAM2.1-Tiny checkpoint. During training, we used a full-model fine-tuning strategy with two learning rates: a lower learning rate $(3.0 \times 10^{-5})$ for the image encoder (28M parameters) to preserve learned features, and a higher rate $(5.0 \times 10^{-5})$ for the other components (10.9M parameters) to adapt to the characteristics of the medical domain. Training used a combination of 3D images and videos with a batch size of eight per GPU, where each training sample consisted of eight consecutive slices or video frames. In the human-in-the-loop annotation study, we halved the learning rates and fine-tuned the trained MedSAM2 model for 6 and 15 epochs in the second- and third-round iterations, respectively.
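A minimal sketch of this two-learning-rate setup with AdamW is shown below; `image_encoder` is an assumed attribute/parameter-name prefix used only for illustration, not the exact module name in the codebase.

```python
import torch

def build_optimizer(model, lr_encoder=3.0e-5, lr_rest=5.0e-5, weight_decay=0.01):
    """Two parameter groups: a lower learning rate for the image encoder and a
    higher one for the remaining components, as described in the text."""
    encoder_params, other_params = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Assumed naming convention: encoder parameters start with "image_encoder".
        (encoder_params if name.startswith("image_encoder") else other_params).append(param)
    return torch.optim.AdamW(
        [{"params": encoder_params, "lr": lr_encoder},
         {"params": other_params, "lr": lr_rest}],
        betas=(0.9, 0.999), weight_decay=weight_decay)
```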
The data augmentations included random horizontal flipping, affine transformations, color jittering, and random grayscale conversion. For videos, we also augmented the frame sampling rate by factors of 2 and 4. Since the training set was imbalanced across modalities, we increased the sampling frequency of MRI, PET, and video data by factors of 3, 40, and 40, respectively. Bounding box prompts were simulated from expert annotations with random perturbations of 0-10 pixels. The loss function combined focal loss and dice loss for mask prediction with a 20:1 weighting. We used the AdamW optimizer [54] with $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ , and a weight decay of 0.01. The model was trained for 70 epochs on three compute nodes, each equipped with four H100 GPUs, with a total training time of four days. External validation was performed on held-out datasets to assess the model's generalization across different tasks and modalities.
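The 20:1 focal-dice combination can be sketched as follows using standard formulations of both terms; the focal-loss `alpha` and `gamma` values are assumed defaults, not values stated in the text.

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, target, alpha=0.25, gamma=2.0):
    """Standard binary focal loss on mask logits (alpha/gamma are assumed defaults)."""
    prob = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, target, reduction="none")
    p_t = prob * target + (1 - prob) * (1 - target)
    alpha_t = alpha * target + (1 - alpha) * (1 - target)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()

def dice_loss(logits, target, eps=1e-6):
    """Soft dice loss computed per mask and averaged over the batch."""
    prob = torch.sigmoid(logits)
    inter = (prob * target).sum(dim=(-2, -1))
    denom = prob.sum(dim=(-2, -1)) + target.sum(dim=(-2, -1))
    return (1 - (2 * inter + eps) / (denom + eps)).mean()

def segmentation_loss(logits, target):
    """Focal and dice terms combined with the 20:1 weighting stated in the text."""
    return 20.0 * focal_loss(logits, target) + 1.0 * dice_loss(logits, target)
```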
# 3D Slicer Integration
We implemented MedSAM2 as a plugin (extension) in 3D Slicer to reuse the built-in modules for essential operations, such as loading diverse medical imaging formats (e.g., DICOM, NIfTI), drawing prompts, refining masks, and visualizing both 2D slices and 3D segmentation results. The plugin is built on a client-server architecture, offering users the flexibility to perform inference either locally on personal machines or remotely on high-performance computing clusters. The interface contains three sections:
- Preprocessing panel: users can select predefined pre-processing options (e.g., CT, MRI) to normalize the input image intensity before segmentation.
- Region-Of-Interest (ROI) selection: users can define the ROI directly by choosing start and end slices and drawing bounding box prompts on the key slice.
- Segmentation controls: users can choose the model variant and initiate segmentation for the middle slice and full volume. Moreover, users can load their own customized models for specific imaging modalities or segmentation targets.
For the server component, we implemented a Flask API server to provide the necessary arguments and inputs to the local API offered by MedSAM2. The server also features a temporary most recently used (MRU)-style cache to facilitate refinement of the most recent segmentation.
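A minimal illustration of such a client-server split with Flask is given below; the endpoint name, payload fields, and the simple cache are hypothetical and do not reflect the released plugin protocol.

```python
from flask import Flask, jsonify, request

app = Flask(__name__)
_cache = {}  # most-recently-used style cache of recent segmentations (illustrative)

@app.route("/segment", methods=["POST"])
def segment():
    """Hypothetical endpoint: receive an image reference and a bounding-box
    prompt, run segmentation, and cache the result for later refinement."""
    payload = request.get_json()
    key = payload.get("image_id")
    # Placeholder for the actual MedSAM2 inference call.
    result = {"image_id": key, "box": payload.get("box"), "status": "segmented"}
    _cache[key] = result
    return jsonify(result)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```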
# Evaluation metrics and platform
We followed the recommendations in Metrics Reloaded [55] to evaluate segmentation accuracy. Specifically, we used the Dice Similarity Coefficient (DSC) and the Normalized Surface Distance (NSD) with a boundary tolerance of $2mm$ to quantify region overlap and boundary similarity, respectively. For CT, MRI, and PET images, we computed the metrics in 3D, while for video datasets we first computed frame-wise metric scores and then averaged them to obtain video-level scores. The Wilcoxon signed-rank test was used for statistical significance analysis, and results were considered statistically significant if the $p$ -value was less than 0.05.
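As an illustration of the statistical comparison, the snippet below applies the Wilcoxon signed-rank test to paired per-case DSC scores from two models; the array contents are made-up placeholders for demonstration only.

```python
import numpy as np
from scipy.stats import wilcoxon

def compare_models(dsc_model_a, dsc_model_b, alpha=0.05):
    """Paired comparison of per-case DSC scores with the Wilcoxon signed-rank test."""
    stat, p_value = wilcoxon(dsc_model_a, dsc_model_b)
    return {"statistic": float(stat), "p_value": float(p_value),
            "significant": bool(p_value < alpha)}

# Illustrative usage with made-up per-case DSC scores:
scores_a = np.array([0.88, 0.91, 0.86, 0.90, 0.84, 0.93, 0.89, 0.87])
scores_b = np.array([0.84, 0.89, 0.85, 0.88, 0.80, 0.90, 0.86, 0.83])
print(compare_models(scores_a, scores_b))
```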
# Data availability
All data used in this study are from public datasets, and detailed references are provided in Supplementary Table 1. We also created a dedicated website, accessible at https://medsam-datasetlist.github.io, to provide a comprehensive and continuously updated repository of medical image segmentation datasets. This resource is intended for long-term maintenance and accessibility to the research community.
# Code availability
The code, model weights, and annotated datasets are publicly available at https://github.com/bowang-lab/MedSAM2. The 3D Slicer plugin can be accessed at https://github.com/bowang-lab/MedSAMSlicer.
# REFERENCES
[1] D. Ouyang, B. He, A. Ghorbani, N. Yuan, J. Ebinger, C. P. Langlotz, P. A. Heidenreich, R. A. Harrington, D. H. Liang, E. A. Ashley, and J. Y. Zou, "Video-based ai for beat-to-beat assessment of cardiac function," Nature, vol. 580, no. 7802, pp. 252-256, 2020.
[2] Y.-R. Wang, K. Yang, Y. Wen, P. Wang, Y. Hu, Y. Lai, Y. Wang, K. Zhao, S. Tang, A. Zhang, H. Zhan, M. Lu, X. Chen, S. Yang, Z. Dong, Y. Wang, H. Liu, L. Zhao, L. Huang, Y. Li, L. Wu, Z. Chen, Y. Luo, D. Liu, P. Zhao, K. Lin, J. C. Wu, and S. Zhao, "Screening and diagnosis of cardiovascular disease using artificial intelligence-enabled cardiac magnetic resonance imaging," Nature Medicine, vol. 30, no. 5, p. 1471-1480, 2024.
[3] K. Cao, Y. Xia, J. Yao, X. Han, L. Lambert, T. Zhang, W. Tang, G. Jin, H. Jiang, X. Fang et al., "Large-scale pancreatic cancer detection via non-contrast ct and deep learning," Nature medicine, vol. 29, no. 12, pp. 3033-3043, 2023.
[4] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, and A. L. Yuille, "Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 40, no. 4, pp. 834-848, 2018.
[5] L.-C. Chen, Y. Zhu, G. Papandreou, F. Schroff, and H. Adam, "Encoder-decoder with atrous separable convolution for semantic image segmentation," in Proceedings of the European Conference on Computer Vision, 2018, pp. 801-818.
[6] B. He, A. C. Kwan, J. H. Cho, N. Yuan, C. Pollick, T. Shiota, J. Ebinger, N. A. Bello, J. Wei, K. Josan, G. Duffy, M. Jujavarapu, R. Siegel, S. Cheng, J. Y. Zou, and D. Ouyang, "Blinded, randomized trial of sonographer versus AI cardiac function assessment," Nature, vol. 616, no. 7957, pp. 520-524, 2023.
[7] O. Ronneberger, P. Fischer, and T. Brox, "U-net: Convolutional networks for biomedical image segmentation," in Medical Image Computing and Computer-Assisted Intervention, 2015, pp. 234-241.
[8] T. Falk, D. Mai, R. Bensch, Ö. Çiçek, A. Abdulkadir, Y. Marrakchi, A. Böhm, J. Deubner, Z. Jäckel, K. Seiwald, A. Dovzhenko, O. Tietz, C. Dal Bosco, S. Walsh, D. Saltukoglu, T. L. Tay, M. Prinz, K. Palme, M. Simons, I. Diester, T. Brox, and O. Ronneberger, "U-net: deep learning for cell counting, detection, and morphometry," Nature Methods, vol. 16, no. 1, pp. 67-70, 2018.
[9] F. Isensee, P. F. Jaeger, S. A. Kohl, J. Petersen, and K. H. Maier-Hein, "nnu-net: a self-configuring method for deep learning-based biomedical image segmentation," Nature Methods, vol. 18, no. 2, pp. 203-211, 2021.
[10] J. Ma, Y. Zhang, S. Gu, C. Ge, S. Mae, A. Young, C. Zhu, X. Yang, K. Meng, Z. Huang, F. Zhang, Y. Pan, S. Huang, J. Wang, M. Sun, R. Zhang, D. Jia, J. W. Choi, N. Alves, B. de Wilde, G. Koehler, H. Lai, E. Wang, M. Wiesenfarth, Q. Zhu, G. Dong, J. He, J. He, H. Yang, B. Huang, M. Lyu, Y. Ma, H. Guo, W. Xu, K. Maier-Hein, Y. Wu, and B. Wang, "Unleashing the strengths of unlabelled data in deep learning-assisted pan-cancer abdominal organ quantification: the flare22 challenge," The Lancet Digital Health, vol. 6, no. 11, p. e815-e826, 2024.
[11] S. Gatidis, M. Früh, M. P. Fabritius, S. Gu, K. Nikolaou, C. L. Fougère, J. Ye, J. He, Y. Peng, L. Bi, J. Ma, B. Wang, J. Zhang, Y. Huang, L. Heiliger, Z. Marinov, R. Stiefelhagen, J. Egger, J. Kleesiek, L. Sibille, L. Xiang, S. Bendazzoli, M. Astaraki, M. Ingrisch, C. C. Cyran, and T. Küstner, "Results from the autopet challenge on fully automated lesion segmentation in oncologic pet/ct imaging," Nature Machine Intelligence, vol. 6, no. 11, p. 1396-1405, 2024.
[12] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly et al., "An image is worth 16x16 words: Transformers for image recognition at scale," in International Conference on Learning Representations, 2020.
[13] A. Kirillov, E. Mintun, N. Ravi, H. Mao, C. Rolland, L. Gustafson, T. Xiao, S. Whitehead, A. C. Berg, W.-Y. Lo, P. Dollar, and R. Girshick, "Segment anything," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 4015-4026.
[14] M. Moor, O. Banerjee, Z. S. H. Abad, H. M. Krumholz, J. Leskovec, E. J. Topol, and P. Rajpurkar, "Foundation models for generalist medical artificial intelligence," Nature, vol. 616, no. 7956, pp. 259-265, 2023.
[15] Y. He, F. Huang, X. Jiang, Y. Nie, M. Wang, J. Wang, and H. Chen, "Foundation model for advancing healthcare: Challenges, opportunities and future directions," IEEE Reviews in Biomedical Engineering, pp. 1-20, 2024.
[16] W. Khan, S. Leem, K. B. See, J. K. Wong, S. Zhang, and R. Fang, "A comprehensive survey of foundation models in medicine," IEEE Reviews in Biomedical Engineering, 2025.
[17] M. A. Mazurowski, H. Dong, H. Gu, J. Yang, N. Konz, and Y. Zhang, "Segment anything model for medical image analysis: an experimental study," Medical Image Analysis, vol. 89, p. 102918, 2023.
[18] Y. Huang, X. Yang, L. Liu, H. Zhou, A. Chang, X. Zhou, R. Chen, J. Yu, J. Chen, C. Chen et al., "Segment anything model for medical images?" Medical Image Analysis, vol. 92, p. 103061, 2024.
[19] J. Ma, Y. He, F. Li, L. Han, C. You, and B. Wang, "Segment anything in medical images," Nature Communications, vol. 15, p. 654, 2024.
[20] J. Cheng, J. Ye, Z. Deng, J. Chen, T. Li, H. Wang, Y. Su, Z. Huang, J. Chen, L. Jiang, H. Sun, J. He, S. Zhang, M. Zhu, and Y. Qiao, "Sam-med2d," arXiv preprint arXiv:2308.16184, 2023.
[21] H. Wang, S. Guo, J. Ye, Z. Deng, J. Cheng, T. Li, J. Chen, Y. Su, Z. Huang, Y. Shen, B. Fu, S. Zhang, J. He, and Y. Qiao, "Sam-med3d: Towards general-purpose segmentation models for volumetric medical images," arXiv preprint arXiv:2310.15161, 2024.
[22] C. Chen, J. Miao, D. Wu, A. Zhong, Z. Yan, S. Kim, J. Hu, Z. Liu, L. Sun, X. Li, T. Liu, P.-A. Heng, and Q. Li, "Ma-sam: Modality-agnostic sam adaptation for 3d medical image segmentation," Medical Image Analysis, vol. 98, p. 103310, 2024.
[23] S. Gong, Y. Zhong, W. Ma, J. Li, Z. Wang, J. Zhang, P.-A. Heng, and Q. Dou, "3dsam-adapter: Holistic adaptation of sam from 2d to 3d for promptable tumor segmentation," Medical Image Analysis, vol. 98, p. 103324, 2024.
[24] J. Wu, Z. Wang, M. Hong, W. Ji, H. Fu, Y. Xu, M. Xu, and Y. Jin, "Medical sam adapter: Adapting segment anything model for medical image segmentation," Medical Image Analysis, p. 103547, 2025.
[25] Y. Du, F. Bai, T. Huang, and B. Zhao, "Segvol: Universal and interactive volumetric medical image segmentation," in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 110746-110783.
[26] Y. He, P. Guo, Y. Tang, A. Myronenko, V. Nath, Z. Xu, D. Yang, C. Zhao, B. Simon, M. Belue, S. Harmon, B. Turkbey, D. Xu, and W. Li, "VISTA3D: A unified segmentation foundation model for 3D medical imaging," in Proceedings of the IEEE/CVF International Conference on Computer Vision and Pattern Recognition, 2024.
[27] I. Fabian, R. Maximilian, K. Lars, D. Stefan, R. Ashis, S. Florian, H. Benjamin, W. Tassilo, L. Moritz, U. Constantin, D. Jonathan, F. Ralf, and M.-H. Klaus, "nninteractive: Redefining 3D promptable segmentation," arXiv preprint arXiv:2503.08373, 2025.
[28] N. Ravi, V. Gabeur, Y.-T. Hu, R. Hu, C. Ryali, T. Ma, H. Khedr, R. Rädle, C. Rolland, L. Gustafson, E. Mintun, J. Pan, K. V. Alwala, N. Carion, C.-Y. Wu, R. Girshick, P. Dollar, and C. Feichtenhofer, "Sam 2: Segment anything in images and videos," in International Conference on Learning Representations, 2025.
[29] Y. Zhang and Z. Shen, "Unleashing the potential of sam2 for biomedical images and videos: A survey," arXiv preprint arXiv:2408.12889, 2024.
[30] J. Zhu, A. Hamdi, Y. Qi, Y. Jin, and J. Wu, "Medical sam 2: Segment medical images as video via segment anything model 2," arXiv preprint arXiv:2408.00874, 2024.
[31] Z. Yan, W. Sun, R. Zhou, Z. Yuan, K. Zhang, Y. Li, T. Liu, Q. Li, X. Li, L. He, and L. Sun, "Biomedical sam 2: Segment anything in biomedical images and videos," 2024.
|
| 243 |
+
[32] J. Ma, S. Kim, F. Li, M. Baharoon, R. Asakereh, H. Lyu, and B. Wang, "Segment anything in medical images and videos: Benchmark and deployment," arXiv preprint arXiv:2408.03322, 2024.
|
| 244 |
+
[33] C. Ryali, Y.-T. Hu, D. Bolya, C. Wei, H. Fan, P.-Y. Huang, V. Aggarwal, A. Chowdhury, O. Poursaeed, J. Hoffman et al., "Hiera: A hierarchical vision transformer without the bells-and-whistles," in International Conference on Machine Learning. PMLR, 2023, pp. 29441-29454.
|
| 245 |
+
[34] A. Archit, L. Freckmann, S. Nair, N. Khalid, P. Hilt, V. Rajashekar, M. Freitag, C. Teuber, G. Buckley, S. von Haaren, S. Gupta, A. Dengel, S. Ahmed, and C. Pape, "Segment anything for microscopy," Nature Methods, 2025.
|
| 246 |
+
[35] L. Ke, M. Ye, M. Danelljan, Y. Liu, Y.-W. Tai, C.-K. Tang, and F. Yu, "Segment anything in high quality," in Advances in Neural Information Processing Systems, vol. 36, 2023, pp. 29914-29934.
|
| 247 |
+
[36] A. Pfefferle, L. Purucker, and F. Hutter, "Daft: Data-aware fine-tuning of foundation models for efficient and effective medical image segmentation," in CVPR 2024: Segment Anything In Medical Images On Laptop, 2024.
|
| 248 |
+
[37] J. Ma, F. Li, S. Kim, R. Asakereh, B.-H. Le, D.-K. Nguyen-Vu, A. Pfefferle, M. Wei, R. Gao, D. Lyu, S. Yang, L. Purucker, Z. Marinov, M. Staring, H. Lu, T. T. Dao, X. Ye, Z. Li, G. Brugnara, P. Vollmuth, M. Foltyn-Dumitru, J. Cho, M. A. Mahmutoglu, M. Bendszus, I. Pflüger, A. Rastogi, D. Ni, X. Yang, G.-Q. Zhou, K. Wang, N. Heller, N. Papanikolopoulos, C. Weight, Y. Tong, J. K. Udupa, C. J. Patrick, Y. Wang, Y. Zhang, F. Contijoch, E. McVeigh, X. Ye, S. He, R. Haase, T. Pinetz, A. Radbruch, I. Krause, E. Kobler, J. He, Y. Tang, H. Yang, Y. Huo, G. Luo, K. Kushibar, J. Amankulov, D. Toleshbayev, A. Mukhamejan, J. Egger, A. Pepe, C. Gsaxner, G. Luijten, S. Fujita, T. Kikuchi, B. Wiestler, J. S. Kirschke, E. de la Rosa, F. Bolelli, L. Lumetti, C. Grana, K. Xie, G. Wu, B. Puladi, C. Martin-Isla, K. Lekadir, V. M. Campello, W. Shao, W. Brisbane, H. Jiang, H. Wei, W. Yuan, S. Li, Y. Zhou, and B. Wang, "Efficient medsams: Segment anything in medical images on laptop," arXiv:2412.16085, 2024.
|
| 249 |
+
[38] S. Leclerc, E. Smistad, J. Pedrosa, A. Østvik, F. Cervenansky, F. Espinosa, T. Espeland, E. A. R. Berg, P.-M. Jodoin, T. Grenier et al., "Deep learning for segmentation using an open large-scale dataset in 2d echocardiography," IEEE Transactions on Medical Imaging, vol. 38, no. 9, pp. 2198-2210, 2019.
|
| 250 |
+
[39] M. Misawa, S.-e. Kudo, Y. Mori, K. Hotta, K. Ohtsuka, T. Matsuda, S. Saito, T. Kudo, T. Baba, F. Ishida, H. Itoh, M. Oda, and K. Mori, "Development of a computer-aided detection system for colonoscopy and a publicly accessible large colonoscopy video database (with video)," Gastrointestinal Endoscopy, vol. 93, no. 4, pp. 960-967.e3, 2021.
|
| 251 |
+
[40] G.-P. Ji, G. Xiao, Y.-C. Chou, D.-P. Fan, K. Zhao, G. Chen, and L. Van Gool, "Video polyp segmentation: A deep learning perspective," Machine Intelligence Research, vol. 19, no. 6, pp. 531-549, 2022.
|
| 252 |
+
[41] E. A. Eisenhauer, P. Therasse, J. Bogaerts, L. H. Schwartz, D. Sargent, R. Ford, J. Dancey, S. Arbuck, S. Gwyther, M. Mooney et al., "New response evaluation criteria in solid tumours: revised recist guideline (version 1.1)," European Journal of Cancer, vol. 45, no. 2, pp. 228-247, 2009.
|
| 253 |
+
[42] K. Yan, X. Wang, L. Lu, and R. M. Summers, "DeepLesion: automated mining of large-scale lesion annotations and universal lesion detection with deep learning," Journal of Medical Imaging, vol. 5, no. 3, p. 036501, 2018.
|
| 254 |
+
[43] M. Lou, H. Ying, X. Liu, H.-Y. Zhou, Y. Zhang, and Y. Yu, "Sdr-former: A siamese dual-resolution transformer for liver lesion classification using 3d multi-phase imaging," Neural Networks, p. 107228, 2025.
|
| 255 |
+
[44] C. Varghese, E. M. Harrison, G. O'Grady, and E. J. Topol, "Artificial intelligence in surgery," Nature Medicine, vol. 30, no. 5, pp. 1257-1268, 2024.
|
| 256 |
+
[45] B. Magyar, M. Tokodi, A. Soos, M. Tolvaj, B. K. Lakatos, A. Fabian, E. Surkova, B. Merkely, A. Kovacs, and A. Horvath, "Rvenet: A large echocardiographic dataset for the deep learning-based assessment of right ventricular function," in Computer Vision - ECCV 2022 Workshops. Springer Nature Switzerland, 2023, pp. 569-583.
|
| 257 |
+
[46] M. Tokodi, B. Magyar, A. Soos, M. Takeuchi, M. Tolvaj, B. K. Lakatos, T. Kitano, Y. Nabeshima, A. Fábian, M. B. Szigeti et al., "Deep learning-based prediction of right ventricular ejection fraction using 2d echocardiograms," JACC: Cardiovascular Imaging, vol. 16, no. 8, pp. 1005-1018, 2023.
|
| 258 |
+
[47] R. Kikinis, S. D. Pieper, and K. G. Vosburgh, "3d slicer: a platform for subject-specific image analysis, visualization, and clinical support," in Intraoperative imaging and image-guided therapy. Springer, 2013, pp. 277-289.
|
| 259 |
+
[48] A. Abid, A. Abdalla, A. Abid, D. Khan, A. Alfozan, and J. Zou, "Gradio: Hassle-free sharing and testing of ml models in the wild," arXiv preprint arXiv:1906.02569, 2019.
|
| 260 |
+
[49] Z. Zhao, Y. Zhang, C. Wu, X. Zhang, Y. Zhang, Y. Wang, and W. Xie, "One model to rule them all: Towards universal segmentation for medical images with text prompts," arXiv preprint arXiv:2312.17183, 2023.
|
| 261 |
+
[50] T. Zhao, Y. Gu, J. Yang, N. Usuyama, H. H. Lee, S. Kiblawi, T. Naumann, J. Gao, A. Crabtree, J. Abel, C. Moung-Wen, B. Piening, C. Bifulco, M. Wei, H. Poon, and S. Wang, "A foundation model for joint segmentation, detection and recognition of biomedical objects across nine modalities," Nature Methods, 2024.
|
| 262 |
+
[51] K. Bartnik, T. Bartczak, M. Krzyzinski, K. Korzeniowski, K. Lamparski, P. Wegrzyn, E. Lam, M. Bartkowiak, T. Wroblewski, K. Mech, M. Januszewicz, and P. Biecek, "Waw-tace: A hepatocellular carcinoma multiphase ct dataset with segmentations, radiomics features, and clinical data," Radiology: Artificial Intelligence, vol. 6, no. 6, p. e240296, 2024.
|
| 263 |
+
[52] T.-Y. Lin, P. Dollár, R. Girshick, K. He, B. Hariharan, and S. Belongie, "Feature pyramid networks for object detection," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 2117-2125.
|
| 264 |
+
[53] J. Su, M. Ahmed, Y. Lu, S. Pan, W. Bo, and Y. Liu, "RoFormer: Enhanced transformer with rotary position embedding," Neurocomputing, vol. 568, p. 127063, 2024.
|
| 265 |
+
[54] I. Loshchilov and F. Hutter, "Decoupled weight decay regularization," in International Conference on Learning Representations, 2019.
|
| 266 |
+
[55] L. Maier-Hein, A. Reinke, P. Godau, M. D. Tizabi, F. Buettner, E. Christodoulou, B. Glocker, F. Isensee, J. Kleesiek, M. Kozubek, M. Reyes, M. A. Riegler, M. Wiesenfarth, A. E. Kavur, C. H. Sudre, M. Baumgartner, M. Eisenmann, D. Heckmann-Nötzel, T. Rädsch, L. Acion, M. Antonelli, T. Arbel, S. Bakas, A. Benis, M. B. Blaschko, M. J. Cardoso, V. Cheplygina, B. A. Cimini, G. S. Collins, K. Farahani, L. Ferrer, A. Galdran, B. van Ginneken, R. Haase, D. A. Hashimoto, M. M. Hoffman, M. Huisman, P. Jannin, C. E. Kahn, D. Kainmueller, B. Kainz, A. Karargyris, A. Karthikesalingam, F. Kofler, A. Kopp-Schneider, A. Kreshuk, T. Kurc, B. A. Landman, G. Litjens, A. Madani, K. Maier-Hein, A. L. Martel, P. Mattson, E. Meijering, B. Menze, K. G. M. Moons, H. Müller, B. Nichyporuk, F. Nickel, J. Petersen, N. Rajpoot, N. Rieke, J. Saez-Rodriguez, C. I. Sánchez, S. Shetty, M. van Smeden, R. M. Summers, A. A. Taha, A. Tiulpin, S. A. Tsaftaris, B. Van Calster, G. Varoquaux, and P. F. Jäger, "Metrics reloaded: recommendations for image analysis validation," Nature Methods, vol. 21, no. 2, p. 195-212, 2024.
|
data/2025/2504_03xxx/2504.03600/images/1085ef736e68e6396ba4ffec8a770702736c2199e39003abff238711c34f1928.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/130fd5f966b0e7ba408da3d2cf52c7359fbbc84e45fd10032cd51ef78bcf3cf1.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/17fd29cbf2cd19784c0bb0d2cfb0e23354962e03a41bb725e1fbaa214aac2aef.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/3868cdfd0b9dda8f2a8d7a45edb94cedec57782bbe1280e2eb66b7773ca707b2.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/39f85da958d90f259c0b08e48d0cd2f6ee0c6fb10e5f02130280442abe1a26e7.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/4851f885c97704d72c4fa799ec785f4b12a8acc796bb89a31287d7a93e590b58.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/5adc6f2de8fb8211005aa566f50fbc3e799ffb2ddf53b6a0675bab8455dda641.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/60084a3f5a332127b3d5175f91f9e633214ac09405a0bf3c48f56eb35078d71d.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/732aab5a71796f9876fca6ce2f448fd5f7eb67d7afaa475bcf3cdec437b0d556.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/7fdcc8eed4e5e4497f7ac24395973bcf772b520fc3584b58a0307c6576742633.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/85593d1218f423c1e77a2d537f1efc37f4cbe8e0a39d6d6d7e504ef0e701d4f6.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/a6580f62096183a517a5efbbf8dc3cc33516b4fe224c9d73262984951b762cf9.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/b73121f12808a4f15320ee8ab6e137119314a0e4cbc6d98ff9a1dabf4554de6f.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/ba99d36fc4ebe552b5550e833041c34c6c9c450be92d14b1b940279625df93c6.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/c0d5c685a4c1e7652706f3329c06bdb7685a7b0dd30ef73d04cad8f009f5502b.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/images/ebbc9e0f16c0a44fad1a4bf9ae569948bc6d1ef7ebee52b2e64705398d7e9332.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03600/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
data/2025/2504_03xxx/2504.03601/868ff8de-112e-45e3-a5e7-d3a76d78b931_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:485460db987127e3532ba22c7c183402a0247269b07c4c4debf4fb667f429271
|
| 3 |
+
size 3027217
|
data/2025/2504_03xxx/2504.03601/full.md
ADDED
|
@@ -0,0 +1,497 @@
|
| 1 |
+
# APIGen-MT: Agentic Pipeline for Multi-Turn Data Generation via Simulated Agent-Human Interplay
|
| 2 |
+
|
| 3 |
+
Akshara Prabhakar* Zuxin Liu* Ming Zhu† Jianguo Zhang† Tulika Awalgaonkar† Shiyu Wang Zhiwei Liu Haolin Chen Thai Hoang Juan Carlos Niebles Shelby Heinecke‡ Weiran Yao‡ Huan Wang‡ Silvio Savarese‡ Caiming Xiong‡
|
| 4 |
+
|
| 5 |
+
Salesforce AI Research
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Training effective AI agents for multi-turn interactions requires high-quality data that captures realistic human-agent dynamics, yet such data is scarce and expensive to collect manually. We introduce APIGen-MT, a two-phase framework that generates verifiable and diverse multi-turn agent data. In the first phase, our agentic pipeline produces detailed task blueprints with ground-truth actions, leveraging a committee of LLM reviewers and iterative feedback loops. These blueprints are then transformed into complete interaction trajectories through simulated human-agent interplay. We train a family of models—the xLAM-2-fc-r series with sizes ranging from 1B to 70B parameters. Our models outperform frontier models such as GPT-4o and Claude 3.5 on $\tau$ -bench and BFCL benchmarks, with the smaller models surpassing their larger counterparts, particularly in multi-turn settings, while maintaining superior consistency across multiple trials. Comprehensive experiments demonstrate that our verified blueprint-to-details approach yields high-quality training data, enabling the development of more reliable, efficient, and capable agents. We open-source 5K synthetic data trajectories and the trained xLAM-2-fc-r models to advance research in AI agents.
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
|
| 13 |
+

|
| 14 |
+
|
| 15 |
+
Model https://huggingface.co/Salesforce/xLAM-2
|
| 16 |
+
|
| 17 |
+
Dataset https://huggingface.co/Salesforce/APIGen-MT-5k
|
| 18 |
+
|
| 19 |
+
Website https://apigen-mt.github.io
|
| 20 |
+
|
| 21 |
+
# 1 Introduction
|
| 22 |
+
|
| 23 |
+
The growth of Large Language Model (LLM) agents has been accelerating at an unprecedented rate, driven by advancements in AI capabilities and increasing demand across various industries [21, 1, 10, 5, 30, 54, 23, 7, 25]. Their role has evolved beyond simple conversational chatbots to AI agents capable of executing real-world tasks, such as managing financial transactions, scheduling appointments, and handling customer service requests. These applications demand not only linguistic fluency but also precise execution, reliability, and adherence to domain-specific policies. Realistic enterprise use cases involve having an assistant (also referred to as an agent in this document) that is capable of fluently conversing with humans of different personalities, incrementally understanding their intent, extracting the background details needed, accurately invoking APIs, and operating over a complex business logic structure.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
Figure 1: Comparative performance of larger xLAM-2-fc-r models (8B-70B, trained with APIGen-MT data) against state-of-the-art baselines on function-calling (BFCL v3 [45]) and agentic ( $\tau$ -bench [49]) capabilities.
|
| 27 |
+
|
| 28 |
+
Despite their potential, building robust and reliable AI agents presents significant challenges [49]. Recent benchmarks reveal that even advanced LLMs struggle with multi-turn interactions, particularly when required to perform complex function calls, track long-term dependencies, or request missing information [50, 28, 46, 45, 19]. Although framework design and prompt engineering have shown promise, the underlying model capabilities remain the primary bottleneck, largely due to two fundamental obstacles: (1) the scarcity of high-quality agent interaction data in public pretraining corpora, and (2) the prohibitive cost and time required to manually collect and label such data, especially for domain-specific applications requiring specialized knowledge.
|
| 29 |
+
|
| 30 |
+
Several approaches have attempted to address these challenges. APIGen [26] introduced techniques for generating single-turn function calling data, while [41] explored methods for knowledge distillation in agent training. However, these approaches primarily focus on single-turn interactions, failing to capture the complexity of real-world agent usage, where multiple turns are often required. Other efforts like [51, 50, 11], while incorporating multi-turn aspects, lack human-agent interplay—crucial for realistic data generation. The verification and synthesis of high-quality multi-turn trajectories containing both linguistic diversity and grounded actions remains largely unsolved, creating a significant barrier to advancing agent capabilities.
|
| 31 |
+
|
| 32 |
+
To address these limitations, we introduce APIGen-MT, an agentic data synthesis pipeline for generating high-quality multi-turn agent data. It operates in two main steps: first, a data agent generates a detailed and verified task "blueprint", and second, this blueprint guides the generation of realistic multi-turn interactions through simulated agent-human interplay (Subsection 4.2). The blueprint generation includes sampling relevant APIs, policies, domain data, and user personas to create grounded, general task configurations, and using reverse task recombination (SubSubsection 4.1.3) to enhance complexity. These blueprints are validated through format/execution checks and an LLM committee review using a reflection-based mechanism [39]. Subsequently, the validated blueprint seeds a simulated interaction between a human LM and an agent (e.g., gpt-4o), producing a complete interaction trajectory with dialogue, actions, and environment feedback for training.
|
| 33 |
+
|
| 34 |
+
The main contributions of our work are summarized as follows:
|
| 35 |
+
|
| 36 |
+
- We propose APIGen-MT, an agentic data synthesis pipeline that leverages environment execution feedback and a review committee to ensure the high quality of the generated multi-turn agent data.
|
| 37 |
+
- We develop a two-phase framework that first creates detailed task blueprints with verifiable groundtruth actions, then transforms these blueprints into realistic multi-turn conversational agent trajectories with tool-usage through simulated human-agent interplay.
|
| 38 |
+
- We train a series of models across multiple architectures and scales (Llama 3.1/3.2 and Qwen 2.5 at 1B to 70B parameters), demonstrating superior performance on two popular agentic benchmarks: $\tau$ -bench and BFCL, surpassing many frontier models including gpt-4o (Figure 1).
|
| 39 |
+
- We open-source 5K high-quality synthetic data (APIGen-MT-5k) and trained models, i.e., the xLAM-2-fc-r series, to advance research in AI agent space.
|
| 40 |
+
|
| 41 |
+
# 2 Related Work
|
| 42 |
+
|
| 43 |
+
Tool-Use Agents. Tool-use capabilities enhance LLMs by enabling interaction with external tools, extending their reasoning and functionality [44, 33, 24]. Function-calling frameworks allow LLMs to parse queries, select tools, and interpret results, but often require predefined tools, limiting adaptability [26, 43]. Efforts to address this include creating reusable tools from scratch on the fly [9], an approach built upon by ToolMaker [44], which leverages tools from existing code repositories. Others compose workflows or learn from demonstrations [33, 36]. Recently, several works have adopted specialized approaches for agent training—critique-informed planning [13], fine-tuning on selective steps [48], teasing apart reasoning from format following (Agent-FLAN) [12], and autonomously invoking tools without explicit post-training (ToRL) [22].
|
| 44 |
+
|
| 45 |
+
Interactive Conversational Benchmarks. Evaluating LLM agents in multi-turn settings requires specialized benchmarks. MultiChallenge [40] and ToolDial [38] assess agents on context maintenance and tool-augmented dialogue. InterCode [47] and CRMArena [19] evaluate iterative problem-solving and customer management. ToolSandbox [28] provides a stateful, interactive benchmark for tool use. User simulations have become essential in these benchmarks, offering systematic, realistic interactions [49, 28, 31]. Our work complements these efforts by generating synthetic multi-turn conversations to train and evaluate agents in such realistic settings.
|
| 46 |
+
|
| 47 |
+
Synthetic Data Generation. The scarcity of high-quality training data drives synthetic data generation. Multi-agent frameworks like MAG-V [37], AgentInstruct [29], MATRIX [42], and IntellAgent [20] create realistic datasets by simulating agent interactions. Other approaches utilize instruction composition [18, 11], intermediate graphs [6], and multi-turn planning to produce complex dialogues [55]. Related to our effort in generating multi-turn training data, BUTTON [11] generates synthetic compositional instruction tuning data by combining 2-3 atomic tasks and conducting trajectory collection via a multi-agent setup. However, this involves constructing APIs based on the generated task and lacks systematic quality control and filtering during task composition, which limits data verification. MAGNET [50] proposed a graph-based method to generate function signature paths, which are iteratively transformed into a sequence of queries and function calls.
|
| 48 |
+
|
| 49 |
+
While many of these prior approaches have been tested mainly on reasoning or single-turn interaction scenarios, our framework, APIGen-MT, advances this line of work: it applies to any existing environment and generates high-quality multi-turn data for realistic agent-human interactions, with a focus on reliable tool selection and parameter generation. By systematically preparing the context, we first generate tasks that adhere to any domain constraints, along with the corresponding executable groundtruth function calls, in an agentic fashion with iterative refinement via feedback loops. Further, the simulated agent-human interplay mechanism allows us to generate long, verifiable interaction trajectories.
|
| 50 |
+
|
| 51 |
+
# 3 APIGen-MT Method for Synthesizing High-Quality Multi-Turn Data
|
| 52 |
+
|
| 53 |
+
In this section, we present APIGen-MT, an agentic pipeline for generating multi-turn data through simulated agent-human interplay. We first formalize the multi-turn interaction problem and then describe our two-phase framework for generating high-quality, verifiable multi-turn data.
|
| 54 |
+
|
| 55 |
+
# 3.1 Multi-Turn Interaction Problem Formulation
|
| 56 |
+
|
| 57 |
+
Multi-turn interactions between an AI assistant and a human user present unique challenges that go beyond single-turn exchanges. We formalize this interaction as a Partially Observable Markov Decision Process (POMDP) defined by the tuple $(\mathcal{U},\mathcal{S},\mathcal{A},\mathcal{O},\mathcal{T},\mathcal{R})$ , where $\mathcal{U}$ represents the instruction space containing possible user intents; $\mathcal{S}$ denotes the state space of the environment and conversation history; $\mathcal{A} = \{\text{tool\_call, response}\}$ is the action space available to the assistant; $\mathcal{O} = \mathcal{O}_E \cup \mathcal{O}_H$ is the observation space comprising observations from the environment $(\mathcal{O}_E)$ and response from the human $(\mathcal{O}_H)$ ; $\mathcal{T}: \mathcal{S} \times \mathcal{A} \to \mathcal{S} \times \mathcal{O}$ is the transition function; and $\mathcal{R}$ is the reward function evaluating interaction success. The AI assistant must engage in a multi-turn conversation with the human user to incrementally understand their intent $q \in \mathcal{U}$ and solve it through appropriate interactions with the environment while adhering to any domain rules. At turn $t$ , the assistant predicts an action $a^t \in \mathcal{A}$ based on the interaction history and understanding of $q$ thus far. When $a^t$ is a tool_call compliant with the rules, it triggers a state transition $(s_E^t, \text{tool\_call}) \to (s_E^{t+1}, o_E)$ , where $o_E \in \mathcal{O}_E$ is the tool output (typically in structured format like JSON). When $a^t$ is a response to the human, it causes a
|
| 58 |
+
|
| 59 |
+

|
| 60 |
+
Phase 1: Task Configuration and Groundtruth Generation
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
Phase 2: Human-Agent-Environment Interaction Trajectory Collection
|
| 64 |
+
Figure 2: Overview of the APIGen-MT framework. Phase 1 generates task configurations and groundtruth actions through an agentic process with feedback loops. Phase 2 collects human-agent-environment interaction trajectories by simulating realistic conversations between a human user and a test agent in an executable environment.
|
| 65 |
+
|
| 66 |
+
state transition $(s_H^t, \text{response}) \to (s_H^{t+1}, o_H)$, where $o_H \in \mathcal{O}_H$ is the human's follow-up message. Importantly, the environment state $s_E^{t+1}$ remains latent to both the assistant and the human. The interaction completes when the human sends a terminating message or the maximum number of turns is reached. The reward $\mathcal{R}(\Delta S_E, a)$ is calculated based on the cumulative state change in the environment $\Delta S_E$ and the sequence of responses $a = \{a_i \mid a_i \text{ is a response to the human}\}$ provided by the assistant throughout the episode. The assistant's objective is to maximize this reward.
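To make the formulation concrete, the following is a minimal sketch of the interaction loop it describes; the `assistant`, `env_step`, and `human_reply` callables and the `###STOP###` terminating message are placeholders, not the paper's implementation.

```python
from dataclasses import dataclass
from typing import Callable, Dict, List, Union

@dataclass
class ToolCall:          # a^t = tool_call
    name: str
    arguments: Dict[str, str]

@dataclass
class Response:          # a^t = response to the human
    text: str

Action = Union[ToolCall, Response]

def run_episode(assistant: Callable[[List[dict]], Action],
                env_step: Callable[[ToolCall], str],
                human_reply: Callable[[List[dict]], str],
                max_turns: int = 30) -> List[dict]:
    """Roll out one multi-turn interaction and return the message history.

    The reward R(Delta S_E, a) would afterwards be computed from the
    cumulative environment state change and the assistant's responses.
    """
    history: List[dict] = []
    for _ in range(max_turns):
        action = assistant(history)                 # predict a^t from the history
        if isinstance(action, ToolCall):
            o_e = env_step(action)                  # latent env transition -> o_E
            history.append({"role": "tool", "name": action.name, "content": o_e})
        else:
            history.append({"role": "assistant", "content": action.text})
            o_h = human_reply(history)              # human transition -> o_H
            history.append({"role": "user", "content": o_h})
            if "###STOP###" in o_h:                 # terminating message (placeholder)
                break
    return history
```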
|
| 67 |
+
|
| 68 |
+
# 3.2 APIGen-MT Framework Overview
|
| 69 |
+
|
| 70 |
+
Generating high-quality multi-turn data that captures the complexities of agent-human interactions presents significant challenges. Directly synthesizing multi-turn conversations in one shot is difficult for two key reasons: (1) a single error or hallucination in any intermediate step can lead to complete failure, and (2) the content of each turn depends on previous function calls and their outputs, creating complex dependencies that are difficult to maintain consistently.
|
| 71 |
+
|
| 72 |
+
To address these challenges, we introduce APIGen-MT, a two-phase framework for generating verifiable and diverse multi-turn data (Figure 2). Our approach extends the APIGen framework [26] by adding an agentic feedback loop and simulated human-agent interplay to generate realistic multi-turn conversations.
|
| 73 |
+
|
| 74 |
+
The core insight of our approach is to separate the task generation process into two distinct phases: first creating a detailed "blueprint" of the task (Phase 1), and then using this blueprint to guide the generation of realistic multi-turn interactions that fill in the conversational details (Phase 2). This separation allows us to ensure both the correctness of the underlying task structure and the naturalness of the resulting conversations.
|
| 75 |
+
|
| 76 |
+
# 3.2.1 Phase 1: Task Configuration and Groundtruth Generation
|
| 77 |
+
|
| 78 |
+
The initial phase of APIGen-MT focuses on systematically generating well-defined task configurations, each comprising a user instruction $(q)$ , a corresponding sequence of verifiable groundtruth actions $(a_{gt})$ , and the expected final outputs $(o_{gt})$ . This phase establishes a solid, verifiable foundation for
|
| 79 |
+
|
| 80 |
+
each interaction scenario before the complexities of conversational dynamics are introduced. As depicted in Figure 2, this is achieved through an agentic workflow incorporating multi-stage validation and refinement loops. More specifically, it has the following steps:
|
| 81 |
+
|
| 82 |
+
1. Context Preparation: Relevant information such as available APIs, domain-specific rules or policies, and reference data is assembled. This context grounds the subsequent generation step in the specific constraints and capabilities of the target environment.
|
| 83 |
+
2. LLM-based Data Generator: An LLM utilizes the prepared context to propose initial task configurations. Each configuration consists of:
|
| 84 |
+
|
| 85 |
+
- A detailed user instruction $q$ describing the high-level intent.
|
| 86 |
+
- A sequence of groundtruth actions $a_{gt}$ required to fulfill the intent.
|
| 87 |
+
- Expected final outputs $o_{gt}$ to be provided to the user.
|
| 88 |
+
|
| 89 |
+
3. Format & Execution Checker: Proposed configurations undergo automated technical validation. This component performs multiple checks:
|
| 90 |
+
|
| 91 |
+
- Verifies the structural correctness of generated actions (e.g., valid API call formats) and outputs.
|
| 92 |
+
- Confirms the executability of each action in $a_{gt}$ within a simulated target environment $E$ (checking API names, arguments, types).
|
| 93 |
+
|
| 94 |
+
4. Review Committee: Configurations passing rule-based checks proceed to semantic evaluation by a committee of multiple LLM reviewers. This committee assesses quality aspects like the coherence between $q$ and $a_{gt}$ , completeness, and overall task sensibility. We use majority voting to achieve a more stable assessment.
|
| 95 |
+
5. Feedback Generation and Refinement: If a task fails at either the validation (Step 3) or review (Step 4) stage, a Feedback Generator aggregates failure reasons and reviews, reflects upon them, and produces a summarized improvement plan. This plan guides the Data Generator (Step 2) in refining the task proposal in a subsequent iteration. Successfully validated tasks exit this loop.
|
| 96 |
+
|
| 97 |
+
This agentic design with feedback loops is crucial for generating high-quality tasks efficiently. By incorporating reflection and improvement based on validation results, the system can learn from failures and progressively generate better tasks.
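A minimal sketch of this generate-validate-refine loop is shown below; `propose`, `check_format_and_exec`, `committee_review`, and `summarize_feedback` stand in for the LLM and environment calls described above, and the refinement budget is illustrative.

```python
def generate_task_config(context, propose, check_format_and_exec,
                         committee_review, summarize_feedback, max_refinements=3):
    """Phase-1 style loop: propose a task, validate it, and refine on failure."""
    feedback = None
    for _ in range(max_refinements + 1):
        task = propose(context, feedback)               # q, a_gt, o_gt
        ok, errors = check_format_and_exec(task)         # Step 3: rule-based checks
        if ok:
            verdicts = committee_review(task)            # Step 4: LLM judges
            if sum(v.passed for v in verdicts) > len(verdicts) / 2:  # majority vote
                return task
            errors = [v.feedback for v in verdicts if not v.passed]
        feedback = summarize_feedback(errors)            # Step 5: reflection plan
    return None                                          # discard after the budget is spent
```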
|
| 98 |
+
|
| 99 |
+
# 3.2.2 Phase 2: Human-Agent-Environment Interaction Trajectory Collection
|
| 100 |
+
|
| 101 |
+
Building upon the validated task configurations $q, a_{gt}, o_{gt}$ from Phase 1, the second phase generates realistic multi-turn interaction data by simulating dynamic conversations between an LLM-based human user and a test agent operating within an executable environment. Guided by the task instruction $q$ and often a specific persona, the simulated human naturally reveals information or sub-goals incrementally, while the agent interprets the evolving context, interacts with the environment via API calls when needed, and responds coherently. Importantly, the simulated user is unaware of the underlying environment and available APIs mimicking a real-world user.
|
| 102 |
+
|
| 103 |
+
The simulation produces complete interaction trajectories that capture dialogue turns, agent actions, and environment responses. Each trajectory is validated by comparing its outcome against the groundtruth actions $(a_{gt})$ and expected outputs $(o_{gt})$ from Phase 1. Only those trajectories that verifiably achieve the task using both state-based and output-based checks are accepted into the dataset, ensuring that interactions are both dynamically plausible and grounded in a correct solution.
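The acceptance check can be sketched as follows, assuming a hypothetical trajectory object and an environment that exposes `execute` and `state_hash`; the paper's exact state- and output-comparison logic may differ.

```python
def accept_trajectory(trajectory, env_factory, a_gt, o_gt):
    """Accept a simulated trajectory only if it matches the Phase-1 blueprint."""
    # State-based check: replaying the groundtruth actions from a fresh
    # environment must reach the same final state the trajectory produced.
    ref_env = env_factory()
    for call in a_gt:
        ref_env.execute(call)
    if trajectory.final_state_hash != ref_env.state_hash():
        return False
    # Output-based check: every expected output must appear in some
    # assistant response shown to the user.
    responses = " ".join(m["content"] for m in trajectory.messages
                         if m["role"] == "assistant")
    return all(expected in responses for expected in o_gt)
```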
|
| 104 |
+
|
| 105 |
+
This two-phase design offers several benefits. First, it provides verifiability by grounding interaction data in pre-validated task configurations. Second, it enhances realism by focusing the simulation on natural turn-by-turn dynamics without the simultaneous burden of task solution generation. Lastly, the modular approach isolates issues in task design from those in conversational modeling, facilitating debugging and scalability across diverse interaction patterns. In essence, by integrating agentic generation of verifiable task "blueprint" with realistic simulation of conversational dynamics, APIGen-MT produces high-quality, multi-turn interaction data that balances structural correctness with the naturalness required for training agent models.
|
| 106 |
+
|
| 107 |
+
# 4 A Case Study of APIGen-MT on $\tau$ -bench
|
| 108 |
+
|
| 109 |
+
This section details the instantiation of the APIGen-MT framework (Subsection 3.2) with $\tau$ -bench [49]. Generating high-quality, multi-turn interaction data with nuanced human-agent dynamics
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
Figure 3: Realization of APIGen-MT framework for $\tau$ -bench. We first generate realistic task instances by random walks down the API graph and sampling. Next, the tasks are validated following a multi-stage pipeline. Instances which fail are sent back to the Generator to be refined based on the validation feedback. Finally, trajectories are generated by a simulated human user that interacts with a test agent by supplying the query details in a turn-wise manner. Trajectories which pass state- and output-based evaluations are collected.
|
| 113 |
+
|
| 114 |
+
presents challenges, as direct conversation simulation often leads to inconsistencies or task deviations. Our two-phase approach addresses this by first synthesizing detailed task configurations that define the user's high-level intent $(q)$, groundtruth actions $(a_{gt})$, and the expected final outputs $(o_{gt})$. By establishing this verifiable "blueprint" first (Phase 1), we can then more reliably simulate the fine-grained, turn-by-turn interaction dynamics between a human and an agent within the executable environment (Phase 2), ensuring the collected trajectories are both realistic and grounded in a verifiable solution path. $\tau$ -bench, with its realistic domains, executable APIs, and specific policies, provides an ideal testbed for this methodology. Figure 3 illustrates this specific implementation.
|
| 115 |
+
|
| 116 |
+
# 4.1 Phase 1 Implementation: Task Configuration Generation and Validation
|
| 117 |
+
|
| 118 |
+
# 4.1.1 API Dependency Graph and Context Samplers
|
| 119 |
+
|
| 120 |
+
Generating realistic tasks for $\tau$ -bench requires navigating its specific APIs, policies, and data structures. We implemented the following techniques for task generation and validation.
|
| 121 |
+
|
| 122 |
+
API Graph Modeling. We model the available APIs in each $\tau$ -bench domain as a directed graph, where nodes represent APIs and edges represent dependencies between them. An edge exists from API $A$ to API $B$ if $B$ 's input arguments can depend on $A$ 's output and the co-occurrence of this tool-call pair is permitted under domain policies. This graph-based approach enables us to generate realistic task sequences by performing random walks through the API dependency graph.
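As an illustration, a dependency graph over a handful of APIs (names loosely modeled on the retail domain, not the exact $\tau$ -bench signatures) and a random walk that seeds a task's action sequence might look like this:

```python
import random

# Hypothetical dependency edges: an edge A -> B means B's inputs can be
# filled from A's outputs and the pair is allowed by domain policy.
api_graph = {
    "find_user_by_email": ["get_order_details"],
    "get_order_details": ["modify_pending_order_items", "cancel_pending_order"],
    "modify_pending_order_items": [],
    "cancel_pending_order": [],
}

def sample_action_skeleton(graph, max_len=3, seed=None):
    """Random walk over the API dependency graph to seed a task's actions."""
    rng = random.Random(seed)
    node = rng.choice(list(graph))
    walk = [node]
    while len(walk) < max_len and graph[node]:
        node = rng.choice(graph[node])
        walk.append(node)
    return walk

print(sample_action_skeleton(api_graph, seed=0))
```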
|
| 123 |
+
|
| 124 |
+
Specialized Context Samplers. To ensure task diversity, realism, and grounding, we utilize several domain-specific samplers that provide context to the LLM-based task generator.
|
| 125 |
+
|
| 126 |
+
- API Sampler: We distinguish between state-exploring ('read') APIs and state-changing ('write') APIs which can modify the environment states. The generator focuses on sampling the necessary 'write' APIs to form the core of $a_{gt}$ , allowing flexibility in how 'read' APIs might be used during the subsequent interaction phase. This approach encourages exploration while ensuring that specific state-changing actions are included in the groundtruth.
|
| 127 |
+
- Policy Sampler: For each $\tau$ -bench domain, we extract and sample from the domain-specific policies and rules. These policies are incorporated into the task generation process to ensure compliance in realistic use cases. Task complexity is influenced by the number of 'write' calls and the associated policy constraints.
|
| 128 |
+
- **Domain Data Sampler:** To ground tasks in realistic domain data without exceeding context limits, we sample domain-specific data with additional metadata (e.g., cost, time, attributes). This metadata enhances coverage and enables more creative and diverse task scenarios.
|
| 129 |
+
- Persona Sampler: We incorporate user persona descriptions from PersonaHub [17] to inform the user intent $q$ and inject realistic human qualities and situational context, enhancing diversity for subsequent Phase 2 human-agent interaction simulation.
|
| 130 |
+
- Example Sampler: We provide few-shot examples of well-formed tasks relevant to the sampled APIs, guiding the generator on structure and format.
|
| 131 |
+
|
| 132 |
+
For each task generation iteration, we randomly vary the sampling frequency for each sampler to enhance diversity and prevent repetitive scenarios. The sampled information is compiled into a prompt instructing the LLM generator to produce a <thought> (its reasoning process), the user <instruction> $(q)$ , the corresponding groundtruth <actions> $(a_{gt})$ , and the expected final <outputs> $(o_{gt})$ .
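A rough sketch of how the sampled context could be assembled into a generation prompt is given below; the sampler interface, tag names, and prompt wording are illustrative rather than the exact prompts from Appendix B.

```python
import random

def build_generation_prompt(samplers, examples):
    """Assemble the task-generation prompt from the context samplers.

    `samplers` maps names like 'write_apis' or 'persona' to callables that
    return k sampled items; the sampling frequency is varied per iteration.
    """
    ctx = {name: sampler(k=random.randint(1, 3)) for name, sampler in samplers.items()}
    return (
        "You are generating a task for a customer-support agent.\n"
        f"Write APIs: {ctx['write_apis']}\n"
        f"Policies: {ctx['policies']}\n"
        f"Domain data: {ctx['domain_data']}\n"
        f"Persona: {ctx['persona']}\n"
        f"Examples: {examples}\n"
        "Respond with <thought>, <instruction>, <actions>, and <outputs>."
    )
```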
|
| 133 |
+
|
| 134 |
+
# 4.1.2 Multi-Stage Validation for $\tau$ -bench
|
| 135 |
+
|
| 136 |
+
We implement a rigorous three-stage validation process for the $\tau$ -bench environment:
|
| 137 |
+
|
| 138 |
+
# Stage 1: Action Validation.
|
| 139 |
+
|
| 140 |
+
- Format Check: Verifies the presence and basic structure of required task components (<thought>, <instruction>, <actions>, <outputs>) and ensures all tool calls in <actions> are valid JSON and outputs in <outputs> are strings.
|
| 141 |
+
- Execution Check: Simulates each action in $a_{gt}$ within the $\tau$ -bench environment, validating API names, argument names, and data types. The cumulative effect on the environment state $(\Delta S_E)$ is captured as a diff_batch, similar to git diff.
|
| 142 |
+
- Policy Compliance Check: Leverages the executable nature of $\tau$ -bench by translating domain policies into Python unit tests. These tests run against the simulated execution trace of $a_{gt}$ to detect violations, especially those arising from interactions between multiple actions (e.g., action B is invalid given the state change caused by prior action A). Failures yield detailed feedback on the specific policy violation.
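For illustration, one such policy translated into a unit test might look like the following; the policy ("cancel only pending orders, never after a return") and the API/field names are hypothetical examples, not the benchmark's actual rules.

```python
# Hypothetical policy: an order may be cancelled only while it is still
# "pending", and never after a return has been initiated for it.
def check_cancel_only_pending(trace):
    """`trace` is a list of (api_name, args, state_after) tuples from replaying a_gt."""
    returned = set()
    for api, args, state in trace:
        if api == "return_delivered_order_items":
            returned.add(args["order_id"])
        if api == "cancel_pending_order":
            order = state["orders"][args["order_id"]]
            assert order["status"] == "pending", f"{args['order_id']} is not pending"
            assert args["order_id"] not in returned, "cancel after return is forbidden"

def test_policy_compliance():
    trace = [
        ("cancel_pending_order", {"order_id": "#W1"},
         {"orders": {"#W1": {"status": "pending"}}}),
    ]
    check_cancel_only_pending(trace)  # raises AssertionError on a violation
```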
|
| 143 |
+
|
| 144 |
+
Stage 2: Alignment Validation. Tasks successfully passing Stage 1's action validation are then assessed for semantic alignment. Specifically, we evaluate whether the groundtruth actions $(a_{gt})$ , as reflected by their environmental effects summarized in the diff_batch, accurately and comprehensively fulfill the user's intent expressed in the instruction $(q)$ . To mitigate the potential biases and inconsistencies of a single evaluator, we employ a committee of diverse LLM judges [54, 8]. These judges review each task based on a systematic rubric with metrics such as Correctness, Completeness, Satisfaction, and Creativity (refer Figure 9 in Appendix B for details).
|
| 145 |
+
|
| 146 |
+
Each judge provides scores and qualitative feedback. We utilize a majority voting strategy across the committee's judgments to determine the final assessment for each metric and the overall task quality. This approach yields more stable and reliable evaluation results compared to single-judge assessments.
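A simple aggregation of the committee's judgments could look like this, assuming each judge returns an accept/reject vote plus rubric scores on a 1-5 scale (the actual rubric follows Figure 9 in Appendix B and may differ):

```python
from collections import Counter
from statistics import mean

def aggregate_committee(reviews, pass_threshold=4.0):
    """Majority-vote aggregation over per-judge rubric scores.

    `reviews` is a list of dicts such as
    {"correctness": 5, "completeness": 4, "satisfaction": 4, "creativity": 3,
     "accept": True, "feedback": "..."}.
    """
    votes = Counter(r["accept"] for r in reviews)
    accepted = votes[True] > votes[False]
    avg_scores = {m: mean(r[m] for r in reviews)
                  for m in ("correctness", "completeness", "satisfaction", "creativity")}
    overall = mean(avg_scores.values())
    feedback = [r["feedback"] for r in reviews if not r["accept"]]
    return (accepted and overall >= pass_threshold), avg_scores, feedback
```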
|
| 147 |
+
|
| 148 |
+
Stage 3: Final Semantic Review & Refinement. Based on the aggregated scores from the committee (determined via majority voting), tasks achieving an average score above a predefined threshold are accepted and added to the pool of validated task configurations. Tasks that fail this review trigger the feedback loop mechanism. Consolidated feedback, summarizing the points raised by the committee majority, is sent back to the LLM task generator. This initiates a reflection process [39], guiding the generator to revise the task in the subsequent iteration to address the identified shortcomings.
|
| 149 |
+
|
| 150 |
+
# 4.1.3 Reverse Task Recombination for Complex Task Construction
|
| 151 |
+
|
| 152 |
+
While the iterative refinement process improves task quality and efficiency, directly generating complex, long-horizon tasks involving multiple steps remains challenging. Validation failures can occur due to subtle policy conflicts or difficulties in ensuring perfect alignment across many steps. To overcome this and systematically construct more complicated scenarios, we implement Reverse Task Recombination, a technique that leverages the principle of compositionality [11, 18], similar to modular design in software engineering. The core idea is to build complex tasks from simpler, independently validated "building blocks":
|
| 153 |
+
|
| 154 |
+
1. Select Validated Tasks: Identify multiple simpler tasks $(T_{1}, T_{2}, \ldots)$ that have successfully passed all validation stages (Stages 1-3) and are associated with the same user persona.
|
| 155 |
+
2. Concatenate Components: Combine their respective groundtruth actions ( $a_{\text{combined}} = a_{gt,1} \circ a_{gt,2} \circ \ldots$ ) and expected outputs ( $o_{\text{combined}} = o_{gt,1} \oplus o_{gt,2} \oplus \ldots$ , where $\circ$ denotes action sequence concatenation and $\oplus$ denotes output aggregation).
|
| 156 |
+
|
| 157 |
+
3. Re-Check Policy Compliance: Rerun the Policy Check on $a_{\text{combined}}$ to ensure that the cumulative action sequence remains logically sound and adheres to the domain rules, since combinations could cause conflicting actions to appear together, e.g., returning and canceling the same order.
|
| 158 |
+
4. Synthesize Combined Instruction: Instruct the LLM generator to create a new, coherent, overarching user instruction ( $q_{\text{combined}}$ ) that logically integrates the goals and steps represented by $a_{\text{combined}}$ and $o_{\text{combined}}$ . This new instruction should frame the combined actions as a single, more complex user request.
|
| 159 |
+
5. Re-Validate Semantics: Submit the newly formed complex task $T_{\text{combined}} = \{q_{\text{combined}}, a_{\text{combined}}, o_{\text{combined}}\}$ for validation starting from Stage 2 (Alignment Validation). Stage 1 (Action Validation) can be safely skipped for $a_{\text{combined}}$ because each constituent action sequence $(a_{gt,1}, a_{gt,2}, \ldots)$ has already been individually checked for format and execution within its original context, and policy compliance in the current context. Stage 3 (Final Semantic Review) proceeds based on the outcome of Stage 2 for the combined task.
|
| 160 |
+
|
| 161 |
+
This method allows for the scalable generation of complex, multi-step tasks with greater reliability, as it builds upon verified components while focusing the validation effort on the semantic coherence and alignment of the combined whole.
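A compact sketch of the recombination step, with placeholder task objects and LLM/policy-check calls, is shown below:

```python
def recombine_tasks(tasks, synthesize_instruction, policy_check):
    """Build a more complex task from validated simpler ones sharing a persona.

    `synthesize_instruction` (an LLM call) and `policy_check` are placeholders.
    """
    persona = tasks[0].persona
    assert all(t.persona == persona for t in tasks)
    a_combined = [a for t in tasks for a in t.actions]        # action concatenation
    o_combined = [o for t in tasks for o in t.outputs]        # output aggregation
    if not policy_check(a_combined):                          # re-check compliance
        return None
    q_combined = synthesize_instruction(persona, a_combined, o_combined)
    return {"q": q_combined, "a_gt": a_combined, "o_gt": o_combined}
```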
|
| 162 |
+
|
| 163 |
+
# 4.2 Phase 2: Simulated Human-Agent Interplay and Trajectory Collection
|
| 164 |
+
|
| 165 |
+
Building on the verified tasks from Phase 1—which include a detailed user intent $q$, groundtruth actions $a_{gt}$, and expected outputs $o_{gt}$—we simulate multi-turn interaction trajectories between an agent $(A)$ and a human user $(H)$ modeled by an LLM. Guided by the instruction $q$ and an associated persona, the simulated human incrementally reveals task details to mimic realistic interactions. The agent, instantiated as gpt-4o with its function-calling mode, interprets the evolving intent and executes the necessary actions to complete the task.
|
| 166 |
+
|
| 167 |
+
Trajectory Collection. We employ rejection sampling to ensure that only trajectories achieving the task goal $(r = 1)$ are retained. Success is determined by comparing the final environment state to $a_{gt}$ and the agent's final responses to $o_{gt}$ . For enhanced data coverage, each task is attempted up to three times, and the union of all unique successful trajectories is compiled into an offline dataset suitable for downstream applications such as behavioral cloning.
|
| 168 |
+
|
| 169 |
+
Stabilizing Simulated Human. A critical challenge in this phase is maintaining the stability and fidelity of the simulated human. Over multiple conversational turns, the human LLM may drift from the original instruction or be unduly influenced by the agent's responses [32], introducing variability that hinders reliable evaluation [49]. To address this, we adopt a Best-of-N (N=4) sampling strategy in combination with a self-critique mechanism for the human LLM's responses (see Figure 12 in Appendix B for details), allowing it to adhere to the task instruction more accurately and not be misled by the test agent's responses. Its effectiveness was validated on the $\tau$ -bench test set, where improved consistency in agent performance evaluation across multiple trials was observed (Table 3).
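A minimal sketch of this Best-of-N self-critique selection is given below; `sample_reply` and `critique` are placeholder LLM calls, and the scoring scheme is illustrative.

```python
def bon_user_reply(history, instruction, sample_reply, critique, n=4):
    """Best-of-N user simulation: sample N candidate replies, score each with a
    self-critique prompt, and return the one judged most faithful to the task
    instruction.
    """
    candidates = [sample_reply(history, instruction) for _ in range(n)]
    scored = [(critique(instruction, history, c), c) for c in candidates]
    return max(scored, key=lambda pair: pair[0])[1]
```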
|
| 170 |
+
|
| 171 |
+
# 4.3 Data Collection & Statistics
|
| 172 |
+
|
| 173 |
+
Data Collection Procedure. We source APIs implemented as Python functions from $\tau$ -bench. Among these, we have 15 'read' and 13 'write' APIs across both domains. $\tau$ -bench is accompanied by detailed policies and domain rules in two settings, Retail and Airline, which we use as guideline policies. We utilize gpt-4o and DeepSeek V3 models in the task generation, validation, and agent-human interplay stages to collect training data. The prompts used in every stage are provided in Appendix B. We set the maximum number of reflection-based feedback turns to 3 for Retail and 5 for Airline, respectively.
|
| 174 |
+
|
| 175 |
+
Statistics. A summary of the data collection is shown in Figure 4. Figure 5 shows that APIGen-MT can efficiently collect long trajectories, which require a strong model like gpt-4o to take an average of 12 turns to complete. Our agentic pipeline, with its review committee and iterative refinement via reflection, boosts the task configuration success rate by $2.5\mathbf{x}$ , reaching $70\%$ .
|
| 176 |
+
|
| 177 |
+
Our implementation demonstrates that the APIGen-MT framework can successfully generate high-quality multi-turn data for complex domains with strict policy constraints. The two-phase approach
|
| 178 |
+
|
| 179 |
+
<table><tr><td>Metric</td><td>Value</td></tr><tr><td>Task Config. S.R. (Phase 1)</td><td>70%</td></tr><tr><td>Task Config. S.R. w/o Agentic Feedback</td><td>28%</td></tr><tr><td>Trajectory Sim. S.R. (Phase 2)</td><td>67%</td></tr><tr><td>Min. Turns per Trajectory</td><td>1</td></tr><tr><td>Max. Turns per Trajectory</td><td>29</td></tr><tr><td>Avg. Tool Calls per Trajectory</td><td>7</td></tr><tr><td>Avg. User Turns per Trajectory</td><td>6</td></tr></table>
|
| 180 |
+
|
| 181 |
+
Figure 4: Statistics for the dataset generated using APIGen-MT. Success rates (S.R.) are reported for the task configuration (w. and w/o agentic feedback in Phase 1) and trajectory simulation (Phase 2) stages.
|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
Figure 5: Density distribution of assistant and user turns in collected trajectories.
|
| 185 |
+
|
| 186 |
+
with agentic feedback loops and simulated human-agent interplay proves effective in creating diverse, realistic, and verifiable datasets for training and evaluating conversational agents.
|
| 187 |
+
|
| 188 |
+
# 5 Experiments
|
| 189 |
+
|
| 190 |
+
# 5.1 Experimental Setup
|
| 191 |
+
|
| 192 |
+
Training Details. We perform filtered Behavioral Cloning (BC) using the collected trajectories with Llama 3.1/3.2 Instruct models [16] and Qwen 2.5 Instruct models [34]. The collected trajectories are split at every assistant response, and we train to predict only the assistant response tokens by masking the prompt and other messages. To enhance the dataset diversity, we also jointly train our xLAM-2-fc-r models with function-calling data from [26] and other domains of agentic data from [52, 53]. We utilize the Llama-Factory library [56] and perform full fine-tuning using DeepSpeed ZeRO [35] stage 3 and Flash Attention 2 [15] in bfloat16 precision with the AdamW optimizer [27], and train for at most 3 epochs on an NVIDIA H200 node.
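The assistant-only loss masking can be sketched as follows; the `IGNORE_INDEX` convention matches common causal-LM fine-tuning setups, while the tokenization and chat-template details are simplified placeholders.

```python
IGNORE_INDEX = -100  # tokens with this label are excluded from the loss

def build_labels(messages, tokenize):
    """Mask everything except assistant tokens so BC only predicts agent turns.

    `tokenize` maps a single message to a list of token ids; chat-template
    formatting is omitted for brevity.
    """
    input_ids, labels = [], []
    for msg in messages:
        ids = tokenize(msg)
        input_ids += ids
        if msg["role"] == "assistant":
            labels += ids                         # learn these tokens
        else:
            labels += [IGNORE_INDEX] * len(ids)   # prompt/user/tool tokens are masked
    return input_ids, labels
```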
|
| 193 |
+
|
| 194 |
+
**Benchmarks.** We evaluate on two challenging benchmarks designed specifically for assessing agent capabilities – (1) BFCL v3 [45], a leading benchmark for tool-use evaluation, specifically designed to assess LLMs' function calling capabilities and (2) $\tau$ -bench [49], a comprehensive benchmark for evaluating AI agents in realistic scenarios. More details are in Appendix A. Both are particularly well-suited for evaluating the effectiveness of our APIGen-MT approach, as they focus on multi-turn interactions and tool use capabilities, which are central to our data generation methodology.
|
| 195 |
+
|
| 196 |
+
# 5.2 Experiment Results
|
| 197 |
+
|
| 198 |
+
We compare the performance of our trained models (xLAM-2-fc-r) against state-of-the-art proprietary models such as gpt models (o1, gpt-4o); claude models (claude-3.5-haiku, claude-3.5-sonnet, claude-3.5-sonnet (new), and claude-3.7-sonnet), and open-source LLMs including DeepSeek v3, and the baselines Llama 70B and Qwen 32B.
|
| 199 |
+
|
| 200 |
+
BFCL v3 Results. On the BFCL v3 benchmark, our models demonstrate exceptional performance. As shown in Table 1, xLAM-2-70b-fc-r and xLAM-2-32b-fc-r achieve the top 2 positions on the leaderboard with overall accuracies of $78.19\%$ and $75.83\%$ respectively, surpassing all proprietary and open-source models. The most striking advantage appears in multi-turn scenarios, where our models excel across all parameter scales. xLAM-2-70b-fc-r achieves $75.12\%$ multi-turn accuracy, while our smaller models show remarkable capabilities with xLAM-2-8b-fc-r at $69.25\%$ , xLAM-2-3b-fc-r at $56.00\%$ , and even xLAM-2-1b-fc-r at $43.12\%$ - all substantially outperforming o1 $(36\%)$ and gpt-4o in function-calling mode $(41\%)$ . Additionally, our models demonstrate strong hallucination detection, with xLAM-2-3b-fc-r achieving $94.44\%$ on relevance detection, matching the best score in this category.
|
| 201 |
+
|
| 202 |
+
Table 1: Performance of different models on BFCL leaderboard (as of date 04/03/2025). The rank is based on the overall accuracy, which is a weighted average of different evaluation categories. "FC" stands for function-calling mode in contrast to using a customized "prompt" to extract the function calls. See the benchmark [45] for details.
|
| 203 |
+
|
| 204 |
+
<table><tr><td rowspan="2">Rank</td><td rowspan="2">Overall Acc</td><td rowspan="2">Model</td><td colspan="3">Single-Turn</td><td>Multi-Turn</td><td colspan="2">Hallucination</td></tr><tr><td>Non-live (AST)</td><td>Non-live (Exec)</td><td>Live (AST)</td><td>Overall Acc</td><td>Relevance</td><td>Irrelevance</td></tr><tr><td>1</td><td>78.19</td><td>xLAM-2-70b-fc-r (FC)</td><td>88.48</td><td>85.98</td><td>72.63</td><td>75.12</td><td>66.67</td><td>78.74</td></tr><tr><td>2</td><td>75.83</td><td>xLAM-2-32b-fc-r (FC)</td><td>89.50</td><td>86.48</td><td>73.79</td><td>66.38</td><td>83.33</td><td>76.25</td></tr><tr><td>3</td><td>74.31</td><td>watt-tool-70b (FC)</td><td>84.06</td><td>89.39</td><td>77.74</td><td>58.75</td><td>94.44</td><td>76.32</td></tr><tr><td>4</td><td>72.83</td><td>xLAM-2-8b-fc-r (FC)</td><td>84.35</td><td>85.59</td><td>66.73</td><td>69.25</td><td>83.33</td><td>64.11</td></tr><tr><td>5</td><td>72.08</td><td>GPT-4o-2024-11-20 (Prompt)</td><td>88.1</td><td>89.38</td><td>79.83</td><td>47.62</td><td>83.33</td><td>83.76</td></tr><tr><td>6</td><td>69.94</td><td>GPT-4.5-Preview-02-27 (FC)</td><td>86.12</td><td>83.98</td><td>79.34</td><td>45.25</td><td>66.67</td><td>83.64</td></tr><tr><td>7</td><td>69.58</td><td>GPT-4o-2024-11-20 (FC)</td><td>87.42</td><td>89.2</td><td>79.65</td><td>41</td><td>83.33</td><td>83.15</td></tr><tr><td>8</td><td>68.39</td><td>ToolACE-2-8B (FC)</td><td>87.58</td><td>87.11</td><td>80.05</td><td>36.88</td><td>72.22</td><td>90.11</td></tr><tr><td>9</td><td>67.98</td><td>watt-tool-8B (FC)</td><td>86.56</td><td>89.34</td><td>76.5</td><td>39.12</td><td>83.33</td><td>83.15</td></tr><tr><td>10</td><td>67.88</td><td>GPT-4-2024-04-09 (FC)</td><td>84.73</td><td>85.21</td><td>80.5</td><td>38.12</td><td>72.22</td><td>83.81</td></tr><tr><td>11</td><td>67.87</td><td>o1-2024-12-17 (Prompt)</td><td>85.67</td><td>87.45</td><td>80.63</td><td>36</td><td>72.22</td><td>87.78</td></tr><tr><td>12</td><td>67.72</td><td>BitAgent-8B</td><td>86.92</td><td>89.52</td><td>76.14</td><td>38.5</td><td>83.33</td><td>82.38</td></tr><tr><td>13</td><td>65.12</td><td>o3-mini-25-01-31 (Prompt)</td><td>86.15</td><td>89.46</td><td>79.08</td><td>28.75</td><td>72.22</td><td>82.96</td></tr><tr><td>14</td><td>65.11</td><td>xLAM-2-3b-fc-r (FC)</td><td>82.94</td><td>81.88</td><td>58.69</td><td>56.00</td><td>94.44</td><td>57.94</td></tr><tr><td>15</td><td>64.1</td><td>CoALM-405B</td><td>90.58</td><td>89.07</td><td>74.5</td><td>28.75</td><td>100</td><td>71.79</td></tr><tr><td>16</td><td>64.1</td><td>GPT-4o-mini-24-07-18 (FC)</td><td>85.21</td><td>83.57</td><td>74.41</td><td>34.12</td><td>83.33</td><td>74.75</td></tr><tr><td>...</td><td>...</td><td>...</td><td colspan="6">...</td></tr><tr><td>34</td><td>58.93</td><td>Gemini-2-Flash-Thinking</td><td>87.4</td><td>87.07</td><td>75.97</td><td>14.5</td><td>77.78</td><td>72.75</td></tr><tr><td>35</td><td>58.9</td><td>Qwen2.5-14B-Instruct (FC)</td><td>85.42</td><td>84.86</td><td>76.68</td><td>15.88</td><td>55.56</td><td>77.69</td></tr><tr><td>36</td><td>58.90</td><td>xLAM-2-1b-fc-r (FC)</td><td>76.23</td><td>74.86</td><td>59.88</td><td>43.12</td><td>88.89</td><td>56.87</td></tr><tr><td>37</td><td>58.55</td><td>DeepSeek-V3 (FC)</td><td>89.17</td><td>92.32</td><td>68.41</td><td>18.62</td><td>88.89</td><td>59.36</td></tr><tr><td>38</td><td>58.45</td><td>mistral-large-2407 (FC)</td><td>86.81</td><td>84.38</td><td>69.88</td><td>23.75</td><td>72.22</td><td>52.85</td></tr><tr><td>39</td><td>58.42</td><td>ToolACE-8B 
(FC)</td><td>87.54</td><td>89.21</td><td>78.59</td><td>7.75</td><td>83.33</td><td>87.88</td></tr></table>
|
| 205 |
+
|
| 206 |
+
$\tau$ -bench Results. Table 2 presents results under the default naive user setting on $\tau$ -bench. Our xLAM-2-70b-fc-r model achieves a $56.2\%$ success rate, outperforming Llama 3.1 70B Instruct $(38.2\%)$ , DeepSeek v3 $(40.6\%)$ , and even proprietary models like GPT-4o $(52.9\%)$ , while approaching more recent models like Claude 3.5 Sonnet $(60.1\%)$ . Notably, our smaller variants like xLAM-2-32b-fc-r $(54.6\%)$ and xLAM-2-8b-fc-r $(46.7\%)$ surpass larger baselines, demonstrating that our synthetic data approach enables efficient knowledge transfer and strong performance with fewer parameters.
|
| 207 |
+
|
| 208 |
+
Table 2: Success Rate (pass@1) of various open-source and proprietary models on the Retail and Airline settings of $\tau$ -bench (averaged across at least 5 trials). The xLAM-2-fc-r models are trained on the data generated using APIGen-MT. Overall indicates the average score across both domains. $^{1}$ indicates results from [14]; $^{2}$ indicates results from [2]; $^{3}$ indicates results from [3]; $^{4}$ indicates results from [4]. Note: We evaluate only with the benchmark's think tool and no prompt optimizations.
|
| 209 |
+
|
| 210 |
+
<table><tr><td>Model</td><td>τ-Retail</td><td>τ-Airline</td><td>Overall</td></tr><tr><td colspan="4">Open-Source Models</td></tr><tr><td>Qwen 2.5 32B Instruct</td><td>24.4</td><td>25.0</td><td>24.7</td></tr><tr><td>Llama 3.1 70B Instruct</td><td>50.4</td><td>26.0</td><td>38.2</td></tr><tr><td>DeepSeek v3<sup>1</sup></td><td>58.3</td><td>22.8</td><td>40.6</td></tr><tr><td>xLAM-2-70b-fc-r</td><td>67.1</td><td>45.2</td><td>56.2</td></tr><tr><td>xLAM-2-32b-fc-r</td><td>64.3</td><td>45.0</td><td>54.6</td></tr><tr><td>xLAM-2-8b-fc-r</td><td>58.2</td><td>35.2</td><td>46.7</td></tr><tr><td>xLAM-2-3b-fc-r</td><td>44.4</td><td>32.0</td><td>38.2</td></tr><tr><td>xLAM-2-1b-fc-r</td><td>22.5</td><td>21.0</td><td>21.8</td></tr><tr><td colspan="4">Proprietary Models</td></tr><tr><td>Gemini 1.5 Pro<sup>1</sup></td><td>54.9</td><td>25.2</td><td>40.1</td></tr><tr><td>gpt-4o-2024-11-20</td><td>62.8</td><td>43.0</td><td>52.9</td></tr><tr><td>o1<sup>3</sup></td><td>73.5</td><td>54.2</td><td>63.9</td></tr><tr><td>Claude 3.5 Haiku<sup>2</sup></td><td>51.0</td><td>22.8</td><td>36.9</td></tr><tr><td>Claude 3.5 Sonnet<sup>2</sup></td><td>62.6</td><td>36.0</td><td>49.3</td></tr><tr><td>Claude 3.5 Sonnet (new)<sup>3</sup></td><td>71.5</td><td>48.8</td><td>60.1</td></tr><tr><td>Claude 3.7 Sonnet<sup>4</sup></td><td>78.3</td><td>41.2</td><td>59.8</td></tr><tr><td>Claude 3.7 Sonnet + optimized prompt<sup>4</sup></td><td>81.2</td><td>58.4</td><td>69.8</td></tr></table>
|
| 211 |
+
|
| 212 |
+
These results across both benchmarks demonstrate that our APIGen-MT approach for generating synthetic multi-turn data through simulated agent-human interplay is highly effective. Models trained on this data consistently outperform open-source baselines and are on par with proprietary models, with particularly strong performance in multi-turn scenarios. Importantly, our approach enables smaller models to achieve competitive or superior performance compared to much larger models, highlighting the efficiency and effectiveness of our data generation methodology.
|
| 213 |
+
|
| 214 |
+
# 5.3 Consistency & Stability Experiments
|
| 215 |
+
|
| 216 |
+
We plot the pass^k curves [49] in Figure 6 on $\tau$ -bench in the default naive user LM setting. pass^k is defined as the chance of all $k$ i.i.d. task trials being successful, averaged across all tasks. As $k$ increases, the drop in success rate (SR) is smaller for our models. Notably, on the more complex airline domain, xLAM-2-70b-fc-r has a higher pass^5 score than Claude despite a slightly lower pass^1, suggesting higher reliability and consistency across multiple trials. This is a critical property for deployment in real-world applications, where consistent performance is essential.
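As a concrete illustration of this metric, here is a minimal sketch of computing pass^k from per-task trial outcomes; the trial data are made up and the function is not the benchmark's own implementation, only a direct reading of the definition above (per task it reduces to $\binom{c}{k}/\binom{n}{k}$ when $c$ of $n$ trials succeed).

```python
from itertools import combinations

def pass_hat_k(trial_outcomes, k):
    """Estimate pass^k: the probability that k i.i.d. trials of a task all
    succeed, averaged across tasks."""
    per_task = []
    for outcomes in trial_outcomes:              # one list of booleans per task
        subsets = list(combinations(outcomes, k))
        per_task.append(sum(all(s) for s in subsets) / len(subsets))
    return sum(per_task) / len(per_task)

# Illustrative outcomes: 3 tasks x 5 trials (True = trial solved the task).
trials = [
    [True, True, True, True, False],
    [True, False, True, False, True],
    [False, False, False, True, False],
]
print([round(pass_hat_k(trials, k), 3) for k in range(1, 6)])
```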
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Figure 6: Pass^k curves measuring the probability that all 5 independent trials succeed for a given task, averaged across all tasks for $\tau$ -retail (left) and $\tau$ -airline (right) domains. Higher values indicate greater consistency across trials.
|
| 220 |
+
|
| 221 |
+
Next, we adopt the BoN user LM setting (introduced in Subsection 4.2) to assess its effectiveness in producing more stable results across trials. Although this enhancement is applied to the user LM, Table 3 highlights the improved success rate and reduced variance in models utilizing the BoN user simulation. This suggests that enhancing the user simulation strategy with a simple self-critiquing mechanism can not only increase stability but also improve agent performance.
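A minimal sketch of what such a BoN user-simulation step could look like is shown below; `user_lm` and `judge_lm` are assumed to be generic LM callables, the inline prompts are abbreviated stand-ins for the templates in Figures 11 and 12, and the sample count `n` is illustrative rather than the value used in our experiments.

```python
import re

def bon_user_turn(user_lm, judge_lm, intent, history, n=4):
    """Best-of-N user simulation: sample n candidate user messages and keep the
    one that a judge LM (self-critique) scores highest.

    user_lm and judge_lm are assumed callables mapping a prompt string to a
    completion string; the prompts below abbreviate Figures 11 and 12.
    """
    user_prompt = f"Intent:\n{intent}\n\nConversation so far:\n{history}\n\nNext user message:"
    candidates = [user_lm(user_prompt) for _ in range(n)]

    best, best_score = candidates[0], -1
    for cand in candidates:
        judge_prompt = (
            f"<description>{intent}</description>\n"
            f"<response>{cand}</response>\n"
            "Score the response from 0 to 10 and wrap it in <score></score> tags."
        )
        review = judge_lm(judge_prompt)
        match = re.search(r"<score>\s*(\d+)\s*</score>", review)
        score = int(match.group(1)) if match else 0
        if score > best_score:
            best, best_score = cand, score
    return best
```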
|
| 222 |
+
|
| 223 |
+
Table 3: The Success Rate (SR) measured across 5 trials on the Retail domain of $\tau$ -bench using gpt-4o and xLAM-2-70b-fc-r as the test assistants. The average success rate is higher with lower variance using BoN based user simulation, indicative of a more stable evaluation.
|
| 224 |
+
|
| 225 |
+
<table><tr><td>Model (User LM setting)</td><td>t1</td><td>t2</td><td>t3</td><td>t4</td><td>t5</td><td>SR Average</td><td>SR Variance</td></tr><tr><td>gpt-4o (Naive)</td><td>61.7</td><td>57.4</td><td>65.2</td><td>65.2</td><td>64.4</td><td>62.8</td><td>11.1</td></tr><tr><td>gpt-4o (BoN)</td><td>65.2</td><td>69.6</td><td>67.0</td><td>66.1</td><td>67.0</td><td>67.0</td><td>2.6</td></tr><tr><td>xLAM-2-70b-fc-r (Naive)</td><td>69.6</td><td>65.2</td><td>62.6</td><td>68.7</td><td>69.6</td><td>67.1</td><td>9.7</td></tr><tr><td>xLAM-2-70b-fc-r (BoN)</td><td>66.9</td><td>71.3</td><td>68.7</td><td>66.9</td><td>70.4</td><td>68.8</td><td>4.0</td></tr></table>
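As a quick sanity check, the SR Variance column is consistent with the unbiased sample variance of the five trial scores (small differences can arise from rounding of the per-trial values); for example, for gpt-4o (Naive):

```python
scores = [61.7, 57.4, 65.2, 65.2, 64.4]   # gpt-4o (Naive) trials t1..t5 from Table 3
mean = sum(scores) / len(scores)
var = sum((s - mean) ** 2 for s in scores) / (len(scores) - 1)  # unbiased sample variance
print(round(mean, 1), round(var, 1))      # 62.8 11.1, matching the reported SR Average / Variance
```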
|
| 226 |
+
|
| 227 |
+
# 5.4 In-Depth Analysis of Model Behavior
|
| 228 |
+
|
| 229 |
+
To better understand the behavior of our trained models, we perform an in-depth investigation of the tasks solved by xLAM-2-70b-fc-r and a state-of-the-art model, Claude 3.5 Sonnet (new), on $\tau$ -bench. We categorize tasks into 'short', 'medium' and 'long' based on the number of turns required by Claude 3.5 to solve each task across a union of 8 trials. This categorization is derived by calculating the 33rd and 66th percentiles of the number of turns. From Figure 7 we observe that, particularly on the 'long' task category, the success rate for xLAM-2-70b-fc-r is
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Figure 7: Performance/efficiency comparisons of xLAM-2-70b-fc-r with frontier models on $\tau$ -bench.
|
| 233 |
+
|
| 234 |
+
much higher than gpt-4o but lags behind Claude. Further, we assess the efficiency of the agent by measuring the number of interactions needed with the simulated user for the agent to fully comprehend the intent and successfully complete the task. The plot reveals that xLAM-2-70b-fc-r is on par with gpt-4o but requires more interactions compared to Claude, which can be attributed to its method of retrieving user details in stages, necessitating more turns. These observations suggest potential areas for improvement in future iterations.
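For reference, a minimal sketch of the turn-count bucketing used in this analysis is given below; the percentile convention and the example turn counts are illustrative assumptions, not the paper's exact analysis script.

```python
def percentile(sorted_vals, p):
    """Nearest-rank percentile on a sorted list (a simple convention, adequate here)."""
    idx = round(p / 100 * (len(sorted_vals) - 1))
    return sorted_vals[idx]

def bucket_by_turns(turns_per_task):
    """Label tasks 'short'/'medium'/'long' using the 33rd/66th percentiles of turn counts."""
    counts = sorted(turns_per_task.values())
    p33, p66 = percentile(counts, 33), percentile(counts, 66)
    return {
        task: ("short" if t <= p33 else "medium" if t <= p66 else "long")
        for task, t in turns_per_task.items()
    }

# Illustrative turn counts (task id -> turns needed across a union of trials)
example = {"a": 6, "b": 9, "c": 11, "d": 14, "e": 21, "f": 30}
print(bucket_by_turns(example))
```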
|
| 235 |
+
|
| 236 |
+
# 6 Discussion
|
| 237 |
+
|
| 238 |
+
Conclusion. We introduced APIGen-MT, a two-phase framework for generating high-quality multi-turn agent data through simulated human-agent interactions. By decoupling the creation of detailed task blueprints from the simulation of conversational trajectories, our approach ensures both structural correctness and natural dialogue dynamics. Experiments on $\tau$ -bench and BFCL v3 demonstrate that models trained on our synthetic data outperform existing baselines, with even smaller models showing competitive performance in multi-turn scenarios. Moreover, our stabilization techniques yield more consistent and reliable agent behavior. By open-sourcing our synthetic data and trained models, we aim to foster further advances in AI agent development.
|
| 239 |
+
|
| 240 |
+
Limitations and future directions. Despite its advantages, APIGen-MT has limitations that present opportunities for future research. First, while our Best-of-N sampling and self-critique mechanisms reduce human user simulation variance, some stochasticity in human behavior remains; more deterministic simulation methods or refined filtering metrics could further stabilize the process. Second, our current approach discards failed trajectories in the second phase, yet these cases may offer valuable insights; future work could leverage such failures as additional contrastive signal during model training. Third, the multi-stage validation process, though effective, incurs computational overhead; developing more efficient validation or adaptive sampling strategies could improve scalability. Finally, extending our approach to additional domains and incorporating self-improvement through reinforcement learning are promising directions for future work.
|
| 241 |
+
|
| 242 |
+
# References
|
| 243 |
+
|
| 244 |
+
[1] S. Agashe, J. Han, S. Gan, J. Yang, A. Li, and X. E. Wang. Agent s: An open agentic framework that uses computers like a human. arXiv preprint arXiv:2410.08164, 2024.
|
| 245 |
+
[2] Anthropic. Claude 3.5 sonnet, 2024. URL https://www.anthropic.com/news/3-5-models-and-computer-use.
|
| 246 |
+
[3] Anthropic. Claude 3.7 sonnet, 2025. URL https://www.anthropic.com/news/claude-3-7-sonnet.
|
| 247 |
+
[4] Anthropic. Claude think tool, 2025. URL https://www.anthropic.com/engineering/claude-think-tool.
|
| 248 |
+
[5] A. Antoniades, A. Örwall, K. Zhang, Y. Xie, A. Goyal, and W. Wang. Swe-search: Enhancing software agents with monte carlo tree search and iterative refinement. arXiv preprint arXiv:2410.20285, 2024.
|
| 249 |
+
[6] S. Arcadinho, D. Aparicio, and M. Almeida. Automated test generation to evaluate tool-augmented llms as conversational ai agents. arXiv preprint arXiv:2409.15934, 2024.
|
| 250 |
+
[7] D. Bahdanau, N. Gontier, G. Huang, E. Kamalloo, R. Pardinas, A. Piche, T. Scholak, O. Shliazhko, J. P. Tremblay, K. Ghanem, et al. Tapeagents: a holistic framework for agent development and optimization. arXiv preprint arXiv:2412.08445, 2024.
|
| 251 |
+
[8] Z. Bi, K. Han, C. Liu, Y. Tang, and Y. Wang. Forest-of-thought: Scaling test-time compute for enhancing llm reasoning. arXiv preprint arXiv:2412.09078, 2024.
|
| 252 |
+
[9] T. Cai, X. Wang, T. Ma, X. Chen, and D. Zhou. Large language models as tool makers. arXiv preprint arXiv:2305.17126, 2023.
|
| 253 |
+
|
| 254 |
+
[10] CAMEL-AI.org. Owl: Optimized workforce learning for general multi-agent assistance in real-world task automation. https://github.com/camel-ai/owl, 2025. Accessed: 2025-03-07.
|
| 255 |
+
[11] M. Chen, H. Sun, T. Li, F. Yang, H. Liang, K. Lu, B. Cui, W. Zhang, Z. Zhou, and W. Chen. Facilitating multi-turn function calling for LLMs via compositional instruction tuning. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=owP2mymrTD.
|
| 256 |
+
[12] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao. Agent-flan: Designing data and methods of effective agent tuning for large language models. arXiv preprint arXiv:2403.12881, 2024.
|
| 257 |
+
[13] Z. Chen, M. Li, Y. Huang, Y. Du, M. Fang, and T. Zhou. Atlas: Agent tuning via learning critical steps. arXiv preprint arXiv:2503.02197, 2025.
|
| 258 |
+
[14] Scaled Cognition. APT-1 blog, 2025. URL https://www.scaledcognition.com/blog/apt-1.
|
| 259 |
+
[15] T. Dao. Flashattention-2: Faster attention with better parallelism and work partitioning, 2023. URL https://arxiv.org/abs/2307.08691.
|
| 260 |
+
[16] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.
|
| 261 |
+
[17] T. Ge, X. Chan, X. Wang, D. Yu, H. Mi, and D. Yu. Scaling synthetic data creation with 1,000,000,000 personas, 2024. URL https://arxiv.org/abs/2406.20094.
|
| 262 |
+
[18] S. A. Hayati, T. Jung, T. Bodding-Long, S. Kar, A. Sethy, J.-K. Kim, and D. Kang. Chain-of-instructions: Compositional instruction tuning on large language models. arXiv preprint arXiv:2402.11532, 2024.
|
| 263 |
+
[19] K.-H. Huang, A. Prabhakar, S. Dhawan, Y. Mao, H. Wang, S. Savarese, C. Xiong, P. Laban, and C.-S. Wu. Crmarena: Understanding the capacity of llm agents to perform professional crm tasks in realistic environments, 2025. URL https://arxiv.org/abs/2411.02305.
|
| 264 |
+
[20] E. Levi and I. Kadar. Intellagent: A multi-agent framework for evaluating conversational ai systems. arXiv preprint arXiv:2501.11067, 2025.
|
| 265 |
+
[21] G. Li, H. A. A. K. Hammoud, H. Itani, D. Khizbullin, and B. Ghanem. Camel: Communicative agents for "mind" exploration of large language model society. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.
|
| 266 |
+
[22] X. Li, H. Zou, and P. Liu. Torl: Scaling tool-integrated rl, 2025. URL https://arxiv.org/abs/2503.23383.
|
| 267 |
+
[23] Y. Li, Y. Li, X. Wang, Y. Jiang, Z. Zhang, X. Zheng, H. Wang, H.-T. Zheng, P. Xie, P. S. Yu, et al. Benchmarking multimodal retrieval augmented generation with dynamic vqa dataset and self-adaptive planning agent. arXiv preprint arXiv:2411.02937, 2024.
|
| 268 |
+
[24] W. Liu, X. Huang, X. Zeng, X. Hao, S. Yu, D. Li, S. Wang, W. Gan, Z. Liu, Y. Yu, et al. Toolace: Winning the points of llm function calling. arXiv preprint arXiv:2409.00920, 2024.
|
| 269 |
+
[25] Z. Liu, J. Zhang, K. Asadi, Y. Liu, D. Zhao, S. Sabach, and R. Fakoor. Tail: Task-specific adapters for imitation learning with large pretrained models. arXiv preprint arXiv:2310.05905, 2023.
|
| 270 |
+
[26] Z. Liu, T. Hoang, J. Zhang, M. Zhu, T. Lan, J. Tan, W. Yao, Z. Liu, Y. Feng, R. RN, et al. Apigen: Automated pipeline for generating verifiable and diverse function-calling datasets. Advances in Neural Information Processing Systems, 37:54463-54482, 2024.
|
| 271 |
+
[27] I. Loshchilov and F. Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
|
| 272 |
+
|
| 273 |
+
[28] J. Lu, T. Holleis, Y. Zhang, B. Aumayer, F. Nan, F. Bai, S. Ma, S. Ma, M. Li, G. Yin, et al. Toolsandbox: A stateful, conversational, interactive evaluation benchmark for llm tool use capabilities. arXiv preprint arXiv:2408.04682, 2024.
|
| 274 |
+
[29] A. Mitra, S. Patel, T. Chakrabarty, and C. Baral. Agentinstruct: An agentic framework for generating high-quality synthetic instruction data. arXiv preprint arXiv:2402.12360, 2024.
|
| 275 |
+
[30] J. Pan, X. Wang, G. Neubig, N. Jaitly, H. Ji, A. Suhr, and Y. Zhang. Training software engineering agents and verifiers with swe-gym. arXiv preprint arXiv:2412.21139, 2024.
|
| 276 |
+
[31] J. Pan, R. Shar, J. Pfau, A. Talwalkar, H. He, and V. Chen. When benchmarks talk: Re-evaluating code llms with interactive feedback, 2025. URL https://arxiv.org/abs/2502.18413.
|
| 277 |
+
[32] J. S. Park, J. O'Brien, C. J. Cai, M. R. Morris, P. Liang, and M. S. Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023.
|
| 278 |
+
[33] Y. Qin, S. Hu, Y. Lin, W. Chen, N. Ding, G. Cui, Z. Zeng, X. Zhou, Y. Huang, C. Xiao, et al. Tool learning with foundation models. ACM Computing Surveys, 57(4):1-40, 2024.
|
| 279 |
+
[34] Qwen, :, A. Yang, B. Yang, B. Zhang, B. Hui, B. Zheng, B. Yu, C. Li, D. Liu, F. Huang, H. Wei, H. Lin, J. Yang, J. Tu, J. Zhang, J. Yang, J. Yang, J. Zhou, J. Lin, K. Dang, K. Lu, K. Bao, K. Yang, L. Yu, M. Li, M. Xue, P. Zhang, Q. Zhu, R. Men, R. Lin, T. Li, T. Tang, T. Xia, X. Ren, X. Ren, Y. Fan, Y. Su, Y. Zhang, Y. Wan, Y. Liu, Z. Cui, Z. Zhang, and Z. Qiu. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115.
|
| 280 |
+
[35] J. Rasley, S. Rajbhandari, O. Ruwase, and Y. He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, KDD '20, page 3505-3506, New York, NY, USA, 2020. Association for Computing Machinery. ISBN 9781450379984. doi: 10.1145/3394486.3406703. URL https://doi.org/10.1145/3394486.3406703.
|
| 281 |
+
[36] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551, 2023.
|
| 282 |
+
[37] S. Sengupta, K. Curtis, A. Mallipeddi, A. Mathur, J. Ross, and L. Gou. Mag-v: A multi-agent framework for synthetic data generation and verification. arXiv preprint arXiv:2412.04494, 2024.
|
| 283 |
+
[38] J. Shim, G. Seo, C. Lim, and Y. Jo. Tooldial: Multi-turn dialogue generation method for tool-augmented language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=J1J5eGJsKZ.
|
| 284 |
+
[39] N. Shinn, F. Cassano, A. Gopinath, K. R. Narasimhan, and S. Yao. Reflexion: language agents with verbal reinforcement learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=vAElhFcKW6.
|
| 285 |
+
[40] V. Sirdeshmukh, K. Deshpande, J. Mols, L. Jin, E.-Y. Cardona, D. Lee, J. Kritz, W. Primack, S. Yue, and C. Xing. Multichallenge: A realistic multi-turn conversation evaluation benchmark challenging to frontier llms. arXiv preprint arXiv:2501.17399, 2025.
|
| 286 |
+
[41] H. Su, R. Sun, J. Yoon, P. Yin, T. Yu, and S. O. Arik. Learn-by-interact: A data-centric framework for self-adaptive agents in realistic environments. arXiv preprint arXiv:2501.10893, 2025.
|
| 287 |
+
[42] S. Tang, X. Pang, Z. Liu, B. Tang, R. Ye, X. Dong, Y. Wang, and S. Chen. Synthesizing post-training data for llms through multi-agent simulation. arXiv preprint arXiv:2410.14251, 2024.
|
| 288 |
+
[43] J. Wang, J. Zhou, M. Wen, X. Mo, H. Zhang, Q. Lin, C. Jin, X. Wang, W. Zhang, and Q. Peng. Hammerbench: Fine-grained function-calling evaluation in real mobile device scenarios. arXiv preprint arXiv:2412.16516, 2024.
|
| 289 |
+
|
| 290 |
+
[44] G. Wölflein, D. Ferber, D. Truhn, O. Arandjelović, and J. N. Kather. Llm agents making agent tools. arXiv preprint arXiv:2502.11705, 2025.
|
| 291 |
+
[45] F. Yan, H. Mao, C. C.-J. Ji, T. Zhang, S. G. Patil, I. Stoica, and J. E. Gonzalez. Berkeley function calling leaderboard. 2024.
|
| 292 |
+
[46] J. Yang, A. Prabhakar, S. Yao, K. Pei, and K. R. Narasimhan. Language agents as hackers: Evaluating cybersecurity skills with capture the flag. In Multi-Agent Security Workshop @ NeurIPS'23, 2023. URL https://openreview.net/forum?id=KOZwk7BFc3.
|
| 293 |
+
[47] J. Yang, A. Prabhakar, K. Narasimhan, and S. Yao. Intercode: Standardizing and benchmarking interactive coding with execution feedback. Advances in Neural Information Processing Systems, 36, 2024.
|
| 294 |
+
[48] R. Yang, F. Ye, J. Li, S. Yuan, Y. Zhang, Z. Tu, X. Li, and D. Yang. The lighthouse of language: Enhancing llm agents via critique-guided improvement. arXiv preprint arXiv:2503.16024, 2025.
|
| 295 |
+
[49] S. Yao, N. Shinn, P. Razavi, and K. Narasimhan. Tau-bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024.
|
| 296 |
+
[50] F. Yin, Z. Wang, I.-H. Hsu, J. Yan, K. Jiang, Y. Chen, J. Gu, L. T. Le, K.-W. Chang, C.-Y. Lee, H. Palangi, and T. Pfister. Magnet: Multi-turn tool-use data synthesis and distillation via graph translation, 2025. URL https://arxiv.org/abs/2503.07826.
|
| 297 |
+
[51] Y. Zeng, X. Ding, Y. Wang, W. Liu, W. Ning, Y. Hou, X. Huang, B. Qin, and T. Liu. Boosting tool use of large language models via iterative reinforced fine-tuning. arXiv preprint arXiv:2501.09766, 2025.
|
| 298 |
+
[52] J. Zhang, T. Lan, M. Zhu, Z. Liu, T. Hoang, S. Kokane, W. Yao, J. Tan, A. Prabhakar, H. Chen, et al. xlam: A family of large action models to empower ai agent systems. arXiv preprint arXiv:2409.03215, 2024.
|
| 299 |
+
[53] J. Zhang, T. Hoang, M. Zhu, Z. Liu, S. Wang, T. Awalgaonkar, A. Prabhakar, H. Chen, W. Yao, Z. Liu, et al. Actionstudio: A lightweight framework for data and training of action models. arXiv preprint arXiv:2503.22673, 2025.
|
| 300 |
+
[54] K. Zhang, W. Yao, Z. Liu, Y. Feng, Z. Liu, R. Rithesh, T. Lan, L. Li, R. Lou, J. Xu, et al. Diversity empowers intelligence: Integrating expertise of software engineering agents. In The Thirteenth International Conference on Learning Representations, 2024.
|
| 301 |
+
[55] Y. Zhang, J. Lu, and N. Jaitly. Probing the multi-turn planning capabilities of LLMs via 20 question games. In L.-W. Ku, A. Martins, and V. Srikumar, editors, Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1495-1516, Bangkok, Thailand, Aug. 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.82. URL https://aclanthology.org/2024.acl-long.82/.
|
| 302 |
+
[56] Y. Zheng, R. Zhang, J. Zhang, Y. Ye, Z. Luo, Z. Feng, and Y. Ma. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand, 2024. Association for Computational Linguistics. URL http://arxiv.org/abs/2403.13372.
|
| 303 |
+
|
| 304 |
+
# A Benchmarks Description
|
| 305 |
+
|
| 306 |
+
- BFCL v3: It introduces comprehensive evaluation across single-turn, multi-turn, and multi-step function calling scenarios. BFCL v3 evaluates models on their ability to understand user requests, select appropriate functions, generate valid parameters, and interpret function outputs across multiple interaction turns. The benchmark uses a weighted average of different evaluation categories to provide an overall accuracy score.
|
| 307 |
+
- $\tau$ -bench: It measures an agent's ability to interact with simulated human users (powered by language models) and programmatic APIs while following domain-specific policies. $\tau$ -bench emulates dynamic conversations across multiple domains, including retail and airline customer service, requiring agents to maintain context across turns, understand user intents, and follow complex domain-specific rules. The benchmark emphasizes the importance of multi-turn interactions and policy adherence in real-world applications.
|
| 308 |
+
|
| 309 |
+
# B Prompts
|
| 310 |
+
|
| 311 |
+
The prompts used across the various stages of APIGen-MT implemented for $\tau$ -bench are shown here - Task Configuration Generation (Figure 8), Alignment Validation (Figure 9), Final Semantic Review (Figure 10), Trajectory Collection (Figure 11), Stabilized Human Simulation (Figure 12).
|
| 312 |
+
|
| 313 |
+
# Task Configuration Generation Prompt
|
| 314 |
+
|
| 315 |
+
Instructions
|
| 316 |
+
|
| 317 |
+
Generate a task instruction that mimics realistic human users and their intentions, such as with different personality and goals. The task instruction should be followed by 'actions', which is a list of the tool calls to be taken to solve this task, and 'outputs', which is a list of the answers to specific information requests made by the user. Think step by step to come up with the action(s) and the corresponding tool_call(s) translating this thought that would be necessary to fulfill the user's request or solve their intentions. Focus on common retail scenarios following the provided task instruction guidelines.
|
| 318 |
+
|
| 319 |
+
Guidelines for Generating Task Instruction $(q)$ {task_rule + domain_rule}
|
| 320 |
+
|
| 321 |
+
User Data
|
| 356 |
+
|
| 357 |
+
{sampled_user_details}
|
| 358 |
+
|
| 359 |
+
Order Data
|
| 360 |
+
|
| 361 |
+
{sampled Orders}
|
| 362 |
+
|
| 363 |
+
Guidelines for generating Groundtruth Actions $(a_{gt})$
|
| 364 |
+
|
| 365 |
+
1. The main focus is to generate actions that can modify the underlying database.
|
| 366 |
+
|
| 367 |
+
2. For actions that do not modify the database like specific information requests, scan the provided User Data directly and append only the answer in 'outputs' $(o_{gt})$ . Do not make separate tool calls for this in 'actions'.
|
| 368 |
+
3. Include multiple tool calls when the scenario requires multiple steps or modifications.
|
| 369 |
+
4. Provide precise tool calls with all necessary parameters for each action.
|
| 370 |
+
5. Ensure all actions adhere to retail policies and common sense practices.
|
| 371 |
+
|
| 372 |
+
Tools
|
| 373 |
+
|
| 374 |
+
The available tool combination in Python format is as follows: {sampled.tools}
|
| 375 |
+
|
| 376 |
+
Output Format
|
| 377 |
+
|
| 378 |
+
Generate your response according to the following format. Enclose the thought process within <thought></thought> tags, and the final structured response within <answer></answer> tags. The structured response should be in strict JSON format, without any additional comments or explanations.
|
| 379 |
+
|
| 380 |
+
Example Tasks [example]
|
| 381 |
+
|
| 382 |
+
Do not directly copy instruction and the action patterns from the examples. Ground the generation from the above provided data. Generate the task now.
|
| 383 |
+
|
| 384 |
+
Figure 8: Task configuration generation prompt for retail domain of $\tau$ -bench.
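In practice, the bracketed placeholders in this prompt (the task/domain rules, sampled user details, sampled orders, and sampled tools) are substituted with sampled domain data before the generation call. A minimal, hypothetical sketch of that substitution is shown below; the template text is heavily abbreviated with "..." and the field names are illustrative, not the pipeline's exact identifiers.

```python
# Abbreviated stand-in for the Figure 8 template; "..." elides the full prompt text.
TASK_GEN_TEMPLATE = (
    "Generate a task instruction that mimics realistic human users ...\n\n"
    "Guidelines for Generating Task Instruction (q)\n{guidelines}\n\n"
    "User Data\n{user_details}\n\n"
    "Order Data\n{orders}\n\n"
    "Tools\nThe available tool combination in Python format is as follows: {tools}\n"
)

def build_task_gen_prompt(guidelines, user_details, orders, tools):
    """Fill the abbreviated template with sampled domain data."""
    return TASK_GEN_TEMPLATE.format(
        guidelines=guidelines, user_details=user_details, orders=orders, tools=tools
    )

prompt = build_task_gen_prompt(
    guidelines="<task + domain rules>",
    user_details="<sampled user profile>",
    orders="<sampled orders>",
    tools="<sampled tool signatures>",
)
```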
|
| 385 |
+
|
| 386 |
+
# Task Alignment Validation Prompt
|
| 387 |
+
|
| 388 |
+
You are an AI judge and your goal is to judge the quality and validity of the provided task object based on the guidelines, following the rubric.
|
| 389 |
+
|
| 390 |
+
# Guidelines
|
| 391 |
+
|
| 392 |
+
- The task object contains an 'intent' $(q)$ from a user, 'actions' $(a_{gt})$ , and 'outputs' $(o_{gt})$ .
|
| 393 |
+
- The 'actions' correspond to the tool calls made by an AI assistant to satisfy the instruction.
|
| 394 |
+
- A description of the 'tools' available to the AI assistant is provided.
|
| 395 |
+
- The 'diffPatch' is the difference in the database state after the tool calls are made. It should only reflect changes corresponding to the 'intent'. There should be no extraneous changes. If the 'diffPatch' is empty, it means that the tool calls did not change the database state, which is possible if the instruction was to provide information only.
|
| 396 |
+
- Perform a brief reflection on the task based on the below Rubrics.
|
| 397 |
+
- Think step-by-step to generate a score of 0 or 1 for each of these criteria (1 means follows criterion and 0 means does not)
|
| 398 |
+
|
| 399 |
+
## Rubric
|
| 400 |
+
|
| 401 |
+
- Correctness: Do the actions $(a_{gt})$ accurately implement the instruction $(q)$ ?
|
| 402 |
+
- Completeness: Is the instruction $(q)$ sufficiently detailed, and is it fully addressed by the actions? (Includes rule-based checks).
|
| 403 |
+
- Satisfaction: Do the expected outputs $(o_{gt})$ fulfill any explicit or implicit information requests within the instruction $(q)$ ?
|
| 404 |
+
- Creativity: Does the task represent a non-trivial, plausible, and potentially interesting scenario within the domain?
|
| 405 |
+
|
| 406 |
+
## Task Object
|
| 407 |
+
|
| 408 |
+
{task}
|
| 409 |
+
|
| 410 |
+
Tools in Python format {tools}
|
| 411 |
+
|
| 412 |
+
Diff Patch {diffpatch}
|
| 413 |
+
|
| 414 |
+
Output format <scores>
|
| 415 |
+
|
| 416 |
+
{
|
| 417 |
+
|
| 418 |
+
"reflection": str, <a brief high-level review of the task>
|
| 419 |
+
|
| 420 |
+
"correctness": int, $<0/1>$ ,
|
| 421 |
+
|
| 422 |
+
"completeness": int, $< 0/1>$ ,
|
| 423 |
+
|
| 424 |
+
"satisfaction": int, $< 0/1>$ ,
|
| 425 |
+
|
| 426 |
+
"creativity": int, $<0/1>$ ,
|
| 427 |
+
|
| 428 |
+
"total": int, <total score out of 4>
|
| 429 |
+
|
| 430 |
+
"correction": str, <brief explanation and suggested correction (if needed)>
|
| 431 |
+
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
</scores>
|
| 435 |
+
|
| 436 |
+
Figure 9: Task alignment validation prompt for $\tau$ -bench. This is sent to each LM in the review committee to get their scores, following which we employ majority voting.
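A minimal sketch of aggregating the committee's rubric scores by majority voting might look like the following; the per-criterion acceptance rule and data shapes are assumptions made for illustration, not the exact acceptance logic used in our pipeline.

```python
def committee_accepts(judge_scores,
                      criteria=("correctness", "completeness", "satisfaction", "creativity")):
    """Majority vote per rubric criterion across the LM judges (cf. Figure 9).

    judge_scores: list of dicts, one per judge, each mapping criterion -> 0/1.
    A criterion passes if a strict majority of judges scored it 1; the task is
    accepted only if every criterion passes (an assumed acceptance rule).
    """
    n = len(judge_scores)
    for c in criteria:
        votes = sum(j[c] for j in judge_scores)
        if votes * 2 <= n:   # not a strict majority
            return False
    return True

# Example: three judges disagree on creativity but agree elsewhere.
judges = [
    {"correctness": 1, "completeness": 1, "satisfaction": 1, "creativity": 1},
    {"correctness": 1, "completeness": 1, "satisfaction": 1, "creativity": 0},
    {"correctness": 1, "completeness": 1, "satisfaction": 1, "creativity": 1},
]
print(committee_accepts(judges))  # True: every criterion clears a majority
```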
|
| 437 |
+
|
| 438 |
+
# Final Semantic Review Prompt
|
| 439 |
+
|
| 440 |
+
You are responsible for analyzing and summarizing feedback from multiple AI judges. Your primary goal is to provide clear, actionable feedback that will help the generator LLM improve its future outputs. You do not evaluate the task directly; instead, you review and ground the existing feedback from the AI judges.
|
| 441 |
+
|
| 442 |
+
## Review Process
|
| 443 |
+
|
| 444 |
+
- Begin by analyzing individual reflections and scores from each judge.
|
| 445 |
+
- Summarize common points of agreement or disagreement.
|
| 446 |
+
- Offer a concise summary of actionable feedback to be sent back to the data generator, which aims to improve the next round of data quality.
|
| 447 |
+
|
| 448 |
+
# Diff Patch
|
| 449 |
+
|
| 450 |
+
{diff_batch}
|
| 451 |
+
|
| 452 |
+
Generated Task Data [task]
|
| 453 |
+
|
| 454 |
+
AI Judges' Feedback [reviews]
|
| 455 |
+
|
| 456 |
+
# Output Format
|
| 457 |
+
|
| 458 |
+
Generate your response according to the following format. Enclose the thought process within ‘<thought></thought>’ tags, and the final summary of actionable feedback within ‘<summary></summary>’ tags.
|
| 459 |
+
|
| 460 |
+
Figure 10: Final semantic review prompt for $\tau$ -bench.
|
| 461 |
+
|
| 462 |
+
# Trajectory Collection Prompt
|
| 463 |
+
|
| 464 |
+
You are a detail-oriented user interacting with an AI agent.
|
| 465 |
+
|
| 466 |
+
Intent
|
| 467 |
+
|
| 468 |
+
{intent}
|
| 469 |
+
|
| 470 |
+
## Rules
|
| 471 |
+
|
| 472 |
+
- Generate one line at a time to simulate the user's message.
|
| 473 |
+
- Do not give away all the intent at once. Only provide the information that is necessary for the current step.
|
| 474 |
+
- Do not hallucinate information that is not provided in the intent.
|
| 475 |
+
- If the intent goal is satisfied, generate ‘#####STOP#####’ to end the conversation.
|
| 476 |
+
- Do not repeat the exact intent in the conversation. Instead, use your own words to convey the same information.
|
| 477 |
+
- Try to make the conversation as natural as possible and stick to the personalities in the intent.
|
| 478 |
+
|
| 479 |
+
Figure 11: Trajectory collection prompt for $\tau$ -bench.
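A minimal sketch of the trajectory-collection loop implied by these rules, in which the simulated user ends the episode by emitting the stop marker, is shown below; `agent_step`, `user_step`, and the max-turn cap are illustrative placeholders rather than the paper's implementation.

```python
STOP_MARKER = "#####STOP#####"

def collect_trajectory(agent_step, user_step, intent, max_turns=30):
    """Alternate simulated-user and agent turns until the user emits the stop marker.

    agent_step / user_step are assumed callables returning the next message string;
    max_turns is a safety cap chosen for this sketch, not a value from the paper.
    """
    messages = []
    for _ in range(max_turns):
        user_msg = user_step(intent, messages)
        if STOP_MARKER in user_msg:
            break
        messages.append({"role": "user", "content": user_msg})
        agent_msg = agent_step(messages)   # may include tool calls executed against the env
        messages.append({"role": "assistant", "content": agent_msg})
    return messages
```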
|
| 480 |
+
|
| 481 |
+
# BoN User LM Setting Prompt
|
| 482 |
+
|
| 483 |
+
You are a fair judge and an expert in following details.
|
| 484 |
+
|
| 485 |
+
A human is interacting with a retail assistant to get help on solving their task. You are provided with the description of the human and the task the human wants to accomplish (wrapped with <description></description>), and a candidate response (wrapped with <response></response>) the human wants to give the assistant. Please help the human evaluate this candidate response, give an integer score (ranging from 0 to 10) to indicate the correctness of the response, higher score means better quality.
|
| 486 |
+
|
| 487 |
+
1. If the response includes specific item / order / personal details, and they correctly match the task description you should give full score of 10. If there is some change in details, give a corresponding lower score (more incorrect details gets lower score).
|
| 488 |
+
2. The response can include any normal conversation otherwise (e.g. asking details, saying ###STOP) etc. which are all correct responses.
|
| 489 |
+
3. Additionally, if the candidate_response keeps the conversation flowing by describing the task clearly / giving information properly, give a high score; if not (e.g. "I don't remember" or an unhelpful response), give a correspondingly lower score.
|
| 490 |
+
|
| 491 |
+
<description> {description} </description>
|
| 492 |
+
|
| 493 |
+
<response> {response} </response>
|
| 494 |
+
|
| 495 |
+
After scoring using the mentioned guideline, tell me your score, wrap it in <score></score> tags.
|
| 496 |
+
|
| 497 |
+
Figure 12: Best-of-N (BoN) User LM setting prompt used in the retail domain of $\tau$ -bench.
|
data/2025/2504_03xxx/2504.03601/images/203ec427caf0475b6de20a1c27e8f5d86efacda5f8e6acae7c169101e65fc728.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/2b8e0d56103bcc42f7476f84e79b2069ef908ea243fa876358afda18c118752d.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/2ef11530635023def2c6bbd7d0095d4770537183662847e5a80478a095b50a2b.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/3199bca066d064c413546149c857fe2287fe8b36fd97a2b514d3efc0fc9c03fe.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/3f64f67378a0b7e6fbf7151b5b45f29a4fe2f91c63b430da9d64017780c1bf53.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/4203782c4a6a25dc3bb32f2d8a1eb074f8aedff6a4e743415259aca7c6bc1c8a.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/62e072e86f40da4411690e9d06828dce77c6e6f95b0fdd8e7b80ea4102d43b8b.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/7152beccd11e88f51abc2a53fae6549e9e5f17e52485b4003e4db45e28116eb6.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/aadc07b5135ebf6aaf609b46288e97e7822c3c262f062f92421cf17449a351d4.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/cd559fb87d18a9d2a2968961d57a1b5b0bc196e8abd6b0ce7684367dc6006373.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/ce6e74d3af5194803e096a95af152e26799e17b4e2a522e5baaefbd2b3266ff7.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/e87359d51cf1bb9b7ca3ead71e055c135bd96e9fe052a91ff53e63f936dd0e2f.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/images/e87c3e3d8cfa887c8723e9d1e30c35a087337b7bff360f8c9c8096b084ddeccd.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03601/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_03xxx/2504.03624/2aef3cf2-63ec-4c64-b6be-6f4154c03023_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b21cc8f29f76853389404d31e3700d7ad851b3293692ebd183890a70a7d89254
|
| 3 |
+
size 1386485
|
data/2025/2504_03xxx/2504.03624/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_03xxx/2504.03624/images/1cc66296e8a29104cfea243cfb83fc6134021779f2af488d4d5e99bd42d5bcc0.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03624/images/1fecb8a82dcbadd166b9bd03dd08128da3541cc761c11a16af0b819fd4420831.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03624/images/33ea7131b53f28909fe42f8b25ac0b9c3757da5cc268ff15a33fa8ad908acee5.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03624/images/3473a2175257e4fce9156c179b984736feb1dc0ccc692489eb89718fd065bd5c.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03624/images/3639835691b6d953b865fb514ef3de322e2fe0c3e5392ca41115f3dd6ef855f5.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_03xxx/2504.03624/images/37db35014a6a18bfa0a1e31ed569540424dd14958513957d80b9aa4cd1ded650.jpg
ADDED
|
Git LFS Details
|