MinerU Batch 8f46ae6c-07be-4c29-a893-b2f0e7aaabad (Part 4/8)
This view is limited to 50 files because it contains too many changes.
- .gitattributes +8 -0
- data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_content_list.json +0 -0
- data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_model.json +0 -0
- data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf +3 -0
- data/2025/2504_05xxx/2504.05657/full.md +458 -0
- data/2025/2504_05xxx/2504.05657/images/3cc1916acf788294e6c8ae9cfda94679fa734b07114b7b845c6ca02cdef7c997.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/42c4d557aa680975a756269def714eecc57dc1e96b22095e36c26eabbddd4813.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/5e9d3dac3968c2aca3602c7becebdf97db62a94ae78c7df5ff863f2e6c45e0e0.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/60a1d06954731a1d8497f7ad39edfbb82ed60f30aa0d12cd56c49475a8a4119e.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/659129421e45a9fe814172a8f9a34bba84b17527efa124efa6a95150223c258d.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/79157a288ac5f97faf334aba645a2a71ff54ca0a6409da7b5db961540f267480.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/7e69571a4ff0ffaa47a0d2c05ca2240776de844853e960ce84ab1186458a45c6.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/8e23480718b64a36e60a649ebac4e58f7fde3d839dc800ff33cb052003b55e25.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/98b5b166adf5a4085d47308265f5b1d43548aef5667c9a311dea779157979ccf.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/9d42cb0231914a7b71d6b7e6fa59b6b9bb1fdc4444517d6b37eb83903910150a.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/aaf4389be4b7945f02c8bda6ea039fe9c5e53dd5e8bc867b886ce5c647d07ecd.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/b205df6253934c495cd6bf54f649c643eae158455b8a5a57b5838ccd4e16a70f.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/bb3829ac42d9a686c979bc4e9bb61faab459c4bb9e22a76b5267ae278546f1d8.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/bd43c91baeed1799a4c358e11c48f71336db4a8f12d9b56d330407e1dc303c30.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/cdb077715738a859647716de99f3da764ab98bc0bbef54fc6a8d8d889b7cb8ce.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/dabbcf97be8c75c0deb7beb748fdb20e0361404e5c527c4f5e1d38e00b935d08.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/efe1584a963bd86fedb0e112a862eb4e48cd69fc72a15bbe69361f939014ea25.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/images/ff815d3d676fe98054c7f2384cec4ecceab1946ec4936157be8fb0c8b58b53f0.jpg +3 -0
- data/2025/2504_05xxx/2504.05657/layout.json +0 -0
- data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_content_list.json +1546 -0
- data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_model.json +0 -0
- data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf +3 -0
- data/2025/2504_05xxx/2504.05692/full.md +329 -0
- data/2025/2504_05xxx/2504.05692/images/133a5be75fe9852f50d9dc75a3e28f6a3f8ed363159786f123db3e08abb2c86a.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/1553dd9fea27bacf18cff9e88695799cf7d92dd8244a549641fdbd79c3b38df9.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/17b51d4a6a4dec36c51e858d07bf3a17155c0a4b6326adbd345b3eed1d1b4ef0.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/2038d457d38fd0c221b12bdad5e2a377292a6a5391edc6521c9a1a933405292f.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/2725a90e3c969c688b8b4aece0822e6dc29099b7a2ee22c8e885d7c14e48523e.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/61e6296f3d781a21567857de4b2ea166d7a6b53edff37bc595f168c4d288e0ee.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/77c2053e0882f10da0a9b7e380f417e6d9b57a1125876608e85426222f0b0d45.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/7e0a527c5d772d5170207369443f4492fcc8f881d8ee08801d58a55b0d28fec9.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/80953ea0dc5ca9ee59f95c9853c3873761fadebe7ab58f819e72258d3f71d946.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg +3 -0
- data/2025/2504_05xxx/2504.05692/images/a14c594c62cedd5e32d97e3ea1a176b458e13f8f8cdad1bfc378e63d14129e06.jpg +3 -0
.gitattributes
CHANGED
@@ -1292,3 +1292,11 @@
 data/2025/2504_05xxx/2504.05888/a802a693-b787-42ea-9506-1d20b2ad6f02_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_06xxx/2504.06122/1f4b320c-f80c-4d99-b14d-3a49e20634f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_06xxx/2504.06196/00c24729-2bee-4726-a0b4-13a163fe9cf3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05730/2fcd628a-662d-422e-89a8-df9b6d69e542_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05731/1d2e0a8a-e48a-4cb6-8ec0-35774101eda9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05741/97d4b9fa-4704-4c9a-9165-90317d2d4443_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05747/ec2428c8-34ac-40c3-9d6a-d16ed6c46317_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_05xxx/2504.05979/c3842c4d-63fe-4775-a7ec-8e60138be3cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_06xxx/2504.06311/24099640-7df8-4696-ab55-8a633ecbda84_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_content_list.json
ADDED
The diff for this file is too large to render.
data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_model.json
ADDED
The diff for this file is too large to render.
data/2025/2504_05xxx/2504.05657/73cf1d18-a4f6-40ed-812d-0ba2712fd755_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c38ac6549dc60cab20aceb88fa67c5cc23a4dc7712c2f6dbe6f108e04e3a8fd2
+size 1717631
data/2025/2504_05xxx/2504.05657/full.md
ADDED
The current version is 'Preprint'.

This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which this version may no longer be accessible.

This information aligns with the guidelines available at:

https://journals.ieeeauthorcenter.ieee.org/become-an-ieee-journal-author/publishing-ethics/guidelines-and-policies/post-publication-policies/
# Nes2Net: A Lightweight Nested Architecture for Foundation Model Driven Speech Anti-spoofing

Tianchi Liu, Student Member, IEEE, Duc-Tuan Truong, Student Member, IEEE, Rohan Kumar Das, Senior Member, IEEE, Kong Aik Lee, Senior Member, IEEE, Haizhou Li, Fellow, IEEE

Abstract—Speech foundation models have significantly advanced various speech-related tasks by providing exceptional representation capabilities. However, their high-dimensional output features often create a mismatch with downstream task models, which typically require lower-dimensional inputs. A common solution is to apply a dimensionality reduction (DR) layer, but this approach increases parameter overhead, computational costs, and risks losing valuable information. To address these issues, we propose Nested Res2Net (Nes2Net), a lightweight back-end architecture designed to directly process high-dimensional features without DR layers. The nested structure enhances multi-scale feature extraction, improves feature interaction, and preserves high-dimensional information. We first validate Nes2Net on CtrSVDD, a singing voice deepfake detection dataset, and report a $22\%$ performance improvement and an $87\%$ back-end computational cost reduction over the state-of-the-art baseline. Additionally, extensive testing across four diverse datasets: ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering fully spoofed speech, adversarial attacks, partial spoofing, and real-world scenarios, consistently highlights Nes2Net's superior robustness and generalization capabilities. The code package and pre-trained models are available at https://github.com/Liu-Tianchi/Nes2Net.

Index Terms—DeepFake detection, speech anti-spoofing, Res2Net, Nes2Net, SSL, speech foundation model
# I. INTRODUCTION

SPEECH foundation models, such as wav2vec 2.0 [1], HuBERT [2], and WavLM [3], have revolutionized speech processing by leveraging large-scale pretraining to capture complex acoustic and linguistic patterns [4]. This has driven notable advances in automatic speech recognition (ASR) [5], speaker verification (SV) [6], and other speech applications.

Beyond traditional tasks, speech foundation models also show great promise in addressing critical security concerns, particularly speech anti-spoofing (also referred to as deepfake detection) [7]. With the growing sophistication of spoofing techniques, such as voice conversion, ensuring the reliability and security of speech-driven systems has become a pressing concern [8]–[12]. Leveraging the rich representations of these foundation models could significantly improve the robustness and generalization of anti-spoofing systems [13]–[15].

Tianchi Liu and Haizhou Li are with the Department of Electrical and Computer Engineering, National University of Singapore, Singapore. Tianchi Liu is also with LIGHTSPEED, Singapore (email: tianchi.liu@u.nus.edu);

Duc-Tuan Truong is with Nanyang Technological University, Singapore (email: truongdu001@e.ntu.edu.sg);

Rohan Kumar Das is with Fortemedia, Singapore (email: ecerohan@gmail.com);

Kong Aik Lee is with the Department of Electrical and Electronic Engineering and the Research Centre for Data Science & Artificial Intelligence, The Hong Kong Polytechnic University, Hong Kong (e-mail: kongaik.lee@polyu.edu.hk);

Haizhou Li is also with the Shenzhen Research Institute of Big Data, School of Artificial Intelligence, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China (email: haizhouli@cuhk.edu.cn).
While speech foundation models offer exceptional representations, their high-dimensional feature outputs present significant challenges for downstream tasks. Downstream models used in tasks like speech anti-spoofing typically require lower-dimensional features [15]–[17]. To address this mismatch, a common approach is to introduce a dimensionality reduction (DR) layer, usually implemented as a fully connected (FC) layer, to transform high-dimensional features into lower-dimensional ones. However, this conventional strategy presents notable drawbacks. Given that downstream classifiers are typically compact [15], [16], the DR layer alone often consumes a substantial portion of the parameters and computational resources within the entire back-end model. Moreover, directly projecting high-dimensional features in a one-shot manner through an FC layer leads to the loss of important information, reducing the effectiveness of speech foundation models. These issues highlight the need for a more efficient and effective solution to bridge the dimensionality gap and fully utilize speech foundation models in downstream tasks.

To address these challenges, we propose Nested Res2Net (Nes2Net) to process high-dimensional features from speech foundation models, eliminating the need for a DR layer while preserving the richness of the original representations. By addressing key limitations of DR layers, such as excessive computational cost and information loss, Nes2Net offers a more efficient and effective solution. This design makes it particularly suitable for tasks requiring a balance of high performance and efficiency, such as speech anti-spoofing. The key contributions of this work can be summarized as follows:

- Novel Architecture: We introduce Nes2Net, a new approach that effectively addresses the limitations of DR layers. Nes2Net retains the expressive power of high-dimensional features while reducing model complexity.
- Enhanced Performance, Efficiency, and Generalization: Our method demonstrates a $22\%$ performance gain and an $87\%$ reduction in computational costs compared to the state-of-the-art baselines on the CtrSVDD dataset. Further experiments conducted on four additional datasets across various scenarios demonstrate strong generalization capability and consistently superior performance.
- Reproducibility: To facilitate further research and application, we make our scripts and pre-trained models publicly available.
# II. RELATED WORK

# A. Res2Net

Res2Net [18] is a well-known architecture designed to extract multi-scale features. Unlike ResNet [19], Res2Net uses hierarchical residual connections within a single block, allowing it to capture patterns across varying receptive fields simultaneously [18]. This design offers proven advantages in speech-related tasks, such as SV [20]–[22] and anti-spoofing [23]–[25], where capturing subtle variations and complex acoustic patterns is important. As shown in Fig. 1, Res2Net (highlighted using a light red block) can also serve as a classifier within a speech foundation model-based anti-spoofing system. Its ability to extract multi-scale features has led to superior performance over conventional models and motivates the design of Nested Res2Net in this work.

# B. Hand-crafted Feature-based Speech Anti-Spoofing Models

Hand-crafted acoustic features (such as MFCC) are common choices for many earlier speech anti-spoofing systems. These systems have evolved to effectively detect speech deepfakes [26], [27]. For instance, the Channel-wise Gated Res2Net (CG-Res2Net) [23] introduces a gating mechanism within the Res2Net architecture, enabling dynamic selection of channel-wise features to enhance generalization to unseen attacks. A widely recognized model is AASIST [26], which employs spectro-temporal graph attention layers to capture both temporal and spectral artifacts, thereby achieving efficient and accurate detection. Given AASIST's SOTA performance and its wide adoption in recent anti-spoofing challenges [16], [28], we consider it as our main baseline for evaluation.

# C. Speech Foundation Models

Speech foundation models are often referred to as Self-Supervised Learning (SSL) models due to their typical pretraining on large amounts of unlabeled speech data using self-supervised learning techniques. Examples include wav2vec 2.0 [1], HuBERT [2], and WavLM [3]. Unlike hand-crafted acoustic features, which are limited in their ability to adapt to diverse and complex conditions, SSL models learn rich and generalized speech representations that can be effectively adapted to various downstream applications. This allows them to achieve superior performance in speech-related tasks, including speech anti-spoofing.

# D. Speech Foundation Model-based Anti-spoofing

As discussed in the previous subsection, speech foundation models can capture more informative representations than handcrafted or raw acoustic features [3]. This makes them highly effective for speech anti-spoofing, as they generalize well across datasets and are more robust to unseen attacks [15]. As a result, many recent anti-spoofing systems increasingly adopt these models as front-ends, feeding their features to the back-end classifiers and consistently outperforming traditional models [16], [29], [30].
To connect these powerful front-end models to downstream classifiers, a feature aggregation layer is introduced, as shown in Fig. 1. This layer combines features from different SSL layers using methods such as a simple weighted sum or attention-based methods like Squeeze-and-Excitation Aggregation (SEA) [16] and Attentive Merging (AttM) [31].

TABLE I
CONTRIBUTION OF THE DR LAYER TO THE NUMBER OF PARAMETERS AND COMPUTATIONAL COST IN BACK-END MODELS. MMACS STANDS FOR MILLION MULTIPLY-ACCUMULATE OPERATIONS.

<table><tr><td rowspan="2">Back-end Model</td><td colspan="3">Parameters</td><td colspan="3">MMACs</td></tr><tr><td>DR</td><td>Total</td><td>%</td><td>DR</td><td>Total</td><td>%</td></tr><tr><td>ResNet [19]</td><td>131k</td><td>611k</td><td>21%</td><td>26.24</td><td>70.62</td><td>37%</td></tr><tr><td>Res2Net [18]</td><td>131k</td><td>452k</td><td>29%</td><td>26.24</td><td>64.93</td><td>40%</td></tr><tr><td>ECAPA [34]</td><td>131k</td><td>497k</td><td>26%</td><td>26.24</td><td>80.21</td><td>33%</td></tr><tr><td>AASIST [26]</td><td>131k</td><td>447k</td><td>29%</td><td>26.24</td><td>707.65</td><td>4%</td></tr></table>
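As a concrete illustration of the simplest option named above, here is a minimal PyTorch sketch of a learnable weighted sum over SSL hidden layers. The layer count and the softmax normalization are assumptions of this sketch, not details taken from the paper; SEA and AttM are the attention-based alternatives.

```python
import torch
import torch.nn as nn

class LayerWeightedSum(nn.Module):
    """Learnable weighted sum over SSL hidden layers (illustrative sketch)."""

    def __init__(self, num_layers: int = 25):  # e.g., WavLM-Large: 24 blocks + input
        super().__init__()
        self.w = nn.Parameter(torch.zeros(num_layers))  # one weight per layer

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # hidden_states: (num_layers, batch, time, dim)
        w = torch.softmax(self.w, dim=0)                # normalize the layer weights
        return torch.einsum('l,lbtd->btd', w, hidden_states)
```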
Following the aggregation layer, the resulting features are passed to the back-end classifier, as shown in the green box of Fig. 1. Existing methods typically use a DR layer, which reduces the high-dimensional features of $N$ channels (commonly $N = 1024$ [1], [3], [32]) to a lower dimension $D$ (e.g., $D = 128$ [15], [16] or $D = 144$ [17], [33]) to match the classifier's input requirements. The classifier model then extracts features from the DR layer outputs and produces the final score. As illustrated in the red box of Fig. 1, commonly used classifier structures include traditional models such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26].

The strong performance of these systems stems from their ability to capture rich speech representations, enabling more accurate distinction between real and spoofed speech. As a result, these systems have achieved SOTA results [33], [35], [36], especially in recent challenges like ASVspoof 5 [28], [37], CtrSVDD [16], [38], [39], and ADD [40]. However, the use of a DR layer introduces challenges that limit the back-end's ability to fully leverage the rich representations from speech foundation models. These issues will be discussed in the next subsection. In this work, we aim to better unlock the potential of foundation models for speech anti-spoofing.

# E. Limitation of Dimensionality Reduction Layer

Existing speech foundation model-based anti-spoofing systems excel in extracting rich, high-dimensional feature representations, which capture intricate patterns in speech. However, this high dimensionality poses a significant challenge for downstream tasks. Models in these tasks typically require lower-dimensional features [23], [26], [27], creating a mismatch between the output features of the foundation models and the requirements of downstream processing.

A commonly used approach for dimensionality reduction is to employ a DR layer. However, this approach has several issues, including parameter overhead and potential information loss. As shown in Table I, our analysis of back-end models further emphasizes the inefficiency of this approach. We consider commonly used feature dimensions of $N = 1024$ from large models [1], [3], and a reduced dimension of $D = 128$, widely adopted in SOTA back-end models [15], [16], [31].
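The DR-layer entries in Table I can be sanity-checked with a few lines of arithmetic. The sketch below reproduces them under our own assumption of roughly 200 output frames (about 4 s of audio at a 20 ms frame step); the exact frame count is not stated in the paper.

```python
# Back-of-envelope check of the DR-layer rows in Table I.
N, D = 1024, 128               # SSL feature dimension and reduced dimension

params = N * D + D             # FC weights + bias = 131,200, i.e. the ~131k in Table I
macs_per_frame = N * D         # one multiply-accumulate per weight per frame

frames = 200                   # assumed: ~4 s of audio at a 20 ms frame step
total_mmacs = macs_per_frame * frames / 1e6

print(f"DR params: {params / 1e3:.0f}k, DR cost: {total_mmacs:.2f} MMACs")
# -> DR params: 131k, DR cost: 26.21 MMACs (in line with the 26.24 in Table I)
```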
Across various back-end models, the DR layer, despite being just a single layer, consistently accounts for a substantial share of parameters and computational cost, underscoring its resource-intensive nature. For instance, the DR layer accounts for $21\%$ to $29\%$ of the parameters across ResNet, Res2Net, ECAPA, and AASIST. In terms of computational cost, the DR layer generally contributes at least one-third of the total MACs. AASIST is the only exception, where the DR layer accounts for just $4\%$ of the MACs, primarily because its overall MAC count is an order of magnitude higher than that of other models.

This table highlights that a single DR layer significantly inflates the back-end model's size and resource demands. Furthermore, its direct projection design discards important high-dimensional features, limiting the overall potential of speech foundation models.



Fig. 1. The block diagram of the speech foundation model-based speech anti-spoofing system, showcasing both the traditional back-end models and the proposed Nes2Net back-end. The traditional back-end models include a DR layer and a classifier, such as ResNet [19], Res2Net [18], ECAPA-TDNN [34], and AASIST [26]. In contrast, the proposed Nes2Net back-end model features a DR layer-free design. Additionally, an enhanced version of its nested layer, named Nes2Net-X, is introduced to further improve performance. Abbreviations used in the figure include: 'FC' (fully connected layer), 'Conv' (convolutional layer), 'WS' (weighted sum), 'SE' (squeeze-and-excitation module) [41], and 'Att. Stat. Pool.' (attentive statistics pooling) [42].
# III. METHODOLOGY

# A. Proposed Nested Res2Net (Nes2Net)

The design of Nes2Net is driven by two primary objectives: 1) effectively and efficiently utilizing the high-dimensional features from speech foundation models, and 2) enhancing multi-scale feature extraction to achieve robust generalization in speech anti-spoofing tasks. These objectives are realized through a novel nested architecture that simultaneously improves the efficiency, flexibility, and robustness of the model.

Efficiency and Retention of Rich Feature Information: The analysis in Section II-E reveals the limitations of employing the DR layer. Building upon the observations, Nes2Net entirely removes the DR layer, directly processing high-dimensional features to retain their intrinsic richness and minimize unnecessary computational costs. By bypassing the DR layer, Nes2Net prevents the information bottleneck typically caused by early dimensionality reduction. This ensures the preservation of detailed representations essential for accurately distinguishing genuine speech from spoofed audio.

Enhanced Multi-Scale Feature Interaction and Expressiveness: While the Res2Net architecture effectively extracts multi-scale features through hierarchical splits, it exhibits significant limitations when processing high-dimensional features directly, especially with large split scales $s$. Specifically, Res2Net suffers from feature dilution [18], redundant transformations [43], and restricted interactions among channels. Excessive splitting fragments the features, weakening their expressiveness, and repetitive transformations increase computational redundancy, potentially causing overfitting. Moreover, closely related information can be distributed across non-adjacent subsets, limiting effective cross-channel interactions.

To overcome these limitations, as illustrated in Fig. 1, we propose a novel Nested Res2Net (Nes2Net) architecture that introduces a hierarchical nesting structure. This additional degree of flexibility significantly enhances the model's representational capability. Each nested layer progressively refines features by building upon outputs from preceding layers and also incorporates efficient local cross-channel attention mechanisms [44], [45], strengthening interactions across channels. This holistic feature extraction approach enables Nes2Net to comprehensively capture intricate speech patterns. Moreover, the cumulative refinement effectively mitigates the issue of feature dilution, preserving rich and expressive multi-scale information. Benefiting from the structural advantages of the nesting strategy, the need for excessive fine-grained splits is reduced, effectively mitigating redundant transformations. This approach also minimizes unnecessary computations, resulting in a compact yet highly expressive model.

Critically, overfitting is a well-known challenge in speech anti-spoofing tasks, often leading to degraded performance in cross-domain scenarios. Previous studies [23], [26], particularly with compact models like AASIST and Res2Net (both with fewer than 500k parameters), have shown that smaller models can help reduce overfitting. Our experiments with these models confirm that simply increasing their size does not always lead to better performance and can, in fact, make overfitting worse. As a result, improving feature quality through smarter model structure design becomes more important than just scaling up the model. The nested architecture of Nes2Net provides clear benefits as it maintains computational efficiency while reducing the risk of overfitting.

Nes2Net consists of an outer layer and several identical nested layers, described as follows:
1) Outer Layer: The outer layer of Nes2Net adopts a structure similar to that of Res2Net. The high-dimensional features produced by a speech foundation model are uniformly split into $s_1$ feature map subsets, denoted by $x_i$, where $i \in \{1, 2, \dots, s_1\}$. Each feature subset $x_i$ has the same spatial size but contains only $\frac{1}{s_1}$ of the channels of the input feature map. With the exception of $x_1$, each $x_i$ is paired with a corresponding nested layer, denoted by $\mathbf{K}_i(\cdot)$. The output of $\mathbf{K}_i(\cdot)$, represented as $y_i$, is computed as follows:

$$
y_i = \begin{cases} x_i & i = 1; \\ \mathbf{K}_i(x_i) & i = 2; \\ \mathbf{K}_i(x_i + y_{i-1}) & 2 < i \leq s_1, \end{cases} \tag{1}
$$

where $x_i$ is first added to the output of $\mathbf{K}_{i-1}(\cdot)$, and the resulting feature map is then fed into $\mathbf{K}_i(\cdot)$ for further processing. All $y_i$ features are concatenated along the channel dimension. Due to the combinatorial explosion effect [18], the output features encapsulate a fusion of receptive field characteristics across different scales and frame levels. These features are then pooled along the time axis to convert frame-level features into utterance-level representations, which are subsequently used to compute the final classification score.

It is worth noting that since the outer layer directly processes high-dimensional features from the speech foundation model, the original two convolutional layers (kernel size of 1) used before splitting and after concatenation in Res2Net are removed to improve efficiency.
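To make Eq. (1) concrete, a minimal PyTorch sketch of the outer layer's forward pass follows. `nested_layer_cls` is a placeholder for the nested layer $\mathbf{K}_i(\cdot)$ described next; for the authors' actual implementation, see https://github.com/Liu-Tianchi/Nes2Net.

```python
import torch
import torch.nn as nn

class Nes2NetOuterLayer(nn.Module):
    """Sketch of the Nes2Net outer layer implementing Eq. (1)."""

    def __init__(self, channels: int = 1024, s1: int = 8, nested_layer_cls=None):
        super().__init__()
        assert channels % s1 == 0
        self.s1 = s1
        width = channels // s1
        # one nested layer K_i for every subset except x_1
        self.K = nn.ModuleList(nested_layer_cls(width) for _ in range(s1 - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, channels, time), taken directly from the foundation model
        xs = torch.chunk(x, self.s1, dim=1)
        ys = [xs[0]]                      # y_1 = x_1
        y = self.K[0](xs[1])              # y_2 = K_2(x_2)
        ys.append(y)
        for i in range(2, self.s1):       # y_i = K_i(x_i + y_{i-1}) for 2 < i <= s_1
            y = self.K[i - 1](xs[i] + y)
            ys.append(y)
        return torch.cat(ys, dim=1)       # concatenate along the channel dimension
```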
2) Nested Layer: The nested layer acts as the core module responsible for processing the outer layer's intermediate features, denoted by $x_i^{\prime}$, where $i \in \{2, \ldots, s_1\}$. Based on Eq. 1, $x_i^{\prime}$ is defined as:

$$
x_i^{\prime} = \begin{cases} x_i & i = 2; \\ x_i + y_{i-1} & 2 < i \leq s_1. \end{cases} \tag{2}
$$

Each nested layer $\mathbf{K}_i(\cdot)$ is designed to extract multi-scale representations from its input while maintaining computational efficiency. As shown in Fig. 1, the structure of $\mathbf{K}_i(\cdot)$ follows an SE-Res2Net-like design, but its input is the feature subset $x_i'$ from the outer layer of Nes2Net. Specifically, each nested layer consists of the following components:

Convolutional Layers: The input feature map is first processed by a convolutional layer with a kernel size of 1 to extract local features while preserving the spatial dimensions.

Multi-Scale Feature Extraction: To enable multi-scale processing, the input feature map $x_i^{\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\prime}$, where $j \in \{1, 2, \ldots, s_2\}$. Each subset undergoes separate transformations through convolutional operations $\mathbf{M}_j$ with varying receptive fields, yielding $y_{i,j}$, formulated as:

$$
y_{i,j} = \begin{cases} x_{i,j}^{\prime} & j = 1; \\ \mathbf{M}_j\left(x_{i,j}^{\prime} + y_{i,j-1}\right) & 1 < j \leq s_2. \end{cases} \tag{3}
$$

These transformed subsets are then concatenated to form the output $y_i$ of the nested layer.

SE Module: To further enhance the feature representations, a Squeeze-and-Excitation (SE) module is integrated into each nested layer. The SE module adaptively recalibrates channel-wise features to emphasize informative features and suppress less relevant ones [41].

Residual Connections: To enhance gradient flow and stabilize training, a residual connection is applied by adding the input $x_i^{\prime}$ to the output $y_i$. This design preserves the original information while incorporating newly learned features.

In summary, the nested layer is lightweight, highly efficient, and designed to improve robustness and generalization across diverse conditions.
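A hedged PyTorch sketch of one nested layer $\mathbf{K}_i(\cdot)$ is given below, combining the 1x1 convolution, the multi-scale split of Eq. (3), the SE module, and the residual connection. The 3-tap kernel for $\mathbf{M}_j$ and the SE reduction ratio of 8 are illustrative assumptions (the paper fixes the SE ratio to 8 only for PartialSpoof).

```python
import torch
import torch.nn as nn

class NestedLayer(nn.Module):
    """Sketch of a nested layer K_i: 1x1 conv, Eq. (3) multi-scale split,
    SE recalibration [41], and a residual connection."""

    def __init__(self, channels: int, s2: int = 8, se_reduction: int = 8):
        super().__init__()
        assert channels % s2 == 0
        self.s2 = s2
        width = channels // s2
        self.conv1 = nn.Conv1d(channels, channels, kernel_size=1)
        # one conv M_j per subset except the first; effective receptive fields
        # grow as branches are chained, giving the multi-scale effect
        self.M = nn.ModuleList(
            nn.Conv1d(width, width, kernel_size=3, padding=1)
            for _ in range(s2 - 1)
        )
        self.se = nn.Sequential(           # squeeze-and-excitation gate
            nn.AdaptiveAvgPool1d(1),
            nn.Conv1d(channels, channels // se_reduction, 1), nn.ReLU(),
            nn.Conv1d(channels // se_reduction, channels, 1), nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, channels, time) -- the subset x'_i from the outer layer
        h = self.conv1(x)
        hs = torch.chunk(h, self.s2, dim=1)
        y = hs[0]                          # y_{i,1} = x'_{i,1}
        ys = [y]
        for j in range(1, self.s2):        # y_{i,j} = M_j(x'_{i,j} + y_{i,j-1})
            y = self.M[j - 1](hs[j] + y)
            ys.append(y)
        out = torch.cat(ys, dim=1)
        return out * self.se(out) + x      # SE gating, then residual
```

Under these assumptions, the two sketches compose directly, e.g. `Nes2NetOuterLayer(channels=1024, s1=8, nested_layer_cls=NestedLayer)`.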
# B. Enhanced Nested Res2Net (Nes2Net-X)

Nes2Net efficiently addresses the high-dimensional feature issue. However, it relies on an additive combination method within the nested layer, which may limit the flexibility and effectiveness of feature extraction, as it implicitly assigns equal importance to all features. To further enhance the representational capacity of Nes2Net, we propose an improved variant named Nes2Net-X. It replaces the original addition operation in the nested layer with a concatenation followed by a learnable weighted summation. This design explicitly preserves feature subset individuality before fusion and employs learnable weights to adaptively combine these subsets. Nes2Net-X consists of the following components:

Feature Splitting and Processing: This component is the same as in the Nes2Net nested layer. The input feature $x_i^{\prime}$ is equally split into $s_2$ subsets along the channel dimension, denoted by $x_{i,j}^{\prime}$, where $j \in \{1, 2, \dots, s_2\}$. Each subset $x_{i,j}^{\prime}$ undergoes a convolutional operation to extract feature representations.

Feature Concatenation: The outputs of the convolutional layers are denoted as $z_{i,j}$. In Nes2Net-X, instead of summing the processed features as in Nes2Net, each current subset $x_{i,j}^{\prime}$ is concatenated with the previous output $z_{i,j-1}$ along a newly introduced dimension before being processed.

Weighted Sum: The additional dimension created during concatenation is merged back into the original feature space using a 'weighted sum' operation. This operation enables the model to dynamically assign importance to each subset, enhancing feature representation. For each subset, the 'weighted sum' is applied to the output feature $z_{i,j}$ of the convolutional layer. Let $w_{i,j}$ denote the learnable weights assigned to each concatenated feature. The output $y_{i,j}$ of the 'weighted sum' is computed as:

$$
y_{i,j} = \sum_{k=1}^{s} w_{i,j,k} \cdot z_{i,j,k} \tag{4}
$$

where $s$ denotes the number of concatenated subsets and $w_{i,j,k}$ represents the weight for the $k$-th subset feature $z_{i,j,k}$.
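A minimal PyTorch sketch of Eq. (4) follows; the softmax normalization of the learnable weights is our assumption, and the usage note mirrors the two-way stack of the current subset with the previous branch output.

```python
import torch
import torch.nn as nn

class WeightedSum(nn.Module):
    """Sketch of the learnable weighted sum of Eq. (4) over features stacked
    along the extra dimension introduced by concatenation."""

    def __init__(self, num_subsets: int):
        super().__init__()
        self.w = nn.Parameter(torch.ones(num_subsets))   # w_{i,j,k}

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        # z: (batch, num_subsets, channels, time)
        w = torch.softmax(self.w, dim=0)                 # assumed normalization
        return torch.einsum('k,bkct->bct', w, z)         # y = sum_k w_k * z_k

# Usage sketch: fuse the current subset with the previous branch output.
x_cur, y_prev = torch.randn(2, 2, 128, 200)              # two (batch, channels, time) maps
z = torch.stack([x_cur, y_prev], dim=1)                  # stack along a new dimension
y = WeightedSum(num_subsets=2)(z)                        # back to (batch, channels, time)
```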
The weighted summation provides more flexible and effective feature integration, offering several advantages:

- Enhanced Feature Diversity: By concatenating features across subsets, the network captures a richer set of features, encompassing various aspects of the input data.
- Learnable Feature Fusion: The introduction of learnable weights $w$ enables the model to prioritize more informative features, effectively suppressing less relevant ones. This adaptive mechanism allows the network to focus on the most discriminative features for the task.
- Improved Gradient Flow: By combining concatenation with weighted summation, the model facilitates better gradient propagation during training. This helps address potential issues such as vanishing or exploding gradients, leading to more stable and efficient learning.

These modifications enable Nes2Net-X to retain the strengths of Nes2Net while introducing greater flexibility in feature fusion, ultimately improving performance.
# IV. EXPERIMENTAL SETUPS

# A. Datasets

TABLE II
A SUMMARY OF THE DATASETS USED IN OUR EXPERIMENTS.

<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Spoofing Type</td><td colspan="3">Number of Samples</td></tr><tr><td>Train</td><td>Valid</td><td>Test</td></tr><tr><td>CtrSVDD w/o ACESinger bona fide [46]</td><td rowspan="2">Singing Voice</td><td rowspan="2">84,404</td><td rowspan="2">43,625</td><td>64,734</td></tr><tr><td>CtrSVDD w/ ACESinger bona fide [46]</td><td>67,579</td></tr><tr><td>ASVspoof 2019 [47]</td><td></td><td>25,380</td><td>24,844</td><td>-</td></tr><tr><td>ASVspoof 2021 LA [48]</td><td></td><td>-</td><td>-</td><td>181,566</td></tr><tr><td>ASVspoof 2021 DF [48]</td><td>Speech</td><td>-</td><td>-</td><td>611,829</td></tr><tr><td>ASVspoof 5 [49]</td><td></td><td>182,357</td><td>140,950</td><td>680,774</td></tr><tr><td>In-the-Wild [50]</td><td></td><td>-</td><td>-</td><td>31,779</td></tr><tr><td>PartialSpoof [51]</td><td>Partial Spoof</td><td>25,380</td><td>24,844</td><td>71,237</td></tr></table>

We use five datasets across various scenarios, including singing voice deepfake, fully spoofed speech, adversarial attacks, and partially spoofed speech, to evaluate the performance of the proposed model. Singing voice deepfake detection (SVDD) is a growing area of interest in the research community [46], [52], [53]. The CtrSVDD dataset [46], [52] offers structured attack types and official evaluation protocols, making it suitable for systematic architecture exploration. As a newly collected resource, it captures recent spoofing techniques, providing a more challenging and relevant benchmark for modern anti-spoofing systems. We therefore adopt it as a representative example. Moreover, fully spoofed speech is the most studied category. In this work, we include two categories of datasets: (1) the ASVspoof series, which comprises ASVspoof 2019 [47], ASVspoof 2021 Logical Access (LA), ASVspoof 2021 Deepfake (DF) [48], and ASVspoof 5 [49]; and (2) the In-the-Wild dataset [50], which reflects real-world usage scenarios. Partially spoofed speech alters only part of an utterance to convey deceptive meaning. This emerging challenge has attracted growing attention. We use the PartialSpoof [51] dataset as a representative benchmark. Table II summarizes the datasets used in this study. Models are trained on the training set and validated on the validation set to select the best checkpoint for testing.

For CtrSVDD [46], we report results on two official test protocols, according to whether ACESinger bona fide samples are included. The 'A14' attack type of the CtrSVDD dataset is excluded following the official guidelines [46]. ASVspoof 2019 [47] is used only for training and validation, while the In-the-Wild [50], ASVspoof 2021 LA and DF [48] datasets are used only for testing. For the recently released ASVspoof 5 dataset [49], we use its train, development, and evaluation partitions for model training, validation, and testing, respectively. For PartialSpoof [51], we follow the standard partitioning into train, development, and evaluation sets.
# B. Training Strategies

Each experiment is run three times using different random seeds. We report both the result from the best-performing run and the average performance across all runs. The values of $s_1$ and $s_2$ are both set to 8 for Nes2Net and Nes2Net-X. The baseline systems for each dataset are built using SOTA models, and our proposed model adopts similar training strategies. The details are as follows:



Fig. 2. The cyclic learning rate schedule using cosine annealing.

CtrSVDD: For the models trained on the CtrSVDD dataset [46], [52], we follow the baseline system from $[16]^1$. Following the setting in [16], we use a random seed of 42 to ensure reproducibility. Furthermore, due to the inherent stochasticity in deep learning, repeated runs are necessary to obtain reliable average results. We use the AdamW optimizer with batch size 34, an initial learning rate of $1 \times 10^{-6}$, and weight decay of $1 \times 10^{-4}$. The learning rate is scheduled using cyclic cosine annealing down to a minimum of $1 \times 10^{-9}$.

As shown in Fig. 2, over 75 training epochs, we select checkpoints from the epoch with the minimum learning rate, as well as its preceding and following epochs, for validation. The best validation result is then used for testing. We use binary focal loss [54], a generalization of binary cross-entropy loss, with a focusing parameter $(\gamma)$ of 2 and a positive class weight $(\alpha)$ of 0.25. To standardize input length, each sample is randomly cropped or padded to 4 seconds during training. We adopt the Rawboost 'parallel: $(1)+(2)$' data augmentation strategy [55], as explored in [16]. WavLM is used as the front-end model for this dataset. The pre-trained model and implementation of WavLM are obtained from S3PRL<sup>2</sup>.
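In PyTorch terms, the optimizer, scheduler, and loss described above map onto standard components roughly as follows. This is a hedged sketch under assumptions (the placeholder `model` and the restart period `T_0` are ours), not the authors' released training script.

```python
import torch

torch.manual_seed(42)                                   # seed from [16]
model = torch.nn.Linear(10, 1)                          # placeholder for the back-end

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-6, weight_decay=1e-4)
# cyclic cosine annealing down to the 1e-9 floor shown in Fig. 2
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
    optimizer, T_0=25, eta_min=1e-9)

def binary_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    """Binary focal loss [54]: BCE scaled by (1 - p_t)^gamma, with gamma=2
    and positive-class weight alpha=0.25 as in the text."""
    bce = torch.nn.functional.binary_cross_entropy_with_logits(
        logits, targets, reduction='none')
    p_t = torch.exp(-bce)                               # probability of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * bce).mean()
```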
ASVspoof 2019 & 2021: For the models trained on the ASVspoof 2019 [47] dataset, we follow the baseline system proposed in $[15]^3$. Audio data are cropped or concatenated to create segments of approximately 4 seconds in duration (64,600 samples) for both training and testing. We use the Adam optimizer [56] with a weight decay of $1 \times 10^{-4}$. To reproduce the AASIST baseline [15], we reduce the original batch size from 14 to 8 due to GPU memory constraints, and halve the learning rate from $1 \times 10^{-6}$ to $5 \times 10^{-7}$. For Nes2Net, benefiting from its lower GPU memory consumption, we use a batch size of 12 with a learning rate of $2.5 \times 10^{-7}$. The loss function used is weighted Cross Entropy. Following [15], we apply Rawboost augmentations [55], specifically 'series: $(1+2+3)$' (Algo4) and 'series: $(1+2)$' (Algo5), for AASIST baselines. For the proposed Nes2Net-X, only the former augmentation is applied. All models are trained for 100 epochs and the best checkpoint on the validation set is used for testing on the ASVspoof 2021 [48] and In-the-Wild [50] datasets.

$^{1}$ https://github.com/Anmol2059/SVDD2024
$^{2}$ https://github.com/s3prl/s3prl
$^{3}$ https://github.com/TakHemlata/SSL_Anti-spoofing

TABLE III
PERFORMANCE IN EER $(\%)$ ON THE CTRSVDD EVALUATION SET [46] WITH WAVLM [3] FRONT-END. RESULTS ARE SHOWN AS 'BEST (MEAN)' OVER 3 RUNS. PARAMS. AND MMACS REFER TO NUMBER OF PARAMETERS AND MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. W/O AND W/ ACE B.F. REFER TO 'WITHOUT' AND 'WITH' ACESINGER BONA FIDE SAMPLES, RESPECTIVELY. ATTACK-SPECIFIC EERS ARE COMPUTED UNDER THE 'W/O ACE B.F.' CONDITION. BEST RESULTS ARE IN BOLD; SECOND-BEST ARE UNDERLINED. $\dagger$ DENOTES IMPLEMENTATION CONDUCTED BY US.

<table><tr><td rowspan="2">Back-end</td><td rowspan="2">Params.</td><td rowspan="2">MMACs</td><td colspan="5">EER of Different Attack Types</td><td colspan="2">Pooled EER</td></tr><tr><td>A9</td><td>A10</td><td>A11</td><td>A12</td><td>A13</td><td>w/o ACE. B.F.</td><td>w/ ACE. B.F.</td></tr><tr><td>XWSB [39] *</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.32</td></tr><tr><td>SLS [39]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.59</td></tr><tr><td>AASIST (C=32) [16]</td><td>447k</td><td>707.65</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>2.70</td></tr><tr><td>AASIST Light (C=24) †</td><td>159k</td><td>91.35</td><td>1.27 (1.37)</td><td>0.87 (1.00)</td><td>5.44 (5.86)</td><td>4.84 (5.65)</td><td>0.98 (1.05)</td><td>3.95 (4.35)</td><td>3.41 (3.77)</td></tr><tr><td>AASIST Standard (C=32) †</td><td>447k</td><td>707.65</td><td>1.18 (1.28)</td><td>0.73 (0.86)</td><td>3.63 (3.86)</td><td>5.65 (5.77)</td><td>0.88 (1.00)</td><td>3.30 (3.36)</td><td>2.79 (2.89)</td></tr><tr><td>AASIST Large (C=40) †</td><td>662k</td><td>1,091.28</td><td>1.32 (1.37)</td><td>0.87 (0.97)</td><td>3.70 (3.96)</td><td>5.04 (5.63)</td><td>0.96 (1.06)</td><td>3.19 (3.36)</td><td>2.71 (2.94)</td></tr><tr><td>AASIST XL (C=48) †</td><td>835k</td><td>1,555.56</td><td>1.23 (1.36)</td><td>0.76 (0.92)</td><td>3.40 (4.64)</td><td>4.93 (5.55)</td><td>0.89 (1.06)</td><td>3.12 (3.62)</td><td>2.76 (3.18)</td></tr><tr><td>AASIST XXL (C=56) †</td><td>1,087k</td><td>2,104.57</td><td>0.96 (1.20)</td><td>0.66 (0.84)</td><td>3.86 (4.15)</td><td>4.83 (5.43)</td><td>0.75 (0.95)</td><td>3.05 (3.43)</td><td>2.65 (2.95)</td></tr><tr><td>ResNet †</td><td>611k</td><td>70.62</td><td>1.18 (1.21)</td><td>0.80 (0.93)</td><td>3.97 (5.06)</td><td>4.60 (4.86)</td><td>0.96 (1.03)</td><td>3.11 (3.61)</td><td>2.74 (3.17)</td></tr><tr><td>Res2Net †</td><td>452k</td><td>64.93</td><td>1.26 (1.37)</td><td>0.83 (0.86)</td><td>3.59 (4.08)</td><td>4.45 (4.80)</td><td>1.08 (1.09)</td><td>3.02 (3.24)</td><td>2.61 (2.78)</td></tr><tr><td>ECAPA-TDNN (C=128) †</td><td>497k</td><td>80.21</td><td>1.18 (1.39)</td><td>0.67 (0.85)</td><td>4.47 (5.84)</td><td>4.63 (4.96)</td><td>0.87 (1.04)</td><td>3.19 (3.74)</td><td>2.79 (3.30)</td></tr><tr><td>Proposed Nes2Net</td><td>511k</td><td>58.11</td><td>1.23 (1.34)</td><td>0.76 (0.81)</td><td>2.40 (2.43)</td><td>5.00 (5.24)</td><td>0.96 (0.99)</td><td>2.53 (2.55)</td><td>2.22 (2.27)</td></tr><tr><td>Proposed Nes2Net-X</td><td>511k</td><td>91.35</td><td>1.21 (1.23)</td><td>0.63 (0.76)</td><td>2.09 (2.32)</td><td>4.99 (5.24)</td><td>0.83 (0.92)</td><td>2.48 (2.51)</td><td>2.20 (2.24)</td></tr></table>

$*$ XWSB is an ensemble-like model that combines two SSL front-ends [39], while all other models in Table III are based on a single SSL front-end.
ASVspoof 5: Both our AASIST baseline and the proposed Nes2Net-X models are trained using settings similar to those used for AASIST on the ASVspoof 2019 corpus. However, several differences apply. The final learning rate is set to $1 \times 10^{-7}$, we apply data augmentation using MUSAN [57] and RIR [58], and training is stopped if there is no improvement on the development set for 5 consecutive epochs.

PartialSpoof: For models trained on the PartialSpoof [51] dataset, we follow the baseline systems described in [51], $[59]^4$. Specifically, we use wav2vec 2.0 as the front-end, the MSE for P2SGrad [60] as the loss function, and Adam [56] as the optimizer. Following [59], the batch size is set to 2, and a learning rate of $2.5 \times 10^{-6}$ is adopted for the baseline systems. For the proposed Nes2Net and Nes2Net-X, the learning rate is set to $1 \times 10^{-5}$. The pooling layer used for the proposed Nes2Net and Nes2Net-X is the Attentive Statistics Pooling [42], and the reduction ratio of the SE module is set to 8. Training is terminated if no improvement is observed on the development set for 20 consecutive epochs. The epoch yielding the best performance on the development set is used for testing.
# V. RESULTS AND ANALYSIS

All Equal Error Rate (EER) results in this work are reported as 'best (mean)' over multiple runs. For cited results that (1) are based on a single run, (2) report only the best result, or (3) lack sufficient details, only a single value is presented.
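For reference, the metric itself can be computed as in the minimal NumPy sketch below; this is an illustrative implementation (assuming higher scores indicate bona fide speech), not the official challenge scoring tool.

```python
import numpy as np

def compute_eer(bonafide_scores: np.ndarray, spoof_scores: np.ndarray) -> float:
    """Pooled EER: the operating point where the miss rate on bona fide
    trials equals the false-alarm rate on spoofed trials."""
    scores = np.concatenate([bonafide_scores, spoof_scores])
    labels = np.concatenate([np.ones(len(bonafide_scores)),
                             np.zeros(len(spoof_scores))])
    labels = labels[np.argsort(scores)]       # sweep thresholds from low to high
    fnr = np.cumsum(labels) / labels.sum()                  # bona fide rejected so far
    fpr = 1.0 - np.cumsum(1 - labels) / (1 - labels).sum()  # spoof still accepted
    idx = np.argmin(np.abs(fnr - fpr))
    return float((fnr[idx] + fpr[idx]) / 2)
```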
# A. Studies on the CtrSVDD dataset

We conduct experiments on the CtrSVDD dataset [46], following two testing protocols: one including ACESinger bona fide samples and the other excluding them [38]. While results for both protocols are reported in Table III, our primary analysis focuses on the scenario 'without ACESinger bona fide (w/o ACE. B.F.)', as recommended by the dataset creators. Since AASIST $(\mathrm{C} = 32)$ in our prior work [16], as well as SLS and XWSB [39], were evaluated during the CtrSVDD Challenge 2024, portions of their test sets differ from the current official protocol. As a result, the EER by attack type is not directly comparable. To ensure a fair comparison, we re-implemented the AASIST $(\mathrm{C} = 32)$ system under the official protocol and used it as our baseline, referred to as AASIST Standard $(\mathrm{C} = 32)$ in Table III, achieving an EER of $2.79\%$, which is close to the originally reported $2.70\%$ [16]. Under the 'w/o ACE B.F.' condition, the best run achieves an EER of $3.30\%$ with an average of $3.36\%$ across three runs. Further experiments show that scaling up the AASIST model does not improve mean EER, possibly due to parameter redundancy.

We additionally evaluate several widely-used baseline systems, including ResNet [19], Res2Net [18], and ECAPA-TDNN [34]. ECAPA-TDNN and ResNet achieve EERs of $3.74\%$ and $3.61\%$, respectively, which are slightly worse than that of AASIST. In contrast, Res2Net benefits from the advantages of multi-scale feature extraction, delivering the best average performance among the baseline systems with an EER of $3.24\%$. Our proposed Nes2Net outperforms all baseline systems, achieving a mean EER of $2.55\%$ with the lowest computational cost. Furthermore, the enhanced version, Nes2Net-X, further improves the performance to $2.51\%$ EER, marking the best single-model performance reported to date. Compared to Res2Net, ResNet, ECAPA-TDNN, and SOTA AASIST ($C = 32$), Nes2Net-X achieves EER reductions of $23\%$, $30\%$, $33\%$, and $25\%$, respectively.

TABLE IV
PERFORMANCE IN EER $(\%)$ ON THE CTRSVDD EVALUATION SET [46], COMPARING THE PROPOSED NES2NET WITH RES2NET AND ITS VARIOUS VARIANTS. RESULTS ARE REPORTED IN THE FORMAT 'BEST (MEAN)' ACROSS 3 RUNS, E.G., 3.02 (3.24) IN THE FIRST ROW, OR AS THE RESULT OF A SINGLE EXPERIMENT, E.G., 3.21 IN THE SECOND ROW. 'B' AND 'S' REPRESENT THE NUMBER OF BLOCKS AND SCALE OF RES2NET, RESPECTIVELY.

<table><tr><td rowspan="2">Back-end</td><td rowspan="2">Dimensionality Reduction Layer</td><td rowspan="2">Reduced Dimension D</td><td rowspan="2">Params.</td><td rowspan="2">MMACs</td><td colspan="2">Pooled EER</td><td rowspan="2">Remarks</td></tr><tr><td>w/o ACE. B.F.</td><td>w/ ACE. B.F.</td></tr><tr><td>Res2Net (b=4, s=4)</td><td>✓</td><td>128</td><td>452k</td><td>64.93</td><td>3.02 (3.24)</td><td>2.61 (2.78)</td><td></td></tr><tr><td>Res2Net (b=4, s=16)</td><td>✓</td><td>128</td><td>427k</td><td>59.95</td><td>3.21</td><td>2.80</td><td rowspan="3">increase scale s</td></tr><tr><td>Res2Net (b=4, s=64)</td><td>✓</td><td>128</td><td>419k</td><td>58.28</td><td>3.15</td><td>2.74</td></tr><tr><td>Res2Net (b=4, s=128)</td><td>✓</td><td>128</td><td>417k</td><td>57.98</td><td>3.26</td><td>2.88</td></tr><tr><td>Res2Net (b=4, s=4)</td><td>✓</td><td>64</td><td>180k</td><td>23.25</td><td>4.32</td><td>3.76</td><td rowspan="2">change D</td></tr><tr><td>Res2Net (b=4, s=4)</td><td>✓</td><td>256</td><td>1,273k</td><td>202.91</td><td>3.83</td><td>3.38</td></tr><tr><td>Res2Net-woDR (b=1, s=4)</td><td>×</td><td>-</td><td>861k</td><td>119.15</td><td>4.15</td><td>3.62</td><td></td></tr><tr><td>Res2Net-woDR (b=1, s=8)</td><td>×</td><td>-</td><td>615k</td><td>70.12</td><td>4.23</td><td>3.71</td><td></td></tr><tr><td>Res2Net-woDR (b=1, s=16)</td><td>×</td><td>-</td><td>456k</td><td>38.24</td><td>3.82</td><td>3.35</td><td rowspan="5">remove dimensionality reduction layer and increase scale s</td></tr><tr><td>Res2Net-woDR (b=1, s=32)</td><td>×</td><td>-</td><td>367k</td><td>20.45</td><td>2.98 (3.45)</td><td>2.56 (3.02)</td></tr><tr><td>Res2Net-woDR (b=1, s=64)</td><td>×</td><td>-</td><td>320k</td><td>11.10</td><td>2.73 (2.97)</td><td>2.42 (2.61)</td></tr><tr><td>Res2Net-woDR (b=1, s=128)</td><td>×</td><td>-</td><td>296k</td><td>6.31</td><td>3.29</td><td>2.88</td></tr><tr><td>Res2Net-woDR (b=1, s=256)</td><td>×</td><td>-</td><td>284k</td><td>3.88</td><td>3.57</td><td>3.13</td></tr><tr><td>Res2Net-woDR (b=2, s=64)</td><td>×</td><td>-</td><td>637k</td><td>21.78</td><td>3.20</td><td>2.82</td><td rowspan="2">increase depth</td></tr><tr><td>Res2Net-woDR (b=4, s=64)</td><td>×</td><td>-</td><td>1,270k</td><td>43.15</td><td>3.09 (3.18)</td><td>2.73 (2.83)</td></tr><tr><td>Proposed Nes2Net</td><td>×</td><td>-</td><td>511k</td><td>58.11</td><td>2.53 (2.55)</td><td>2.22 (2.27)</td><td rowspan="2">proposed nested design</td></tr><tr><td>Proposed Nes2Net-X</td><td>×</td><td>-</td><td>511k</td><td>91.35</td><td>2.48 (2.51)</td><td>2.20 (2.24)</td></tr></table>
We also analyze performance across different synthetic attack types using the 'w/o ACE B.F.' protocol. Except for the 'A12' attack type [46], our model consistently achieves either the best or second-best performance, demonstrating strong generalization and robustness. Notably, the 'A12' attack type, based on Singing Voice Synthesis (SVS), proves particularly challenging, showing higher EER across all models and highlighting a potential area for future improvement.

We observe that performance trends are consistent across both conditions, with and without ACESinger bona fide samples. Moreover, the EER is lower when ACESinger bona fide samples are included. This indicates that, even though ACESinger bona fide samples are considered out-of-domain, the trained models exhibit strong generalization capabilities and are able to classify these samples accurately.

# B. The Roadmap of Nes2Net
|
| 228 |
+
|
| 229 |
+
In this section, we introduce the roadmap from Res2Net to the proposed Nes2Net, with detailed results summarized in Table IV. All systems are implemented and evaluated under a unified framework for fair comparison. To aid interpretation, we visualize the number of parameters, MACs, and EER. These are represented in Fig. 3 by circle size, the horizontal axis, and the vertical axis, respectively. In the following, we provide detailed analyses:
|
| 230 |
+
|
| 231 |
+
Investigating Res2Net: Among the baselines in Table III, the Res2Net-based back-end outperforms ResNet, AASIST, and ECAPA-TDNN on the CtrlSVDD dataset. Therefore, we select it as the reference baseline for further investigation. First, we experiment with adjusting the scale $s$ of Res2Net. We observe that as $s$ increases, the number of split groups increases linearly; however, the performance shows no significant improvement (depicted as the teal blue line in Fig. 3). This may be because adding too many split groups dilutes the feature representation, leading to redundancy.
|
| 232 |
+
|
| 233 |
+
Next, we explore varying the dimensionality of the output features from the DR layer (referred to as Reduced Dimension
|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
Fig. 3. Visualization of Table III and IV, highlighting our exploration of Res2Net and the roadmap of architectural changes leading to Nes2Net.
Reducing $D$ to 64 significantly lowers model size and MACs compared to the default $D = 128$, but leads to substantial performance degradation, increasing EER from $3.02\%$ to $4.32\%$. Conversely, increasing $D$ to 256 results in a much larger model size and MACs, yet still performs worse than $D = 128$. This may be because a larger $D$ introduces over-parameterization and noise, and may explain why $D = 128$ is commonly adopted in SOTA models [15], [16].
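As a rough, back-of-envelope illustration of what the DR layer costs, the snippet below counts the parameters and per-frame multiply-accumulates of a linear projection from the foundation-model feature size (1024, the dimension used in the runtime benchmark of Section G) down to $D$; modeling the DR layer as a single linear layer is a simplifying assumption made here for illustration.

```python
# Cost of a linear dimensionality-reduction (DR) layer projecting
# F-dimensional foundation-model features down to D dimensions.
F = 1024  # feature dimension of the front-end output (assumed)
for D in (64, 128, 256):
    params = F * D + D      # weight matrix + bias
    macs = F * D            # multiply-accumulate operations per time frame
    print(f"D={D:3d}: {params / 1e3:7.1f}k params, {macs / 1e6:5.2f} MMACs/frame")

# D=128 alone accounts for roughly 131k parameters, a sizeable share of a
# back-end whose total budget is a few hundred thousand parameters.
```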
Removal of DR Layer: Foundation models often incorporate a DR layer in their back-end architecture to compress high-dimensional features into lower-dimensional representations, facilitating downstream tasks. For instance, models like wav2vec 2.0-AASIST [15] utilize such a layer alongside task-specific classifiers (e.g., AASIST, ResNet). However, as discussed in Section II-E, this projection layer consumes a substantial portion of the back-end model's parameters and MACs while potentially causing information loss.
To explore whether bypassing this layer preserves more task-relevant information, we propose a new back-end model: Res2Net without Dimensionality Reduction (Res2Net-woDR). By directly processing high-dimensional features, Res2Net-woDR simplifies the architecture and focuses on the raw features extracted by the speech foundation model. The naming emphasizes the absence of a DR layer, differentiating it from traditional approaches.
We further evaluate the performance of Res2Net-woDR with different scales $s$ (depicted as the green line in Fig. 3). The best performance is observed with $s = 64$, achieving a mean EER of $2.97\%$, which surpasses the best Res2Net baseline. Increasing $s$ beyond this point leads to a decline in performance, likely due to the following factors:
- Feature Dilution. A large $s$ excessively fragments feature representations, weakening their expressiveness and resulting in diluted, less informative features [18].
- Redundant Transformations. An overly large $s$ introduces unnecessary feature transformations, leading to overfitting and reduced generalization [43].
- Restricted Feature Interaction. Since channels are unordered, distant groups may still contain correlated information. In this case, the additional convolutional layers introduced by splitting limit their interactions, weakening the model's ability to capture complex patterns.
Based on the optimal $s$ , we increase the number of blocks $b$ to deepen the model (depicted as the light pink line in Fig. 3). However, no further performance improvement is observed. This could be attributed to the deeper architecture's limited ability to effectively utilize the additional parameters, resulting in diminishing performance gains. It may also increase the risk of overfitting.
The Novel Nested Design: Prior experiments demonstrate that removing the DR layer enhances the performance of Res2Net. We believe that directly extracting information from high-dimensional speech foundation model features avoids the information loss introduced by DR. Our experiments with variations in scale, depth, and dimensionality show that a mean EER of $2.97\%$ marks a performance bottleneck for this design.
Compared to Res2Net-woDR, the proposed Nes2Net adopts a novel nested design that enhances flexibility and significantly boosts the model's representational capacity. Processing larger feature subsets in the outer layer facilitates better interactions across channels within each nested layer. Furthermore, the integrated local cross-channel attention mechanism enhances feature selection while mitigating redundancy, addressing limitations in prior designs. This architectural refinement overcomes the performance limitations observed in the original Res2Net design. As a result, Nes2Net and its enhanced variant Nes2Net-X surpass the earlier performance bottleneck, achieving mean EERs of $2.55\%$ and $2.51\%$, respectively.
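The local cross-channel attention named above follows the spirit of ECA [44], in which a small 1D convolution slides across per-channel descriptors so that each channel is gated only by its nearest neighbors. The sketch below conveys that mechanism under our reading of the idea; it is not the released Nes2Net code.

```python
import torch
import torch.nn as nn

class LocalChannelAttention(nn.Module):
    """ECA-style local cross-channel attention [44]: each channel's gate
    is computed from its k nearest channels only, avoiding the full
    channel-mixing (and parameter cost) of SE-style bottlenecks."""
    def __init__(self, kernel_size: int = 5):
        super().__init__()
        self.conv = nn.Conv1d(1, 1, kernel_size,
                              padding=kernel_size // 2, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, channels, time)
        desc = x.mean(dim=2)                   # (B, C): global average per channel
        gate = self.conv(desc.unsqueeze(1))    # local interaction across channels
        gate = torch.sigmoid(gate).squeeze(1)  # (B, C), gates in (0, 1)
        return x * gate.unsqueeze(2)           # reweight the channels
```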
# C. Studies on the ASVspoof 2021 dataset
TABLE V PERFORMANCE IN EER $(\%)$ ON THE ASVspoof 2021 LA AND DF. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)'. 'CKPT AVG.' REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED. $\dagger$ DENOTES RE-IMPLEMENTATION CONDUCTED BY US. 'ALGO4' AND 'ALGO5' REPRESENT RAWBOOST SERIES AUGMENTATIONS $(1 + 2 + 3)$ AND $(1 + 2)$ [55], RESPECTIVELY. PARAMETERS THAT ARE UNDERLINED ARE CALCULATED BY US. '-' REPRESENTS UNKNOWN. N/A INDICATES THAT THE SYSTEM DOES NOT USE THE AVERAGE CHECKPOINTS METHOD.
<table><tr><td rowspan="2">Remark</td><td rowspan="2">Front-end</td><td rowspan="2">Back-end Model</td><td rowspan="2">Back-end Parameters</td><td rowspan="2">CKPT Avg.</td><td colspan="2">ASVspoof 2021</td></tr><tr><td>LA</td><td>DF</td></tr><tr><td>2022</td><td>wav2vec 2.0</td><td>FIR-NB [61]</td><td>-</td><td>-</td><td>3.54</td><td>6.18</td></tr><tr><td>2022</td><td>wav2vec 2.0</td><td>FIR-WB [61]</td><td>-</td><td>-</td><td>7.08</td><td>4.98</td></tr><tr><td>2022</td><td>wav2vec 2.0</td><td>LGF [62]</td><td>-</td><td>-</td><td>9.66</td><td>4.75</td></tr><tr><td>2023</td><td>wav2vec 2.0</td><td>Conformer (fix) [63]</td><td>2,506k<sup>5</sup></td><td>5</td><td>1.38</td><td>2.27</td></tr><tr><td>2023</td><td>wav2vec 2.0</td><td>Conformer (var) [63]</td><td>2,506k</td><td>5</td><td>0.87</td><td>7.36</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>Ensembling [64] ‡</td><td>-</td><td>-</td><td>2.32 (4.48)</td><td>5.60 (8.74)</td></tr><tr><td>2024</td><td>WavLM</td><td>ASP+MLP [65]</td><td>1,051k</td><td>-</td><td>3.31</td><td>4.47</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>SLIM [14]</td><td>-</td><td>-</td><td>-</td><td>(4.4)</td></tr><tr><td>2024</td><td>WavLM</td><td>AttM-LSTM [31]</td><td>936k<sup>6</sup></td><td>N/A</td><td>3.50</td><td>3.19</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>FTDKD [66]</td><td>-</td><td>-</td><td>2.96</td><td>2.82</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>AASIST2 [67]</td><td>-</td><td>-</td><td>1.61</td><td>2.77</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>MFA [68]</td><td>-</td><td>-</td><td>5.08</td><td>2.56</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>MoE [69]</td><td>-</td><td>-</td><td>2.96</td><td>2.54</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>OCKD [70]</td><td>-</td><td>-</td><td>0.90</td><td>2.27</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>TCM [33]</td><td>2,383k<sup>7</sup></td><td>5</td><td>1.03</td><td>2.06</td></tr><tr><td>2024</td><td>wav2vec 2.0</td><td>SLS [35]</td><td>23,399k<sup>8</sup></td><td>-</td><td>2.87 (3.88)</td><td>1.92 (2.09)</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>LSR+LSA [71]</td><td>-</td><td>-</td><td>1.19</td><td>2.43</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>LSR+LSA [71] ※</td><td>-</td><td>-</td><td>1.05</td><td>1.86</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>WaveSpec [72]</td><td>-</td><td>-</td><td>-</td><td>1.90</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>Mamba [17]</td><td>1,937k<sup>9</sup></td><td>5</td><td>0.93</td><td>1.88</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>SSL-EOW-S. [73] ‡</td><td>-</td><td>-</td><td>-</td><td>1.75 (2.91)</td></tr><tr><td>2025</td><td>wav2vec 2.0</td><td>Cal. Ensemble [73] ‡</td><td>-</td><td>-</td><td>-</td><td>(2.03)</td></tr><tr><td>2022</td><td>wav2vec 2.0</td><td>AASIST [15]</td><td>447k<sup>10</sup></td><td>N/A</td><td>0.82 (1.00)</td><td>2.85 (3.69)</td></tr><tr><td>†</td><td>wav2vec 2.0</td><td>AASIST (algo4)</td><td>447k</td><td>N/A</td><td>1.13 (1.36)</td><td>3.37 (4.09)</td></tr><tr><td>†</td><td>wav2vec 2.0</td><td>AASIST (algo5)</td><td>447k</td><td>N/A</td><td>0.93 (1.40)</td><td>3.56 (5.07)</td></tr><tr><td rowspan="4">Ours</td><td>wav2vec 2.0</td><td>Nes2Net</td><td>511k</td><td>N/A</td><td>1.61 (1.90)</td><td>1.89 (2.12)</td></tr><tr><td>wav2vec 2.0</td><td>Nes2Net-X</td><td>511k</td><td>N/A</td><td>1.73 (1.95)</td><td>1.65 (1.91)</td></tr><tr><td>wav2vec 2.0</td><td>Nes2Net-X</td><td>511k</td><td>3</td><td>1.66 (1.87)</td><td>1.54 (1.98)</td></tr><tr><td>wav2vec 2.0</td><td>Nes2Net-X</td><td>511k</td><td>5</td><td>1.88 (2.00)</td><td>1.49 (1.78)</td></tr></table>
※: with extra data augmentation [71]. ‡: ensemble of multiple models.
The ASVspoof series datasets are widely used as benchmarks for advancing research in detecting spoofed speech [47], [48]. Following the standard protocol, we train models on ASVspoof 2019 [47] and evaluate them on ASVspoof 2021 Logical Access (LA) and Deepfake (DF) tasks [48]. The LA task focuses on detecting synthetic and voice-converted speech transmitted over telephony systems, introducing challenges related to channel effects and transmission variability. In contrast, the DF task targets detecting manipulated, compressed speech data commonly found on online platforms. This reflects real-world scenarios where deepfake audio circulates, making the DF task a valuable benchmark for evaluating deepfake detection systems.
The results in Table V show that for the LA track, our Nes2Net achieves a mean EER of $1.90\%$, comparable to SOTA systems.
$^{5}$ https://github.com/ErosRos/conformer-based-classifier-for-anti-spoofing
$^{6}$ https://github.com/pandartialdTJU/AttM_INTERSPEECH24
$^{7}$https://github.com/ductuantruong/tcm_add
$^{8}$ https://github.com/QiShanZhang/SLSforASVspoof-2021-DF
$^{9}$https://github.com/swagshaw/XLSR-Mamba
$^{10}$ https://github.com/TakHemlata/SSL_Anti-spoofing
TABLE VI PERFORMANCE IN EER $(\%)$ FOR DIFFERENT TYPES OF VOCODERS AND COMPRESSION CONDITIONS ON THE ASVSPOOF 2021 DF TEST SET. THE FIVE EER VALUES FOR EACH SUB-ITEM, FROM LEFT TO RIGHT, CORRESPOND TO NES2NET-X, MAMBA [17], SLS [35], TCM [33], AND AASIST [15]. THE BEST PERFORMANCE IS REPORTED IN BOLD FONTS, AND THE SECOND-BEST IS UNDERLINED.
<table><tr><td></td><td>Traditional Vocoder</td><td>Wav Concatenation</td><td>Neural Autoreg.</td><td>Neural Non-autoreg.</td><td>Unknown</td><td>Pooled EER</td></tr><tr><td>C1 No compression</td><td>0.36/0.78/1.21/0.95/1.22</td><td>0.76/0.76/0.80/0.76/2.28</td><td>2.70/3.88/3.12/3.89/3.45</td><td>0.52/0.87/0.68/0.95/1.56</td><td>1.64/1.63/1.23/1.73/1.99</td><td>1.47/1.89/1.72/2.23/2.34</td></tr><tr><td>C2 Low mp3</td><td>1.48/0.94/1.94/1.67/2.72</td><td>2.96/2.20/2.16/2.56/5.84</td><td>2.89/3.23/2.71/3.59/5.96</td><td>1.23/0.86/0.78/1.32/3.33</td><td>2.54/1.69/1.65/1.93/4.30</td><td>1.75/1.84/2.02/2.11/4.30</td></tr><tr><td>C3 High mp3</td><td>0.44/0.88/1.39/0.96/1.83</td><td>1.13/1.49/1.17/1.45/3.35</td><td>2.47/3.35/2.91/3.70/3.79</td><td>0.44/0.87/0.69/0.88/2.02</td><td>2.29/1.85/1.34/1.67/2.65</td><td>1.32/1.85/1.59/1.95/2.64</td></tr><tr><td>C4 Low m4a</td><td>0.44/0.95/1.48/1.22/1.57</td><td>1.15/0.85/1.24/1.67/2.09</td><td>2.79/3.39/2.79/3.40/3.75</td><td>0.54/0.96/0.70/1.22/1.65</td><td>1.32/1.22/1.14/1.41/2.10</td><td>1.40/1.92/1.74/2.01/2.37</td></tr><tr><td>C5 High m4a</td><td>0.45/0.80/1.34/0.98/1.16</td><td>0.62/0.76/0.71/0.76/2.10</td><td>2.77/3.48/2.96/3.73/3.39</td><td>0.56/0.90/0.64/1.07/1.34</td><td>1.88/1.70/1.34/1.43/1.87</td><td>1.59/2.05/1.79/1.96/2.14</td></tr><tr><td>C6 Low ogg</td><td>0.69/1.13/2.14/1.44/2.35</td><td>0.80/0.97/0.91/0.91/2.23</td><td>1.92/2.80/2.44/2.79/3.67</td><td>0.48/0.78/0.61/0.84/1.62</td><td>1.05/1.14/1.00/1.01/2.23</td><td>1.09/1.61/1.88/1.87/2.58</td></tr><tr><td>C7 High ogg</td><td>0.70/1.13/1.52/1.35/1.57</td><td>0.62/0.80/0.71/0.80/1.50</td><td>2.05/2.84/2.26/2.66/2.92</td><td>0.43/0.65/0.52/0.74/1.00</td><td>1.34/1.05/0.96/0.96/1.27</td><td>1.35/1.61/1.57/1.74/1.92</td></tr><tr><td>C8 mp3→m4a</td><td>0.95/1.26/2.28/1.74/3.01</td><td>1.52/0.97/1.08/1.08/2.96</td><td>2.22/3.01/2.31/2.96/4.49</td><td>0.61/0.57/0.65/0.95/2.05</td><td>1.61/1.18/1.09/1.18/2.66</td><td>1.48/1.65/1.92/1.97/3.31</td></tr><tr><td>C9 ogg→m4a</td><td>0.70/1.26/2.15/1.49/2.28</td><td>0.88/0.97/0.99/0.88/2.52</td><td>1.92/3.01/2.57/2.88/3.76</td><td>0.52/0.70/0.65/0.78/1.57</td><td>0.96/1.09/1.09/1.05/2.14</td><td>1.13/1.79/2.04/1.88/2.75</td></tr><tr><td>Pooled EER</td><td>0.72/1.14/1.88/1.40/2.15</td><td>1.10/1.05/1.07/1.14/2.85</td><td>2.70/3.32/2.86/3.40/4.05</td><td>0.63/0.80/0.69/0.94/1.84</td><td>1.86/1.43/1.23/1.38/2.45</td><td>1.49/1.88/1.92/2.06/2.85</td></tr></table>
Fig. 4. Visualization of the EER $(\%)$ across various vocoders and compression conditions on the ASVspoof 2021 DF test set. Each EER value is shown as a colored circle, where the size indicates the EER value, and the color represents the performance ranking among the five models: blue (best) to light red (worst). The five EER values for each sub-item, from left to right, correspond to the proposed Nes2Net-X, Mamba [17], SLS [35], TCM [33], and AASIST [15].
For the DF track, which more closely reflects real-world scenarios as discussed earlier, the baseline system AASIST [15] achieves its best EER of $2.85\%$ and a mean EER of $3.69\%$, remaining competitive with current SOTA systems. The SLS [35] and TCM [33] models achieve EERs close to $2\%$, demonstrating strong performance at the SOTA level. The Mamba-based [17] model further improves results, reducing the EER to $1.88\%$. Notably, our proposed Nes2Net attains its best EER of $1.89\%$ and a mean EER of $2.12\%$, comparable to the performance of current SOTA systems. The enhanced variant, Nes2Net-X, achieves the best performance among all compared systems, with a best EER of $1.65\%$ and a mean EER of $1.91\%$.
Inspired by prior works [17], [33], we average the weights of several top-performing checkpoints on the validation set to obtain an improved model. This approach further improves the performance of the DF task to a best EER of $1.49\%$ and a mean EER of $1.78\%$ , which, to the best of our knowledge, is the best performance reported to date. Furthermore, compared to Mamba [17], our model achieves this performance with approximately $74\%$ fewer parameters, demonstrating superior efficiency.
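Checkpoint averaging of this kind is an element-wise mean over the saved weight tensors. A minimal sketch, assuming plain PyTorch `state_dict` checkpoints of an identical architecture (the file names are illustrative):

```python
import torch

def average_checkpoints(paths):
    """Average the floating-point parameters of several checkpoints, as in
    [17], [33]. Non-float entries (e.g., integer counters) are copied from
    the first checkpoint."""
    avg = None
    for path in paths:
        state = torch.load(path, map_location="cpu")
        if avg is None:
            avg = {k: v.clone().float() if v.is_floating_point() else v.clone()
                   for k, v in state.items()}
        else:
            for k, v in state.items():
                if avg[k].is_floating_point():
                    avg[k] += v.float()
    for k, v in avg.items():
        if v.is_floating_point():
            v /= len(paths)  # in-place mean over all checkpoints
    return avg

# e.g., average the 5 checkpoints with the lowest validation EER:
# model.load_state_dict(average_checkpoints([f"top{i}.pt" for i in range(5)]))
```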
The analysis above summarizes overall performance on the DF test set. The DF dataset also provides detailed labels for vocoder types and compression conditions, enabling more fine-grained analysis. To further evaluate performance, we compare the SOTA models Mamba, SLS, TCM, and AASIST with our proposed Nes2Net-X across these sub-tracks. The results are presented in Table VI. To improve readability and make the extensive numerical data easier to interpret, we also visualize the table's results in Fig. 4.
For traditional vocoders, all models perform well, with most EERs below $2\%$ . Notably, our proposed Nes2Net-X achieves exceptional results, consistently yielding EERs under $1\%$ across all conditions except C2. This demonstrates the strong stability of Nes2Net-X when handling unseen and relatively simple scenarios. In contrast, for neural autoregressive vocoders, all models experience a noticeable drop in performance, with EER reaching up to $5.96\%$ . This indicates the greater challenge posed by the sequential and dynamic nature of autoregressive vocoders, which introduce higher variability in synthesis. Nevertheless, Nes2Net-X maintains a clear advantage over the competing models, demonstrating its robustness in handling these complex synthesis conditions.
From the perspective of compression conditions, the differences in model performance are less pronounced compared to the variations observed across vocoder types. Nes2Net-X consistently achieves the lowest EERs across all compression conditions, regardless of the level of distortion introduced by compression. This consistency highlights the model's strong generalization ability across different levels of compression.
Overall, these findings demonstrate that Nes2Net-X is not only highly effective across diverse vocoder types, but also maintains superior performance under varying compression conditions. This robustness underscores the model's capability to handle both compression diversity and complex synthesis challenges, making it a reliable solution for deepfake audio detection across a wide range of scenarios.
# D. The results on the In-the-Wild dataset
TABLE VII PERFORMANCE IN EER $(\%)$ ON THE IN-THE-WILD [50] DATASET. OUR RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS.
<table><tr><td>Front-end</td><td>Year</td><td>Back-end</td><td>EER</td></tr><tr><td rowspan="14">wav2vec 2.0</td><td>2022</td><td>RawNet&AASIST (reported by [35])</td><td>10.46</td></tr><tr><td>2024</td><td>SLIM [14]</td><td>- (12.5)</td></tr><tr><td>2024</td><td>MoE [69]</td><td>9.17</td></tr><tr><td>2024</td><td>Conformer [63]</td><td>8.42</td></tr><tr><td>2024</td><td>TCM [33]</td><td>7.79</td></tr><tr><td>2024</td><td>OCKD [70]</td><td>7.68</td></tr><tr><td>2024</td><td>SLS [35]</td><td>7.46 (8.87)</td></tr><tr><td>2024</td><td>Pascu et al. [74]</td><td>- (7.2)</td></tr><tr><td>2025</td><td>Mamba [17]</td><td>6.71</td></tr><tr><td>2025</td><td>WaveSpec [72]</td><td>6.58</td></tr><tr><td>2025</td><td>LSR+LSA [71]</td><td>5.92</td></tr><tr><td>2025</td><td>LSR+LSA [71]※</td><td>5.54</td></tr><tr><td>-</td><td>Proposed Nes2Net</td><td>5.80 (7.06)</td></tr><tr><td>-</td><td>Proposed Nes2Net-X</td><td>5.52 (6.60)</td></tr></table>
※: with extra data augmentation [71].
The In-the-Wild dataset [50] is a collection of deepfake videos sourced from the internet. Unlike controlled datasets, it captures the diverse and unpredictable nature of real-world scenarios. This diversity is essential for developing and evaluating deepfake detection models, as it challenges them to generalize effectively across a wide range of conditions.
In addition, unlike many other datasets that rely on self-generated fake audio, this dataset is collected from publicly available video and audio files explicitly labeled as audio deepfakes [50]. To account for the potential presence of partial spoofing, we evaluate our proposed Nes2Net and Nes2Net-X using the entire duration of each test sample instead of restricting it to the first 4 seconds, as the latter approach risks missing partially spoofed segments.
The testing results, alongside SOTA models, are reported in Table VII. We find that the overall performance trends are consistent with those seen on the ASVspoof 2021 DF dataset. However, EERs on the In-the-Wild dataset are generally higher than those on the DF dataset, reflecting greater complexity and variability in real-world scenarios. Notably, the proposed Nes2Net-X outperforms all SOTA models, achieving the lowest EER of $5.52\%$ and a mean EER of $6.60\%$ on this challenging dataset.
# E. The results on the ASVspoof 5 dataset
The ASVspoof 5 dataset represents the most recent edition in the ASVspoof series.
TABLE VIII A COMPARISON BETWEEN THE PROPOSED NES2NET AND THE AASIST BASELINE SYSTEM ON THE ASVSPOOF 5 DATASET [49]. 'PARAMS.' AND 'MMACs' REFER TO THE NUMBER OF PARAMETERS AND THE NUMBER OF MILLION MULTIPLY-ACCUMULATE OPERATIONS, RESPECTIVELY. 'AVG.' INDICATES THE AVERAGE RELATIVE PERFORMANCE IMPROVEMENT ACROSS ALL THREE EVALUATION METRICS.
<table><tr><td colspan="3">Back-end</td><td colspan="4">Performance</td></tr><tr><td>Model</td><td>Params.↓</td><td>MMACs↓</td><td>CLLR↓</td><td>minDCF↓</td><td>EER↓</td><td>Avg.</td></tr><tr><td>AASIST</td><td>447k</td><td>707.65</td><td>0.9587</td><td>0.1645</td><td>6.08</td><td>Benchmark</td></tr><tr><td>Nes2Net</td><td>511k</td><td>58.11</td><td>0.7912</td><td>0.1568</td><td>6.13</td><td>7.1%</td></tr><tr><td>Nes2Net-X</td><td>511k</td><td>91.35</td><td>0.7344</td><td>0.1535</td><td>5.92</td><td>10.9%</td></tr></table>
Unlike earlier versions, it introduces adversarial attacks and is crowdsourced under various acoustic conditions [49]. As it is newly released, there are currently no existing systems available for a fair comparison. Therefore, we re-implement the AASIST system as a baseline and compare it with our proposed Nes2Net and Nes2Net-X models. Following the ASVspoof 5 challenge guidelines [49], we use WavLM [3] as the front-end. Based on the evaluation protocol in [37], we assess performance using three metrics: Cost of Log-Likelihood Ratio (CLLR), minimum Detection Cost Function (minDCF), and EER, and present the results in Table VIII. We observe that the Nes2Net and Nes2Net-X back-end models incur only a slight increase in the number of parameters compared to AASIST, while significantly reducing MMACs. Moreover, across all three evaluation metrics, the Nes2Net and Nes2Net-X back-ends improve performance by $7.1\%$ and $10.9\%$, respectively.
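Of the three metrics, EER is the simplest to state: it is the operating point at which the miss rate on bona fide trials equals the false-alarm rate on spoofed trials (CLLR and minDCF follow the official definitions in [37]). A small NumPy sketch of a threshold-sweep EER estimator, shown here on synthetic scores:

```python
import numpy as np

def compute_eer(bonafide: np.ndarray, spoof: np.ndarray) -> float:
    """Sweep thresholds over all observed scores and return the point where
    the bona fide miss rate crosses the spoof false-alarm rate. Higher
    scores are assumed to indicate bona fide speech."""
    thresholds = np.sort(np.concatenate([bonafide, spoof]))
    miss = np.array([(bonafide < t).mean() for t in thresholds])  # rejected bona fide
    fa = np.array([(spoof >= t).mean() for t in thresholds])      # accepted spoof
    idx = np.argmin(np.abs(miss - fa))
    return float((miss[idx] + fa[idx]) / 2)

# Toy usage with synthetic, well-separated score distributions:
rng = np.random.default_rng(0)
eer = compute_eer(rng.normal(2, 1, 1000), rng.normal(0, 1, 1000))
print(f"EER = {100 * eer:.2f}%")
```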
# F. The results on the PartialSpoof dataset
TABLE IX PERFORMANCE IN EER $(\%)$ ON THE PARTIALSPOOF [51] DATASET. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. $\dagger$ INDICATES RESULTS OBTAINED FROM OUR IMPLEMENTATION.
<table><tr><td rowspan="2">Front-end</td><td rowspan="2">Year</td><td rowspan="2">Back-end</td><td colspan="2">PartialSpoof [51]</td></tr><tr><td>Dev</td><td>Eval</td></tr><tr><td rowspan="7">wav2vec 2.0</td><td>2024</td><td>gMLP [51]</td><td>0.35</td><td>0.64</td></tr><tr><td>-</td><td>gMLP†</td><td>0.39 (0.43)</td><td>0.72 (0.80)</td></tr><tr><td>2024</td><td>1D Res2Net [59]</td><td>0.35</td><td>0.73</td></tr><tr><td>-</td><td>1D Res2Net†</td><td>0.35 (0.38)</td><td>0.73 (0.79)</td></tr><tr><td>-</td><td>SE ResNet†</td><td>0.31 (0.50)</td><td>0.77 (0.78)</td></tr><tr><td>-</td><td>Nes2Net</td><td>0.24 (0.36)</td><td>0.53 (0.68)</td></tr><tr><td>-</td><td>Nes2Net-X</td><td>0.20 (0.33)</td><td>0.57 (0.64)</td></tr></table>
Partially manipulating a sentence can significantly alter its intended meaning [59]. When such manipulations occur in small regions, existing models trained on fully spoofed speech and relying on pooling functions struggle to detect these subtle changes. Consequently, there is growing interest in the detection of partially spoofed speech [51], [59], [75].
To evaluate the performance of our proposed model across different spoofing tasks, we conduct experiments on the PartialSpoof dataset [51]. The results are presented in Table IX. First, we reproduce the performance of two SOTA models, achieving results comparable to those reported in their original papers [51], [59]. Additionally, we evaluate SE ResNet, which demonstrates performance similar to the other baselines.
TABLE X THE PERFORMANCE IN EER $(\%)$ ON THE ASVspoof 2021 LA, DF [48], AND IN-THE-WILD [50] DATASETS. THE RESULTS ARE REPORTED IN THE FORMAT OF 'BEST (MEAN)' ACROSS 3 RUNS. 'W/ AUG.' AND 'W/O AUG.' INDICATE WHETHER EVALUATION WITH AUGMENTATIONS ON THE VALIDATION SET IS USED TO SELECT THE BEST CHECKPOINT FOR TESTING. 'CKPT AVG.' REFERS TO THE NUMBER OF CHECKPOINTS AVERAGED.
<table><tr><td rowspan="2">Back-end</td><td rowspan="2">Train Set</td><td rowspan="2">CKPT Avg.</td><td colspan="3">w/ Aug.</td><td colspan="3">w/o Aug.</td></tr><tr><td>21LA [48]</td><td>21DF [48]</td><td>In-the-Wild [50]</td><td>21LA [48]</td><td>21DF [48]</td><td>In-the-Wild [50]</td></tr><tr><td rowspan="3">Nes2Net-X</td><td rowspan="3">ASVspoof 19 [47]</td><td>N/A</td><td>1.63 (1.79)</td><td>1.84 (2.03)</td><td>5.56 (6.61)</td><td>1.73 (1.95)</td><td>1.65 (1.91)</td><td>5.73 (6.83)</td></tr><tr><td>3</td><td>1.70 (1.80)</td><td>1.88 (1.98)</td><td>5.15 (6.31)</td><td>1.66 (1.87)</td><td>1.54 (1.98)</td><td>5.59 (6.90)</td></tr><tr><td>5</td><td>1.67 (1.78)</td><td>1.80 (1.91)</td><td>5.28 (6.31)</td><td>1.88 (2.00)</td><td>1.49 (1.78)</td><td>5.52 (6.60)</td></tr></table>
In contrast, our proposed Nes2Net and Nes2Net-X outperform all three baselines.
# G. Empirical Runtime and Memory Analysis
Number of parameters and MMACs are widely adopted metrics for evaluating model efficiency. These platform-independent measures offer consistent and fair comparisons across different hardware. However, to better reflect the real-world deployment costs of back-end architectures, we additionally benchmark their training time, inference time, and peak GPU memory usage, as summarized in Table XI.
TABLE XI TRAINING AND INFERENCE EFFICIENCY COMPARISON ACROSS BACK-END MODELS. THE TABLE REPORTS THE AVERAGE (AVG.) TRAINING AND INFERENCE TIME PER BATCH IN MILLISECONDS (MS/BATCH), AS WELL AS PEAK GPU MEMORY USAGE IN MEGABYTES (MB).
<table><tr><td rowspan="2">Back-end</td><td colspan="2">Avg. Time (ms/batch)↓</td><td rowspan="2">Peak GPU Memory↓ (MB)</td></tr><tr><td>Training</td><td>Inference</td></tr><tr><td>AASIST Light (C=24)</td><td>27.0</td><td>7.8</td><td>1,327</td></tr><tr><td>AASIST Standard (C=32)</td><td>53.8</td><td>18.7</td><td>3,454</td></tr><tr><td>AASIST Large (C=40)</td><td>79.2</td><td>28.1</td><td>4,273</td></tr><tr><td>AASIST XL (C=48)</td><td>86.1</td><td>30.7</td><td>5,087</td></tr><tr><td>AASIST XXL (C=56)</td><td>100.9</td><td>37.4</td><td>5,905</td></tr><tr><td>ResNet</td><td>7.8</td><td>2.6</td><td>691</td></tr><tr><td>Res2Net</td><td>15.6</td><td>3.5</td><td>721</td></tr><tr><td>ECAPA-TDNN (C=128)</td><td>9.4</td><td>3.1</td><td>698</td></tr><tr><td>Proposed Nes2Net</td><td>20.2</td><td>4.9</td><td>1,312</td></tr><tr><td>Proposed Nes2Net-X</td><td>29.1</td><td>9.2</td><td>2,231</td></tr></table>
All back-end models are evaluated under identical conditions: input features of 400 frames with 1024 dimensions, a batch size of 64, and execution on a dedicated NVIDIA H20 GPU. The first 10 batches are used for warm-up and excluded from the measurement, and the inference and training times are averaged over the subsequent 200 batches. Training time includes the forward, backward, and optimizer update steps.
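A minimal sketch of this measurement protocol in PyTorch follows; the `(batch, frames, dim)` input layout and the helper's name are assumptions made for illustration, and a training-time measurement would additionally wrap a loss, a backward pass, and an optimizer step.

```python
import time
import torch

def benchmark_inference(model, warmup=10, batches=200,
                        batch=64, frames=400, dim=1024, device="cuda"):
    """Time inference on fixed-size random inputs: run `warmup` excluded
    batches, then average over `batches` timed ones, and report the peak
    GPU memory allocated."""
    model = model.to(device).eval()
    x = torch.randn(batch, frames, dim, device=device)
    torch.cuda.reset_peak_memory_stats(device)
    with torch.no_grad():
        for _ in range(warmup):
            model(x)                      # warm-up, excluded from timing
        torch.cuda.synchronize(device)
        start = time.perf_counter()
        for _ in range(batches):
            model(x)
        torch.cuda.synchronize(device)    # wait for all kernels to finish
    ms = (time.perf_counter() - start) * 1000 / batches
    mem = torch.cuda.max_memory_allocated(device) / 2**20
    print(f"inference: {ms:.1f} ms/batch, peak GPU memory: {mem:.0f} MB")
```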
The results show that AASIST models exhibit rapidly increasing runtime and memory consumption as the channel dimension $C$ grows. In contrast, our proposed Nes2Net achieves notably lower latency and memory usage. Nes2Net-X further improves performance in some settings by preserving more high-dimensional information, albeit at the cost of higher resource consumption.
Conventional models such as ResNet, Res2Net, and ECAPA-TDNN offer faster runtime and smaller memory footprints than our proposed method, but fall short in detection accuracy, as shown in earlier experiments. Therefore, when selecting a back-end architecture, we believe both Nes2Net and Nes2Net-X offer flexible options: the former prioritizes efficiency, while the latter favors accuracy when computational resources permit. This underscores the importance of balancing performance and efficiency in real-world applications.
# H. Should We Use Augmentation During Validation?
In all previous experiments, the datasets are split into three non-overlapping subsets: training, validation (or development), and test sets. The validation set is used to select the best-performing checkpoints for final evaluation on the test set. Data augmentation is typically applied to the training set to enhance model performance and generalization. However, the use of augmentation during validation remains inconsistent across prior studies. For instance, wav2vec 2.0-AASIST [15] applies the same augmentation strategy to both training and validation sets. In contrast, WavLM-AASIST [16] does not use augmentation on the validation set, aligning with common practices in speaker verification research [34], [76], [77].
In this section, we compare these two approaches and report the results in Table X. We observe that applying the same augmentation to the validation set as in the training set leads to worse performance on ASVspoof 2021 DF [48], but better results on In-the-Wild [50]. When no augmentation is applied to the validation set, the opposite trend is observed.
From the outcome of the above study, we believe that in cases where robustness to certain variations (e.g., noise, compression, or distortions) is important, applying augmentation during validation provides insights into how well the model handles such conditions. As a result, the selected checkpoints from this approach may generalize better to these variations. Further investigation into this topic may yield deeper insights for future work.
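In code, the two protocols differ only in whether the validation scoring pass applies the training-time corruptions before selecting a checkpoint. A small sketch; `evaluate_eer` is an injected placeholder for the user's own scoring loop, not an API from this work:

```python
def select_checkpoint(checkpoint_paths, evaluate_eer, augment=None):
    """Pick the checkpoint with the lowest validation EER. Passing an
    `augment` transform scores the validation set under the same
    corruptions as training ('w/ Aug.'); `augment=None` is 'w/o Aug.'."""
    best_path, best_eer = None, float("inf")
    for path in checkpoint_paths:
        eer = evaluate_eer(path, augment=augment)  # user-supplied scoring fn
        if eer < best_eer:
            best_path, best_eer = path, eer
    return best_path
```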
# VI. CONCLUSION
In this work, we propose Nested Res2Net (Nes2Net) and its enhanced variant, Nes2Net-X, as lightweight and dimensionality reduction (DR) layer-free back-end architectures designed for speech anti-spoofing in the era of foundation models. Unlike conventional approaches that rely on a DR layer to bridge the mismatch between high-dimensional features and downstream classifiers, our proposed architectures directly process these rich representations. This not only eliminates the computational and parameter overhead introduced by DR layers but also avoids information loss, enhancing overall system efficiency and robustness.
Nes2Net incorporates a novel nested multi-scale design that enables more effective feature extraction and deeper cross-channel interactions without increasing model complexity.
The improved Nes2Net-X further strengthens representation learning by introducing learnable weighted feature fusion, offering adaptive control over the feature aggregation process.
We conduct extensive evaluations across five representative datasets: CtrSVDD, ASVspoof 2021, ASVspoof 5, PartialSpoof, and In-the-Wild, covering a wide range of singing voice deepfakes, fully spoofed speech, adversarial attacks, real-world deepfakes, and partially spoofed speech. Across all scenarios, our models achieve SOTA performance, demonstrating superior generalization, compactness, and resilience under unseen and challenging conditions.
In summary, Nes2Net and Nes2Net-X offer a general-purpose, resource-efficient back-end for foundation model-based speech anti-spoofing, providing a practical yet powerful alternative to DR-dependent designs. To facilitate future research and applications, we make all source code and pretrained models publicly available.
# REFERENCES
[1] A. Baevski, Y. Zhou, A. Mohamed, and M. Auli, "wav2vec 2.0: A framework for self-supervised learning of speech representations," in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 33, 2020, pp. 12449-12460.
[2] W.-N. Hsu, B. Bolte, Y.-H. H. Tsai, K. Lakhotia, R. Salakhutdinov, and A. Mohamed, "HuBERT: Self-supervised speech representation learning by masked prediction of hidden units," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 3451-3460, 2021.
[3] S. Chen, C. Wang, Z. Chen, Y. Wu, S. Liu, Z. Chen, J. Li, N. Kanda, T. Yoshioka, X. Xiao, J. Wu, L. Zhou, S. Ren, Y. Qian, Y. Qian, J. Wu, M. Zeng, X. Yu, and F. Wei, "WavLM: Large-scale self-supervised pre-training for full stack speech processing," IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1505-1518, 2022.
[4] A. T. Liu, S.-W. Li, and H.-y. Lee, “TERA: Self-supervised learning of transformer encoder representation for speech,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 29, pp. 2351-2366, 2021.
[5] J. Zhao and W.-Q. Zhang, "Improving automatic speech recognition performance for low-resource languages with self-supervised models," IEEE J. Sel. Top. Signal Process., vol. 16, no. 6, pp. 1227-1241, 2022.
[6] J. weon Jung, W. Zhang, J. Shi, Z. Aldeneh, T. Higuchi, A. Gichamba, B.-J. Theobald, A. Hussen Abdelaziz, and S. Watanabe, "ESPnet-SPK: full pipeline speaker embedding toolkit with reproducible recipes, self-supervised front-ends, and off-the-shelf models," in Proc. INTERSPEECH, 2024, pp. 4278-4282.
[7] M. Li, Y. Ahmadiadli, and X.-P. Zhang, "A survey on speech deepfake detection," ACM Comput. Surv., vol. 57, no. 7, 2025.
[8] N. M. Müller, P. Kawa, W. H. Choong, E. Casanova, E. Gölle, T. Müller, P. Syga, P. Sperl, and K. Böttinger, "MLAAD: The multi-language audio anti-spoofing dataset," in Proc. Int. Jt. Conf. Neural Netw. (IJCNN), 2024, pp. 1-7.
[9] Y. Xie, Y. Lu, R. Fu, Z. Wen, Z. Wang, J. Tao, X. Qi, X. Wang, Y. Liu, H. Cheng, L. Ye, and Y. Sun, "The codecfake dataset and countermeasures for the universally detection of deepfake audio," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 33, pp. 386-400, 2025.
[10] R. K. Das, X. Tian, T. Kinnunen, and H. Li, “The attacker's perspective on automatic speaker verification: An overview,” in Proc. INTERSPEECH, 2020, pp. 4213–4217.
[11] J.-w. Jung, Y. Wu, X. Wang, J.-H. Kim, S. Maiti, Y. Matsunaga, H.-j. Shim, J. Tian, N. Evans, J. S. Chung, W. Zhang, S. Um, S. Takamichi, and S. Watanabe, "SpoofCeleb: Speech deepfake detection and SASV in the wild," IEEE Open J. Signal Process., vol. 6, pp. 68-77, 2025.
[12] J. Du, X. Chen, H. Wu, L. Zhang, I. Lin, I. Chiu, W. Ren, Y. Tseng, Y. Tsao, J.-S. R. Jang et al., "CodecFake-Omni: A large-scale codec-based deepfake speech dataset," arXiv preprint arXiv:2501.08238, 2025.
[13] X. Chen, H. Wu, R. Jang, and H. yi Lee, "Singing voice graph modeling for singfake detection," in Proc. INTERSPEECH, 2024, pp. 4843-4847.
[14] Y. Zhu, S. Koppisetti, T. Tran, and G. Bharaj, "SLIM: Style-linguistics mismatch model for generalized audio deepfake detection," in Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 37, 2024, pp. 67901-67928.
[15] H. Tak, M. Todisco, X. Wang, J. weon Jung, J. Yamagishi, and N. Evans, "Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation," in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 112-119.
[16] A. Guragain, T. Liu, Z. Pan, H. B. Sailor, and Q. Wang, "Speech foundation model ensembles for the controlled singing voice deepfake detection (CtrSVDD) challenge 2024," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024.
[17] Y. Xiao and R. K. Das, "XLSR-Mamba: A dual-column bidirectional state space model for spoofing attack detection," IEEE Signal Process Lett., vol. 32, pp. 1276-1280, 2025.
[18] S.-H. Gao, M.-M. Cheng, K. Zhao, X.-Y. Zhang, M.-H. Yang, and P. Torr, “Res2Net: A new multi-scale backbone architecture,” IEEE Trans. Pattern Anal. Mach. Intell., vol. 43, no. 2, pp. 652-662, 2021.
[19] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2016, pp. 770-778.
[20] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, and J. Qi, "An enhanced Res2Net with local and global feature fusion for speaker verification," in Proc. INTERSPEECH, 2023, pp. 2228-2232.
[21] Y. Chen, S. Zheng, H. Wang, L. Cheng, Q. Chen, S. Zhang, and J. Li, "ERes2NetV2: Boosting short-duration speaker verification performance with computational efficiency," in Proc. INTERSPEECH, 2024, pp. 3245-3249.
[22] T. Liu, K. A. Lee, Q. Wang, and H. Li, "Golden Gemini is all you need: Finding the sweet spots for speaker verification," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 2324-2337, 2024.
[23] X. Li, X. Wu, H. Lu, X. Liu, and H. Meng, "Channel-wise gated Res2Net: Towards robust detection of synthetic speech attacks," in Proc. INTERSPEECH, 2021, pp. 4314-4318.
[24] J. Kim and S. M. Ban, "Phase-aware spoof speech detection based on Res2Net with phase network," in Proc. ICASSP, 2023, pp. 1-5.
[25] T. Liu, I. Kukanov, Z. Pan, Q. Wang, H. B. Sailor, and K. A. Lee, "Towards quantifying and reducing language mismatch effects in cross-lingual speech anti-spoofing," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 1185-1192.
[26] J.-w. Jung, H.-S. Heo, H. Tak, H.-j. Shim, J. S. Chung, B.-J. Lee, H.-J. Yu, and N. Evans, "AASIST: Audio anti-spoofing using integrated spectro-temporal graph attention networks," in Proc. ICASSP, 2022, pp. 6367-6371.
[27] Y. Chen, J. Yi, J. Xue, C. Wang, X. Zhang, S. Dong, S. Zeng, J. Tao, Z. Lv, and C. Fan, "RawBMamba: End-to-end bidirectional state space model for audio deepfake detection," in Proc. INTERSPEECH, 2024, pp. 2720-2724.
[28] Y. Chen, H. Wu, N. Jiang, X. Xia, Q. Gu, Y. Hao, P. Cai, Y. Guan, J. Wang, W. Xie et al., "USTC-KXDIGIT system description for ASVspoof 5 challenge," arXiv preprint arXiv:2409.01695, 2024.
[29] Z. Wei, D. Ye, J. Deng, and Y. Lin, “From voices to beats: Enhancing music deepfake detection by identifying forgeries in background,” in Proc. ICASSP, 2025, pp. 1-5.
[30] Y. Guan, Y. Ai, Z. Li, S. Peng, and W. Guo, "Recursive feature learning from pre-trained models for spoofing speech detection," in Proc. ICASSP, 2025, pp. 1-5.
[31] Z. Pan, T. Liu, H. B. Sailor, and Q. Wang, "Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection," in Proc. INTERSPEECH, 2024, pp. 2090-2094.
[32] M. Huaifah, T. Liu, H. B. Sailor, K. M. Tan, T. K. Vangani, Q. Wang, J. H. Wong, N. F. Chen, and A. T. Aw, "Towards a speech foundation model for Singapore and beyond," arXiv preprint arXiv:2412.11538, 2024.
[33] D.-T. Truong, R. Tao, T. Nguyen, H.-T. Luong, K. A. Lee, and E. S. Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Proc. INTERSPEECH, 2024, pp. 537–541.
[34] B. Desplanques, J. Thienpondt, and K. Demuynck, "ECAPA-TDNN: Emphasized channel attention, propagation and aggregation in TDNN based speaker verification," in Proc. INTERSPEECH, 2020, pp. 3830-3834.
[35] Q. Zhang, S. Wen, and T. Hu, "Audio deepfake detection with self-supervised XLS-R and SLS classifier," in Proc. ACM Int. Conf. Multimedia, 2024, pp. 6765-6773.
[36] Z. Ge, X. Xu, H. Guo, Z. Yang, and B. Schuller, "GNCL: A graph neural network with consistency loss for segment-level spoofed speech detection," in Proc. ICASSP, 2025, pp. 1-5.
[37] X. Wang, H. Delgado, H. Tak, J. weon Jung, H. jin Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. H. Kinnunen, N. Evans, K. A. Lee, and J. Yamagishi, "ASVspoof 5: crowdsourced speech data, deepfakes, and adversarial attacks at scale," in Autom. Speaker Verif. Spoofing Countermeas. Workshop, 2024, pp. 1-8.
[38] Y. Zhang, Y. Zang, J. Shi, R. Yamamoto, T. Toda, and Z. Duan, "SVDD 2024: The inaugural singing voice deepfake detection challenge," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 782-787.
[39] Q. Zhang, S. Wen, F. Yan, T. Hu, and J. Li, "XWSB: A blend system utilizing XLS-R and WavLM with SLS classifier detection system for SVDD 2024 challenge," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2024, pp. 788-794.
[40] J. Yi, J. Tao, R. Fu, X. Yan, C. Wang, T. Wang, C. Y. Zhang, X. Zhang, Y. Zhao, Y. Ren et al., "ADD 2023: the second audio deepfake detection challenge," arXiv preprint arXiv:2305.13774, 2023.
[41] J. Hu, L. Shen, and G. Sun, "Squeeze-and-excitation networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2018.
[42] K. Okabe, T. Koshinaka, and K. Shinoda, "Attentive statistics pooling for deep speaker embedding," in Proc. INTERSPEECH, 2018, pp. 2252-2256.
[43] T. Zhou, Y. Zhao, and J. Wu, "ResNeXt and Res2Net structures for speaker verification," in Proc. IEEE Spoken Lang. Technol. Workshop (SLT), 2021, pp. 301-307.
[44] Q. Wang, B. Wu, P. Zhu, P. Li, W. Zuo, and Q. Hu, "ECA-Net: Efficient channel attention for deep convolutional neural networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recog. (CVPR), 2020, pp. 11531-11539.
[45] T. Liu, R. K. Das, K. A. Lee, and H. Li, "MFA: TDNN with multi-scale frequency-channel attention for text-independent speaker verification with short utterances," in Proc. ICASSP, 2022, pp. 7517-7521.
[46] Y. Zang, J. Shi, Y. Zhang, R. Yamamoto, J. Han, Y. Tang, S. Xu, W. Zhao, J. Guo, T. Toda, and Z. Duan, "CtrSVDD: A benchmark dataset and baseline analysis for controlled singing voice deepfake detection," in Proc. INTERSPEECH, 2024, pp. 4783-4787.
[47] X. Wang, J. Yamagishi, M. Todisco, H. Delgado, A. Nautsch, N. Evans, M. Sahidullah, V. Vestman, T. Kinnunen, K. A. Lee, L. Juvela, P. Alku, Y.-H. Peng, H.-T. Hwang, Y. Tsao, H.-M. Wang, S. L. Maguer, M. Becker, F. Henderson, R. Clark, Y. Zhang, Q. Wang, Y. Jia, K. Onuma, K. Mushika, T. Kaneda, Y. Jiang, L.-J. Liu, Y.-C. Wu, W.-C. Huang, T. Toda, K. Tanaka, H. Kameoka, I. Steiner, D. Matrouf, J.-F. Bonastre, A. Govender, S. Ronanki, J.-X. Zhang, and Z.-H. Ling, "ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech," Comput. Speech Lang., vol. 64, p. 101114, 2020.
[48] J. Yamagishi, X. Wang, M. Todisco, M. Sahidullah, J. Patino, A. Nautsch, X. Liu, K. A. Lee, T. Kinnunen, N. Evans, and H. Delgado, "ASVspoof 2021: accelerating progress in spoofed and deepfake speech detection," in Autom. Speaker Verif. Spoofing Countermeas. Challenge, 2021, pp. 47-54.
[49] X. Wang, H. Delgado, H. Tak, J.-w. Jung, H.-j. Shim, M. Todisco, I. Kukanov, X. Liu, M. Sahidullah, T. Kinnunen et al., "ASVspoof 5: Design, collection and validation of resources for spoofing, deepfake, and adversarial attack detection using crowdsourced speech," arXiv preprint arXiv:2502.08857, 2025.
[50] N. M. Müller, P. Czempin, F. Dieckmann, A. Froghyar, and K. Böttinger, "Does audio deepfake detection generalize?" in Proc. INTERSPEECH, 2022, pp. 2783-2787.
[51] L. Zhang, X. Wang, E. Cooper, N. Evans, and J. Yamagishi, "The PartialSpoof database and countermeasures for the detection of short fake speech segments embedded in an utterance," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 31, pp. 813-825, 2023.
[52] Y. Zang, Y. Zhang, M. Heydari, and Z. Duan, "SingFake: Singing voice deepfake detection," in Proc. ICASSP, 2024, pp. 12156-12160.
[53] Y. Xie, J. Zhou, X. Lu, Z. Jiang, Y. Yang, H. Cheng, and L. Ye, "FSD: An initial chinese dataset for fake song detection," in Proc. ICASSP, 2024, pp. 4605-4609.
[54] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar, “Focal loss for dense object detection,” in IEEE Int. Conf. Comput. Vis. (ICCV), 2017, pp. 2980–2988.
[55] H. Tak, M. Kamble, J. Patino, M. Todisco, and N. Evans, "Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing," in Proc. ICASSP, 2022, pp. 6382-6386.
[56] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,” in Int. Conf. Learn. Represent., 2015.
[57] D. Snyder, G. Chen, and D. Povey, "MUSAN: A music, speech, and noise corpus," arXiv preprint arXiv:1510.08484, 2015.
[58] T. Ko, V. Peddinti, D. Povey, M. L. Seltzer, and S. Khudanpur, "A study on data augmentation of reverberant speech for robust speech recognition," in Proc. ICASSP, 2017, pp. 5220-5224.
[59] T. Liu, L. Zhang, R. K. Das, Y. Ma, R. Tao, and H. Li, "How do neural spoofing countermeasures detect partially spoofed audio?" in Proc. INTERSPEECH, 2024, pp. 1105-1109.
[60] X. Wang and J. Yamagishi, “A comparative study on recent neural spoofing countermeasures for synthetic speech detection,” in Proc. INTERSPEECH, 2021, pp. 4259–4263.
[61] J. M. Martin-Doñas and A. Álvarez, “The Vicomtech audio deepfake detection system based on wav2vec2 for the 2022 ADD challenge,” in Proc. ICASSP, 2022, pp. 9241–9245.
[62] X. Wang and J. Yamagishi, “Investigating self-supervised front ends for speech spoofing countermeasures,” in Proc. Odyssey Speaker Lang. Recognit. Workshop, 2022, pp. 100–106.
[63] E. Rosello, A. Gomez-Alanis, A. M. Gomez, and A. Peinado, “A conformer-based classifier for variable-length utterance processing in anti-spoofing,” in Proc. INTERSPEECH, 2023, pp. 5281-5285.
[64] E. Rosello, A. M. Gomez, I. López-Espejo, A. M. Peinado, and J. M. Martín-Doñas, “Anti-spoofing ensembling model: Dynamic weight allocation in ensemble models for improved voice biometrics security,” in Proc. INTERSPEECH, 2024, pp. 497–501.
[65] H. M. Tran, D. Guennec, P. Martin, A. Sini, D. Loline, A. Delhay, and P.-F. Marteau, "Spoofed speech detection with a focus on speaker embedding," in Proc. INTERSPEECH, 2024, pp. 2080-2084.
[66] B. Wang, Y. Tang, F. Wei, Z. Ba, and K. Ren, "FTDKD: Frequency-time domain knowledge distillation for low-quality compressed audio deepfake detection," IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4905-4918, 2024.
[67] Y. Zhang, J. Lu, Z. Shang, W. Wang, and P. Zhang, “Improving short utterance anti-spoofing with AASIST2,” in Proc. ICASSP, 2024, pp. 11636-11640.
[68] Y. Guo, H. Huang, X. Chen, H. Zhao, and Y. Wang, "Audio deepfake detection with self-supervised WavLm and multi-fusion attentive classifier," in Proc. ICASSP, 2024, pp. 12702-12706.
[69] Z. Wang, R. Fu, Z. Wen, J. Tao, X. Wang, Y. Xie, X. Qi, S. Shi, Y. Lu, Y. Liu et al., "Mixture of experts fusion for fake audio detection using frozen wav2vec 2.0," arXiv preprint arXiv:2409.11909, 2024.
[70] J. Lu, Y. Zhang, W. Wang, Z. Shang, and P. Zhang, “One-class knowledge distillation for spoofing speech detection,” in Proc. ICASSP, 2024, pp. 11251-11255.
[71] W. Huang, Y. Gu, Z. Wang, H. Zhu, and Y. Qian, "Generalizable audio deepfake detection via latent space refinement and augmentation," in Proc. ICASSP, 2025, pp. 1-5.
[72] Z. Jin, L. Lang, and B. Leng, "Wave-spectrogram cross-modal aggregation for audio deepfake detection," in Proc. ICASSP, 2025, pp. 1-5.
[73] C. Y. Kwok, D.-T. Truong, and J. Q. Yip, "Robust audio deepfake detection using ensemble confidence calibration," in Proc. ICASSP, 2025, pp. 1-5.
[74] O. Pascu, A. Stan, D. Oneata, E. Oneata, and H. Cucu, "Towards generalisable and calibrated audio deepfake detection with self-supervised representations," in Proc. INTERSPEECH, 2024, pp. 4828-4832.
[75] H.-T. Luong, H. Li, L. Zhang, K. A. Lee, and E. S. Chng, “LlamaPartial-Spoof: An LLM-driven fake speech dataset simulating disinformation generation,” arXiv preprint arXiv:2409.14743, 2024.
[76] T. Liu, K. A. Lee, Q. Wang, and H. Li, "Disentangling voice and content with self-supervision for speaker recognition," Proc. Adv. Neural Inf. Process. Syst. (NeurIPS), vol. 36, pp. 50221-50236, 2023.
[77] S. Wang, Z. Chen, K. A. Lee, Y. Qian, and H. Li, “Overview of speaker modeling and its applications: From the lens of deep speaker representation learning,” IEEE/ACM Trans. Audio, Speech, Lang. Process., vol. 32, pp. 4971–4998, 2024.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
[
  {"type": "text", "text": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction", "text_level": 1, "bbox": [166, 128, 831, 174], "page_idx": 0},
  {"type": "text", "text": "Songyan Zhang $^{1*}$ Yongtao Ge $^{2,3*}$ Jinyuan Tian $^{2*}$ Guangkai Xu $^{2}$ Hao Chen $^{2\\boxtimes}$ Chen Lv $^{1}$ Chunhua Shen $^{2}$", "bbox": [214, 200, 776, 239], "page_idx": 0},
  {"type": "text", "text": "<sup>1</sup>Nanyang Technological University, Singapore <sup>2</sup>Zhejiang University, China <sup>3</sup>The University of Adelaide, Australia", "bbox": [186, 244, 812, 282], "page_idx": 0},
  {"type": "image", "img_path": "images/2038d457d38fd0c221b12bdad5e2a377292a6a5391edc6521c9a1a933405292f.jpg", "image_caption": ["Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation."], "image_footnote": [], "bbox": [94, 320, 199, 579], "page_idx": 0},
  {"type": "image", "img_path": "images/7ef319305d817284a367ded55f397489a47ecfddf523efa5658896a2d17354bf.jpg", "image_caption": [], "image_footnote": [], "bbox": [205, 324, 609, 579], "page_idx": 0},
  {"type": "image", "img_path": "images/17b51d4a6a4dec36c51e858d07bf3a17155c0a4b6326adbd345b3eed1d1b4ef0.jpg", "image_caption": [], "image_footnote": [], "bbox": [614, 325, 893, 575], "page_idx": 0},
  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [246, 633, 326, 648], "page_idx": 0},
  {"type": "text", "text": "Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. Recently, the pointmap representation proposed in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction", "bbox": [88, 666, 482, 876], "page_idx": 0},
  {"type": "text", "text": "performance in such scenarios. In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO.", "bbox": [511, 636, 906, 876], "page_idx": 0},
  {"type": "aside_text", "text": "arXiv:2504.05692v2 [eess.IV] 8 Aug 2025", "bbox": [22, 273, 60, 722], "page_idx": 0},
  {"type": "page_footnote", "text": "* Equal contribution. $\\boxtimes$ Corresponding author.", "bbox": [114, 886, 362, 898], "page_idx": 0},
  {"type": "image", "img_path": "images/a572bf8f3f31147ee0b7fe7ce8470f549bc4ee48d2b9a7606bf4cb6e04f57753.jpg", "image_caption": ["Image1"], "image_footnote": [], "bbox": [165, 93, 392, 227], "page_idx": 1},
  {"type": "image", "img_path": "images/865c5f7f18ca194c59889f3437641b2c340f0dae47f7ba5904d226f77a02757d.jpg", "image_caption": ["Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. In static regions, identical pixels share the same 3D coordinates which provide an accurate matching relationship in 3D space, but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships."], "image_footnote": [], "bbox": [406, 89, 759, 237], "page_idx": 1},
  {"type": "image", "img_path": "images/22f3ec6d130b32d236544f7a36f8d34c068ee43491968d8b8252afd707b98ed1.jpg", "image_caption": [], "image_footnote": [], "bbox": [761, 93, 898, 227], "page_idx": 1},
  {"type": "text", "text": "1. Introduction", "text_level": 1, "bbox": [89, 316, 220, 330], "page_idx": 1},
  {"type": "text", "text": "Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction for dynamic scenes. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge.", "bbox": [91, 347, 483, 709], "page_idx": 1},
  {"type": "text", "text": "Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture and receives a pair of images as input. The system incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2.", "bbox": [89, 714, 482, 864], "page_idx": 1},
  {"type": "text", "text": "To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying", "bbox": [89, 869, 482, 901], "page_idx": 1},
  {"type": "text", "text": "POintmap MAtching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. Compared with recent temporal 3D reconstruction methods [41, 43] based on an autoregressive manner, where the previous frames are blocked from the recently added frames, our temporal motion module is based on the self-attention mechanism along the temporal dimension, facilitating comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. In the first stage, we use pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time.", "bbox": [511, 316, 906, 770], "page_idx": 1},
  {"type": "text", "text": "Our contributions are threefold: First, we propose a novel approach that unifies fundamental geometry estimation and motion understanding for dynamic 3D reconstruction into a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interaction of motion features along the temporal dimension, which significantly improves the performance in tasks", "bbox": [511, 779, 908, 901], "page_idx": 1},
  {"type": "text", "text": "where both accurate geometry and precise matching are required for video sequential input, most notably 3D point tracking. Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation.", "bbox": [89, 90, 480, 167], "page_idx": 2},
  {"type": "text", "text": "2. Related Work", "text_level": 1, "bbox": [89, 180, 232, 196], "page_idx": 2},
  {"type": "text", "text": "Geometry estimation refers to the process of determining the spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to resolve ambiguities. For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors from pre-trained diffusion models by finetuning them on synthetic datasets. Depth Anything V2 [51] proposes to estimate a scale-and-shift-invariant disparity map by finetuning the DINOv2 [29] model on synthetic datasets and large-scale pseudo labels. Depth Pro [3] further proposes a FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity of monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] propose to learn temporally consistent depthmaps by leveraging the priors from a video generative model, i.e., SVD [2]. In another line of research, multi-view stereo reconstruction (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break the reconstruction pipeline into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise at every step, thus often resulting in unsatisfactory performance in complex real-world scenes. Recognizing the limitations of previous MVS methods, the seminal work DUSt3R [44] proposes a 3D pointmap representation and trains a network on large-scale data to regress dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still cannot handle reconstruction for dynamic scenes. MonST3R [52] directly finetunes the original DUSt3R model on synthetic datasets that contain dynamic scenes. Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which is still insufficient for long-range tracking. To resolve this problem, Particle Video [35] represents video motion using a set of particles. Each", "bbox": [89, 205, 483, 902], "page_idx": 2},
  {"type": "text", "text": "particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9], and CoTracker [22], have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of the point representation, SpatialTracker [47] lifts 2D points into 3D and performs tracking in 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it from performing 3D point tracking in an end-to-end fashion.", "bbox": [511, 90, 903, 286], "page_idx": 2},
  {"type": "text", "text": "Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] take the straightforward approach of first pre-segmenting the scene into different regions, each corresponding to a single rigid part of an object, and then applying the rigid-SfM technique to each region. Some recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or predefined templates [18]. Shape of Motion [42] proposes a new dynamic scene representation that models the dynamic scene as a set of persistent 3D Gaussians, and optimizes this representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames.", "bbox": [511, 287, 906, 513], "page_idx": 2},
  {"type": "text", "text": "3. Method", "text_level": 1, "bbox": [513, 526, 604, 542], "page_idx": 2},
  {"type": "text", "text": "3.1. Preliminary", "text_level": 1, "bbox": [511, 551, 643, 566], "page_idx": 2},
  {"type": "text", "text": "The overview of our POMATO is shown in Fig. 3. We adopt the definition of pointmap $\\mathbf{X} \\in \\mathbb{R}^{H \\times W \\times 3}$ in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. Given a pair of input images $\\mathbf{I}^1, \\mathbf{I}^2 \\in \\mathbb{R}^{H \\times W \\times 3}$ from two different views, a weight-sharing ViT first extracts the corresponding features $\\mathbf{F}^1, \\mathbf{F}^2$ for each view. Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in the decoder modules, followed by a regression head that estimates pointmaps $\\mathbf{X}^{1,1}, \\mathbf{X}^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}$ along with confidence maps $\\mathbf{C}^{1,1}, \\mathbf{C}^{2,1} \\in \\mathbb{R}^{H \\times W}$ for each image view. Generally, $\\mathbf{X}^{n,m}$ indicates the pointmap $\\mathbf{X}^n$ from camera $n$ expressed in camera $m$'s coordinate frame, which is obtained by a rigid transformation:", "bbox": [511, 573, 906, 800], "page_idx": 2},
  {"type": "equation", "text": "\n$$\n\\mathbf{X}^{n,m} = \\mathbf{P}_m \\mathbf{P}_n^{-1} h\\left(\\mathbf{X}^n\\right), \\tag{1}\n$$\n", "text_format": "latex", "bbox": [622, 825, 903, 844], "page_idx": 2},
  {"type": "text", "text": "where $\\mathbf{P}_m, \\mathbf{P}_n \\in \\mathbb{R}^{3 \\times 4}$ are world-to-camera poses for camera $m$ and camera $n$, respectively, and $h(\\mathbf{X}^n)$ is a homogeneous mapping for the 3D coordinates in the camera coordinate system", "bbox": [511, 854, 906, 902], "page_idx": 2},
  {"type": "image", "img_path": "images/6a30322c438fc8178fdadc08700fa2c23601ca083bfd8c9601b0c19f2ade291e.jpg", "image_caption": ["Figure 3. Overview of our training pipeline. (1) Stage I: built upon the DUSt3R [44] architecture, we introduce a third regression point-matching head, $\\mathrm{Head}_3$, which is in parallel to $\\mathrm{Head}_2$ for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D point of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in the three heads that enables multi-style sequential input for learning temporal motions."], "image_footnote": [], "bbox": [96, 88, 535, 238], "page_idx": 3},
  {"type": "image", "img_path": "images/e6d6b6d0d562795bdee50bef700038574cb99c8c3950f3ff6313b68af3401c0f.jpg", "image_caption": [], "image_footnote": [], "bbox": [535, 88, 901, 238], "page_idx": 3},
  {"type": "text", "text": "of camera $n$.", "bbox": [89, 316, 178, 330], "page_idx": 3},
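Eq. 1 is a rigid change of coordinates applied pixel-wise. Below is a minimal NumPy sketch, assuming (H, W, 3) pointmaps and 3x4 world-to-camera poses [R | t]; the function name and shapes are illustrative, not code from the released repository:

```python
import numpy as np

def transform_pointmap(X_n, P_n, P_m):
    """Express pointmap X^n (H, W, 3) in camera m's frame, per Eq. 1.

    P_n, P_m are 3x4 world-to-camera poses [R | t]. h(.) appends a
    homogeneous 1 so each rigid map is a single matrix product.
    """
    H, W, _ = X_n.shape
    # h(X^n): (H*W, 4) homogeneous coordinates in camera n's frame
    pts = np.concatenate([X_n.reshape(-1, 3), np.ones((H * W, 1))], axis=1)
    # P_n^{-1} for a rigid pose: x_world = R^T x_cam - R^T t
    R_n, t_n = P_n[:, :3], P_n[:, 3]
    P_n_inv = np.concatenate([R_n.T, (-R_n.T @ t_n)[:, None]], axis=1)  # 3x4
    world = pts @ P_n_inv.T                                  # (H*W, 3)
    world_h = np.concatenate([world, np.ones((H * W, 1))], axis=1)
    X_nm = world_h @ P_m.T                                   # camera m coords
    return X_nm.reshape(H, W, 3)
```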
  {"type": "text", "text": "Decoder 1 and its regression head are tasked with estimating the 3D points of $\\mathbf{I}^1$ in its own coordinate system, while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates of $\\mathbf{I}^2$ in $\\mathbf{I}^1$'s coordinate system after a rigid transformation of global rotation and translation. In the following sections, we first introduce our POMATO with pairwise input images and then extend it to video sequence input with our temporal motion module.", "bbox": [89, 330, 483, 452], "page_idx": 3},
  {"type": "text", "text": "3.2. Pointmap Matching with Pairwise Input", "text_level": 1, "bbox": [89, 459, 436, 474], "page_idx": 3},
  {"type": "text", "text": "As discussed before, the definition of $\\mathbf{X}^{2,1}$ depicts a rigid camera transformation that is too ambiguous to reflect explicit matching relationships in dynamic regions. To tackle this, we propose to formulate an explicit pointmap matching $\\mathbf{X}_m^{2,1} \\in \\mathbb{R}^{H \\times W \\times 3}$ that maps dense RGB pixels of $\\mathbf{I}^2$ to the 3D coordinates of corresponding pixels in $\\mathbf{I}^1$ under the first image's coordinate system. Given a 2D query pixel at $(x_2, y_2)$ in $\\mathbf{I}^2$ and its corresponding pixel at $(x_1, y_1)$ in $\\mathbf{I}^1$, the matched pointmap at $(x_2, y_2)$ in $\\mathbf{I}^2$ is:", "bbox": [89, 481, 483, 618], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\mathbf{X}_m^{2,1}\\left(x_2, y_2\\right) = \\mathbf{X}^{1,1}\\left(x_1, y_1\\right), \\tag{2}\n$$\n", "text_format": "latex", "bbox": [186, 622, 480, 640], "page_idx": 3},
  {"type": "text", "text": "where $(x, y)$ indicates the coordinates of the 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 3, $\\mathbf{X}_m^{2,1}$ and $\\mathbf{X}^{1,1}$ are supposed to match perfectly in 3D space, neglecting occluded regions. We argue that the set of decoder tokens from the second branch preserves abundant matching information through iterative cross-attention, so we introduce a matching head with the same architecture as $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$. The supervision for the pointmap matching $\\mathbf{X}_m^{2,1}$ still follows the 3D regression loss, which is defined as the Euclidean distance:", "bbox": [89, 645, 483, 825], "page_idx": 3},
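The matching target in Eq. 2 amounts to a dense gather of view 1's pointmap at the annotated correspondences. A NumPy sketch, assuming integer pixel correspondences (e.g., derived from 2D track annotations) and omitting occlusion/validity masking:

```python
import numpy as np

def matched_pointmap_gt(X11, corr_xy):
    """Ground-truth X_m^{2,1} per Eq. 2.

    X11:     (H, W, 3) pointmap of view 1 in its own frame.
    corr_xy: (H, W, 2) for each pixel (x2, y2) of view 2, the integer
             pixel (x1, y1) of its correspondence in view 1.
    Returns (H, W, 3): for every pixel of view 2, the 3D point of its
    counterpart under view 1's coordinate system.
    """
    x1 = corr_xy[..., 0].astype(int)
    y1 = corr_xy[..., 1].astype(int)
    return X11[y1, x1]  # fancy indexing realizes Eq. 2 densely
```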
  {"type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{m}} = \\left\\| \\frac{1}{z_m} \\mathbf{X}_m^{2,1} - \\frac{1}{\\bar{z}_m} \\bar{\\mathbf{X}}_m^{2,1} \\right\\|, \\tag{3}\n$$\n", "text_format": "latex", "bbox": [184, 837, 480, 869], "page_idx": 3},
  {"type": "text", "text": "where $\\bar{\\mathbf{X}}_m^{2,1}$ is the ground-truth pointmap matching, which can be obtained following Eq. 2 on 2D tracking datasets", "bbox": [89, 869, 483, 901], "page_idx": 3},
  {"type": "text", "text": "with the depth and camera information. $z_m, \\bar{z}_m$ are the norm factors defined in DUSt3R. The matching confidence $\\mathbf{C}_m^{2,1}$ is also learned following the confidence loss for $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$ within valid regions:", "bbox": [511, 316, 906, 378], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{mconf}} = \\mathbf{C}_m^{2,1} \\mathcal{L}_{\\mathrm{m}} - \\alpha \\log \\mathbf{C}_m^{2,1} \\tag{4}\n$$\n", "text_format": "latex", "bbox": [607, 390, 903, 409], "page_idx": 3},
  {"type": "text", "text": "The final loss $\\mathcal{L}$ of our POMATO for pairwise input is a combination of the predefined DUSt3R loss $\\mathcal{L}_{\\mathrm{DUSt3R}}$, the matching loss $\\mathcal{L}_{\\mathrm{m}}$, and the matching confidence loss $\\mathcal{L}_{\\mathrm{mconf}}$. When training POMATO on pairwise input images in the first stage, the parameters of the encoder are frozen.", "bbox": [511, 410, 905, 486], "page_idx": 3},
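Eqs. 3 and 4 combine a scale-normalized regression term with a log-confidence regularizer. A PyTorch sketch, assuming the norm factor is the mean point distance to the origin as in DUSt3R-style normalization and that invalid-pixel masking is handled elsewhere; `alpha` is a placeholder value:

```python
import torch

def matching_loss(X_m, X_m_gt, conf, alpha=0.2):
    """Confidence-weighted pointmap matching loss, per Eqs. 3-4.

    X_m, X_m_gt: (B, H, W, 3) predicted / ground-truth matched pointmaps.
    conf:        (B, H, W) strictly positive confidence map C_m^{2,1}.
    """
    # Per-sample norm factors z_m, z_bar_m: mean distance to the origin.
    z = X_m.norm(dim=-1).mean(dim=(1, 2), keepdim=True).unsqueeze(-1)
    z_bar = X_m_gt.norm(dim=-1).mean(dim=(1, 2), keepdim=True).unsqueeze(-1)
    l_m = (X_m / z - X_m_gt / z_bar).norm(dim=-1)       # Eq. 3, per pixel
    l_conf = conf * l_m - alpha * torch.log(conf)       # Eq. 4
    return l_conf.mean()
```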
  {"type": "text", "text": "3.3. Dynamic Mask Estimation", "text_level": 1, "bbox": [511, 494, 754, 508], "page_idx": 3},
  {"type": "text", "text": "Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an auxiliary module such as an optical flow model, avoiding the additional computation cost and the potential domain gap. For an image pair $\\{\\mathbf{I}^i, \\mathbf{I}^j\\}$ along with the estimates of $\\mathbf{X}^{j,i}$ from $\\mathrm{Head}_2$ and $\\mathbf{X}_{m}^{j,i}$ from $\\mathrm{Head}_3$, the dynamic mask $\\mathbf{D}^{j,i}$ can be obtained by comparing the difference between $\\mathbf{X}^{j,i}$ and $\\mathbf{X}_{m}^{j,i}$:", "bbox": [511, 516, 906, 638], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\mathbf{D}^{j,i} = \\left\\| \\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i} \\right\\| > \\alpha, \\tag{5}\n$$\n", "text_format": "latex", "bbox": [614, 646, 903, 664], "page_idx": 3},
  {"type": "text", "text": "where $\\alpha$ is a dynamic threshold defined as $3 \\times \\mathrm{median}(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)$. The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects for pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks into global alignment are provided in the supplementary materials.", "bbox": [511, 672, 906, 779], "page_idx": 3},
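The thresholding in Eq. 5 is simple enough to transcribe directly. A minimal PyTorch sketch, assuming pointmaps as (H, W, 3) tensors; the function name is illustrative:

```python
import torch

def dynamic_mask(X_m, X):
    """Dynamic mask D^{j,i} per Eq. 5.

    X_m: (H, W, 3) matched pointmap from Head_3.
    X:   (H, W, 3) rigidly transformed pointmap from Head_2.
    Pixels whose matched and rigid 3D coordinates disagree by more than
    3x the median disagreement are flagged as dynamic.
    """
    diff = (X_m - X).norm(dim=-1)        # (H, W) per-pixel 3D distance
    alpha = 3.0 * diff.median()          # adaptive threshold of Eq. 5
    return diff > alpha                  # boolean dynamic mask
```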
  {"type": "text", "text": "3.4. Temporal Motion Module", "text_level": 1, "bbox": [511, 787, 748, 803], "page_idx": 3},
  {"type": "text", "text": "With the fundamental capability of geometric estimation and pointmap matching for pairwise images, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the \"temporal DPT head\", which is illustrated in Fig. 4. For a set of decoder tokens $\\mathbf{G} \\in \\mathbb{R}^{B,T,N,C}$", "bbox": [511, 809, 905, 900], "page_idx": 3},
  {"type": "image", "img_path": "images/e2fe70253e4295dea812bf5a75af67c019b523b5a34fe2c4ad961025ba265b7c.jpg", "image_caption": ["Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in light yellow) into the vanilla DPT [33] head to enhance the temporal consistency."], "image_footnote": [], "bbox": [130, 87, 441, 282], "page_idx": 4},
  {"type": "text", "text": "where $B, T, N, C$ represent the batch size, window length of a video sequence, token number, and token dimension, respectively, we merge the token number dimension into the batch axis and apply the motion module, which consists of two blocks of standard multi-head self-attention modules and feed-forward networks along the temporal dimension $T$. To reduce the computation cost, the temporal motion modules are applied to features of low resolution.", "bbox": [89, 349, 483, 472], "page_idx": 4},
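A PyTorch sketch of one such temporal block: tokens (B, T, N, C) are reshaped so self-attention runs along T, as described above. The pre-norm layout and layer sizes are assumptions rather than the released configuration, and the description above stacks two such blocks:

```python
import torch.nn as nn

class TemporalMotionBlock(nn.Module):
    """Self-attention over the temporal axis of decoder tokens."""

    def __init__(self, dim, heads=8):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(),
                                 nn.Linear(4 * dim, dim))

    def forward(self, g):                 # g: (B, T, N, C) decoder tokens
        B, T, N, C = g.shape
        # Merge the token axis N into the batch so attention runs along T.
        x = g.permute(0, 2, 1, 3).reshape(B * N, T, C)
        h = self.norm1(x)
        x = x + self.attn(h, h, h)[0]     # temporal self-attention
        x = x + self.ffn(self.norm2(x))   # feed-forward network
        return x.reshape(B, N, T, C).permute(0, 2, 1, 3)
```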
  {"type": "text", "text": "3.5. Downstream Temporal Tasks", "text_level": 1, "bbox": [89, 481, 352, 498], "page_idx": 4},
  {"type": "text", "text": "Given a video sequence of $T$ frames $\\mathbf{I}^{t_1}, \\mathbf{I}^{t_2}, \\ldots, \\mathbf{I}^{t_T}$, we construct a unique set of stereo image pairs for each task. As illustrated in Fig. 5, the flexible construction of input pairs, combined with the proposed temporal motion module and pointmap matching head, enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed in the following section.", "bbox": [89, 503, 483, 640], "page_idx": 4},
  {"type": "text", "text": "In addition to the default regression losses for $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$, and the predefined losses in Eq. 3 and Eq. 4 for $\\mathrm{Head}_3$, we further introduce a temporal consistency loss, $\\mathcal{L}_{\\mathrm{t}}$, which is described in detail below.", "bbox": [89, 700, 482, 761], "page_idx": 4},
  {"type": "text", "text": "3D Point Tracking. As illustrated at the top of Fig. 5, the keyframe is set to the first image of the global video sequence and fed to the proposed $\\mathrm{Head}_3$ to obtain the pointmap matching result of each query point (initialized at the first image) under the coordinate system of each reference frame $\\{\\mathbf{X}_m^{t_1,t_1}, \\mathbf{X}_m^{t_1,t_2}, \\mathbf{X}_m^{t_1,t_3}, \\dots, \\mathbf{X}_m^{t_1,t_T}\\}$, while the set of reference frames $\\{\\mathbf{I}^{t_1}, \\mathbf{I}^{t_2}, \\mathbf{I}^{t_3}, \\dots, \\mathbf{I}^{t_T}\\}$ is fed to $\\mathrm{Head}_1$ to obtain the pointmap under each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When performing inference on a video", "bbox": [89, 761, 483, 901], "page_idx": 4},
  {"type": "image", "img_path": "images/8239ab20be4562800b2c68569d3d5ab2c42bca1f03dcdbe48b9eabdda64d41a0.jpg", "image_caption": ["Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. $t_k$ indicates the keyframe. With the help of the motion module and flexible input construction, POMATO can be easily applied to downstream temporal tasks."], "image_footnote": [], "bbox": [519, 90, 906, 406], "page_idx": 4},
  {"type": "text", "text": "longer than $T$ frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ for tracking is:", "bbox": [511, 484, 906, 542], "page_idx": 4},
  {"type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{t}} = \\frac{1}{T} \\sum_{i=1}^{T} \\left\\| \\frac{\\mathbf{X}_m^{t_1,t_i}}{z_m^T} - \\frac{\\bar{\\mathbf{X}}_m^{t_1,t_i}}{\\bar{z}_m^T} \\right\\| + \\left\\| \\frac{\\mathbf{X}^{t_i,t_i}}{z^T} - \\frac{\\bar{\\mathbf{X}}^{t_i,t_i}}{\\bar{z}^T} \\right\\|, \\tag{6}\n$$\n", "text_format": "latex", "bbox": [527, 551, 905, 589], "page_idx": 4},
  {"type": "text", "text": "where $z_m^T = \\mathrm{norm}\\left(\\mathbf{X}_m^{t_1,t_1}, \\mathbf{X}_m^{t_1,t_2}, \\dots, \\mathbf{X}_m^{t_1,t_T}\\right)$ and $\\bar{z}_m^T = \\mathrm{norm}(\\bar{\\mathbf{X}}_m^{t_1,t_1}, \\bar{\\mathbf{X}}_m^{t_1,t_2}, \\dots, \\bar{\\mathbf{X}}_m^{t_1,t_T})$. $z^T$ and $\\bar{z}^T$ are defined analogously.", "bbox": [511, 599, 905, 643], "page_idx": 4},
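The sliding-window scheme is plain index bookkeeping. A sketch, assuming the video has at least `window` frames; how the ragged tail is covered is one of several reasonable choices:

```python
def sliding_windows(num_frames, window=12, overlap=4):
    """Frame-index windows with a 4-frame overlap, as described above."""
    stride = window - overlap
    starts = list(range(0, max(num_frames - window, 0) + 1, stride))
    if starts[-1] + window < num_frames:       # cover a ragged tail
        starts.append(num_frames - window)
    return [list(range(s, s + window)) for s in starts]

# e.g. 30 frames -> windows starting at 0, 8, 16, 18
```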
  {"type": "text", "text": "Video Depth Estimation. As shown in the middle part of Fig. 5, the input video sequence is formulated as a set of identical image pairs $\\{(\\mathbf{I}^{t_1},\\mathbf{I}^{t_1}), (\\mathbf{I}^{t_2},\\mathbf{I}^{t_2}), \\dots, (\\mathbf{I}^{t_T},\\mathbf{I}^{t_T})\\}$ and fed to $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$, where the predictions from each head are identical: $\\{\\mathbf{X}^{t_1,t_1}, \\mathbf{X}^{t_2,t_2}, \\dots, \\mathbf{X}^{t_T,t_T}\\}$. We use the output of $\\mathrm{Head}_1$ as our final video depth estimation. The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ is defined as:", "bbox": [511, 643, 905, 742], "page_idx": 4},
  {"type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{t}} = \\frac{1}{T} \\sum_{i=1}^{T} \\left\\| \\frac{\\mathbf{X}_1^{t_i,t_i}}{z_1^T} - \\frac{\\bar{\\mathbf{X}}^{t_i,t_i}}{\\bar{z}^T} \\right\\| + \\left\\| \\frac{\\mathbf{X}_2^{t_i,t_i}}{z_2^T} - \\frac{\\bar{\\mathbf{X}}^{t_i,t_i}}{\\bar{z}^T} \\right\\|, \\tag{7}\n$$\n", "text_format": "latex", "bbox": [529, 752, 906, 789], "page_idx": 4},
  {"type": "text", "text": "where $\\mathbf{X}_1^{t_i,t_i}$ and $\\mathbf{X}_2^{t_i,t_i}$ indicate the outputs from $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$, respectively. $\\bar{\\mathbf{X}}^{t_i,t_i}$ is the ground-truth pointmap.", "bbox": [511, 799, 905, 830], "page_idx": 4},
  {"type": "text", "text": "3D Reconstruction. Assisted by the temporal motion module, redundant post-processing operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown in the bottom part of Fig. 5, the keyframe is set to the last frame", "bbox": [511, 830, 905, 900], "page_idx": 4},
  {"type": "table", "img_path": "images/d664dc6c171f8e6a159f2659a62cc7b36112bf928cff2e86cd189897267afe7a.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Alignment</td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Optim.</td><td rowspan=\"2\">Onl.</td><td colspan=\"2\">Sintel [4]</td><td colspan=\"2\">BONN [30]</td><td colspan=\"2\">KITTI [13]</td></tr><tr><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td></tr><tr><td rowspan=\"6\">Per-sequence scale</td><td>DUSt3R-GA [44]</td><td>✓</td><td></td><td>0.656</td><td>45.2</td><td>0.155</td><td>83.3</td><td>0.144</td><td>81.3</td></tr><tr><td>MASt3R-GA [26]</td><td>✓</td><td></td><td>0.641</td><td>43.9</td><td>0.252</td><td>70.1</td><td>0.183</td><td>74.5</td></tr><tr><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.378</td><td>55.8</td><td>0.067</td><td>96.3</td><td>0.168</td><td>74.4</td></tr><tr><td>Spann3R [41]</td><td></td><td>✓</td><td>0.622</td><td>42.6</td><td>0.144</td><td>81.3</td><td>0.198</td><td>73.7</td></tr><tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.421</td><td>47.9</td><td>0.078</td><td>93.7</td><td>0.118</td><td>88.1</td></tr><tr><td>POMATO</td><td></td><td>✓</td><td>0.416</td><td>53.6</td><td>0.074</td><td>96.1</td><td>0.085</td><td>93.3</td></tr><tr><td rowspan=\"3\">Per-sequence scale & shift</td><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.335</td><td>58.5</td><td>0.063</td><td>96.4</td><td>0.104</td><td>89.5</td></tr><tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.466</td><td>56.2</td><td>0.111</td><td>88.3</td><td>0.075</td><td>94.3</td></tr><tr><td>POMATO</td><td></td><td>✓</td><td>0.345</td><td>57.9</td><td>0.072</td><td>96.5</td><td>0.084</td><td>93.4</td></tr></table>", "bbox": [132, 88, 859, 275], "page_idx": 5},
  {"type": "text", "text": "Table 1. Video depth evaluation. We report scale-invariant depth and scale & shift invariant depth accuracy on Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second best results in each category are bold and underlined, respectively.", "bbox": [89, 280, 906, 324], "page_idx": 5},
  {"type": "text", "text": "$\\mathbf{I}^{t_T}$ within the temporal window of length $T$ and is fed to $\\mathrm{Head}_1$, yielding the output set $\\{\\mathbf{X}^{t_T,t_T}, \\mathbf{X}^{t_T,t_T}, \\dots, \\mathbf{X}^{t_T,t_T}\\}$. All the reference frames are input to $\\mathrm{Head}_2$ so that the target pointmaps $\\{\\mathbf{X}^{t_1,t_T}, \\mathbf{X}^{t_2,t_T}, \\dots, \\mathbf{X}^{t_T,t_T}\\}$ are aligned under the coordinate system of the keyframe. The temporal consistency loss $\\mathcal{L}_{\\mathrm{t}}$ is:", "bbox": [89, 349, 482, 434], "page_idx": 5},
  {"type": "equation", "text": "\n$$\n\\mathcal{L}_{\\mathrm{t}} = \\frac{1}{T} \\sum_{i=1}^{T} \\left\\| \\frac{\\mathbf{X}^{t_T,t_T}}{z_1^T} - \\frac{\\bar{\\mathbf{X}}^{t_T,t_T}}{\\bar{z}_1^T} \\right\\| + \\left\\| \\frac{\\mathbf{X}^{t_i,t_T}}{z_2^T} - \\frac{\\bar{\\mathbf{X}}^{t_i,t_T}}{\\bar{z}_2^T} \\right\\| \\tag{8}\n$$\n", "text_format": "latex", "bbox": [99, 445, 482, 479], "page_idx": 5},
  {"type": "text", "text": "We further freeze the parameters of Decoder 1 and Decoder 2 when training on the temporal downstream tasks in the second stage. In our work, the temporal window length $T$ is set to 12. Additional explorations on the temporal length can be found in Sec. 4.", "bbox": [89, 489, 483, 566], "page_idx": 5},
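The three input formulations of Fig. 5 differ only in which frame occupies which slot of the stereo pair. A sketch of the pair construction; the view ordering follows our reading of the notation above (outputs are expressed in view 1's coordinate frame, so the tracking keyframe sits in the second slot and the reconstruction keyframe in the first), not the released data loader:

```python
def build_pairs(frames, task):
    """Stereo input pairs (view 1, view 2) for the three downstream tasks.

    frames: list [I_t1, ..., I_tT] for one temporal window.
    """
    if task == "tracking":
        # keyframe I_t1 as view 2: Head_3 maps its query pixels into each
        # reference frame t_i's coordinate system (X_m^{t1, ti})
        return [(f, frames[0]) for f in frames]
    if task == "video_depth":
        # identical pairs: per-frame pointmaps in each frame's own system
        return [(f, f) for f in frames]
    if task == "reconstruction":
        # keyframe I_tT as view 1: Head_2 aligns every frame's pointmap
        # into the keyframe's coordinate system (X^{ti, tT})
        return [(frames[-1], f) for f in frames]
    raise ValueError(f"unknown task: {task}")
```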
  {"type": "text", "text": "4. Experiments", "text_level": 1, "bbox": [89, 580, 223, 598], "page_idx": 5},
  {"type": "text", "text": "4.1. Experimental Details", "text_level": 1, "bbox": [89, 606, 290, 622], "page_idx": 5},
  {"type": "text", "text": "Training data. We train our network with a mixture of five datasets: PointOdyssey [54], TartanAir [45], ParallelDomain4D [40], DynamicReplica [21], and Carla (0.9.15) [10]. The specific number and usage ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground truth depth, as well as camera intrinsics and extrinsics, and encompass a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica have additional 2D trajectory annotations for dynamic objects, which can be used to construct pointmap matching ground truth following Eq. 2. All datasets are used to supervise geometry learning on $\\mathrm{Head}_1$ and $\\mathrm{Head}_2$, while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head.", "bbox": [89, 628, 482, 854], "page_idx": 5},
  {"type": "text", "text": "Training and inference details. Our model architecture is based on the publicly available DUSt3R [44] model, utilizing the same backbone consisting of a ViT-Large encoder", "bbox": [89, 854, 483, 900], "page_idx": 5},
  {"type": "text", "text": "and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model using the publicly available MonST3R checkpoint. For the newly introduced pointmap matching head, we initialize its weights from the pretrained $\\mathrm{Head}_2$ weights of MonST3R. The temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule and an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 GPUs (40G). In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample a downstream task (3D point tracking, video depth estimation, or 3D reconstruction) to construct the input pairs and apply the corresponding loss function.", "bbox": [511, 349, 906, 592], "page_idx": 5},
  {"type": "text", "text": "4.2. Video Depth Estimation", "text_level": 1, "bbox": [511, 604, 733, 619], "page_idx": 5},
  {"type": "text", "text": "Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them together, conducting two forms of alignment: per-sequence scale alignment and per-sequence scale-and-shift alignment. This allows us to measure both per-frame depth quality and inter-frame depth consistency. We employ our proposed motion module for video depth estimation in a feed-forward manner as described in Sec. 3.5 and compare our method against several DUSt3R variants, including DUSt3R [44], MASt3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. Given 6 frames at $288 \\times 512$ resolution on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global-alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method demonstrates comparable performance to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on the KITTI dataset. Besides, we", "bbox": [511, 628, 906, 900], "page_idx": 5},
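Per-sequence alignment fits a single scale (or scale and shift) over all frames of a video before computing depth metrics. A NumPy sketch; the median-ratio scale fit and the omission of validity masking are simplifying assumptions, not the exact evaluation code:

```python
import numpy as np

def align_sequence(pred, gt, use_shift=False):
    """Fit one scale (optionally plus shift) over ALL frames of a video,
    then apply it to every frame. pred, gt: (T, H, W) depth maps with
    positive predicted depths."""
    x, y = pred.reshape(-1), gt.reshape(-1)
    if use_shift:
        # least-squares fit of y ~ s * x + b
        A = np.stack([x, np.ones_like(x)], axis=1)
        (s, b), *_ = np.linalg.lstsq(A, y, rcond=None)
    else:
        s, b = np.median(y / x), 0.0   # robust single-scale fit (one choice)
    return s * pred + b
```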
  {"type": "table", "img_path": "images/f5f28d1c790be8bf149f762f29cd56a6fb328dbe358471623ca58d55907612bc.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">PointOdyssey [54]</td><td colspan=\"2\">ADT [31]</td><td colspan=\"2\">PStudio [20]</td><td colspan=\"2\">Average</td></tr><tr><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td></tr><tr><td>SpatialTracker* [47]</td><td>20.46</td><td>20.71</td><td>21.64</td><td>20.67</td><td>30.41</td><td>25.87</td><td>24.17</td><td>22.42</td></tr><tr><td>DUSt3R [44]</td><td>19.03</td><td>19.03</td><td>29.02</td><td>25.55</td><td>9.72</td><td>6.50</td><td>19.26</td><td>17.03</td></tr><tr><td>MASt3R [26]</td><td>16.58</td><td>17.35</td><td>27.36</td><td>26.46</td><td>11.78</td><td>8.09</td><td>18.57</td><td>17.30</td></tr><tr><td>MonST3R [52]</td><td>27.31</td><td>27.92</td><td>28.30</td><td>26.13</td><td>16.50</td><td>11.06</td><td>24.03</td><td>21.70</td></tr><tr><td>POMATO</td><td>33.20</td><td>33.58</td><td>31.57</td><td>28.22</td><td>24.59</td><td>19.79</td><td>29.79</td><td>27.20</td></tr></table>", "bbox": [205, 93, 787, 202], "page_idx": 6},
  {"type": "text", "text": "Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively.", "bbox": [89, 208, 906, 236], "page_idx": 6},
  {"type": "image", "img_path": "images/ffa2a8e80de9dcbc736d37d3eb5d1989e36111cb092a54c8fa4c690e19d35a0a.jpg", "image_caption": ["Figure 6. Qualitative comparison of dynamic scenes. Compared to MonST3R, our POMATO can provide more reliable motion masks, 3D point tracking, and reconstruction performance."], "image_footnote": [], "bbox": [112, 250, 888, 501], "page_idx": 6},
  {"type": "text", "text": "consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module.", "bbox": [88, 566, 482, 643], "page_idx": 6},
  {"type": "text", "text": "4.3. 3D Point Tracking", "text_level": 1, "bbox": [89, 651, 269, 667], "page_idx": 6},
  {"type": "text", "text": "For the 3D point tracking task, we use the Aria Digital Twin (ADT) [31] and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset, along with the validation set of the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground truth depth. The APD metric serves as a direct measure of the accuracy of the predicted tracking. We reformulate the datasets and project all the query points within a temporal window to the first frame. We report tracking results for lengths of 12 and 24 frames. As shown in Tab. 2, our POMATO achieves the best performance on both the PointOdyssey and ADT datasets. It is worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking with ground-truth camera intrinsics as", "bbox": [89, 674, 483, 900], "page_idx": 6},
  {"type": "text", "text": "additional input data. POMATO surpasses it on two datasets and improves the average APD metric by $23.3\\%$ and $21.4\\%$ for 12 frames and 24 frames, respectively. For DUSt3R-based methods, we use the output of $\\mathrm{Head}_2$ as tracking results. Evidently, the ambiguous matching representation limits their capability to handle this fine-grained 3D reconstruction task in dynamic scenes.", "bbox": [511, 566, 906, 675], "page_idx": 6},
| 979 |
+
{
|
| 980 |
+
"type": "text",
|
| 981 |
+
"text": "4.4. Camera Pose Estimation",
|
| 982 |
+
"text_level": 1,
|
| 983 |
+
"bbox": [
|
| 984 |
+
511,
|
| 985 |
+
681,
|
| 986 |
+
740,
|
| 987 |
+
696
|
| 988 |
+
],
|
| 989 |
+
"page_idx": 6
|
| 990 |
+
},
|
| 991 |
+
{
|
| 992 |
+
"type": "text",
|
| 993 |
+
"text": "Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with optical-flow assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5 while the 2D pseudo label is replaced by projecting the pointmap matching results to 2D coordinates with estimated camera intrinsic. Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot) are reported. The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains an overall state-of-the-art performance and signifi",
|
| 994 |
+
"bbox": [
|
| 995 |
+
511,
|
| 996 |
+
703,
|
| 997 |
+
906,
|
| 998 |
+
900
|
| 999 |
+
],
|
| 1000 |
+
"page_idx": 6
|
| 1001 |
+
},
{
"type": "table",
"img_path": "images/48af71f3300680b8034c43f62dca0f4717a244f56bdddfab423db451007292d7.jpg",
"table_caption": ["Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement."],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\">Temporal Length</td><td colspan=\"6\">Video Depth</td><td colspan=\"3\">Tracking (12 Frames)</td></tr><tr><td colspan=\"2\">Sintel [4]</td><td colspan=\"2\">Bonn [30]</td><td colspan=\"2\">KITTI [13]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr><tr><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr><tr><td>Pair-wise</td><td>0.548</td><td>46.2</td><td>0.087</td><td>94.0</td><td>0.113</td><td>89.5</td><td>32.06</td><td>29.87</td><td>23.10</td></tr><tr><td>6 frames</td><td>0.436</td><td>51.3</td><td>0.076</td><td>95.9</td><td>0.085</td><td>93.5</td><td>32.69</td><td>30.93</td><td>24.52</td></tr><tr><td>12 frames</td><td>0.416</td><td>53.6</td><td>0.075</td><td>96.1</td><td>0.086</td><td>93.3</td><td>33.20</td><td>31.57</td><td>24.59</td></tr></table>",
"bbox": [174, 88, 823, 186],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/d5e4b9b2b316a0891838b1d256865499a344168e46176581f1098b0fa64d8c42.jpg",
"table_caption": ["Table 4. Pose estimation. Our method achieves an overall best performance and improves the RPE rot metric significantly."],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">TUM [37]</td><td colspan=\"3\">Bonn [30]</td></tr><tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td></tr><tr><td>DUSt3R [44]</td><td>0.025</td><td>0.013</td><td>2.361</td><td>0.030</td><td>0.025</td><td>2.522</td></tr><tr><td>MASt3R [26]</td><td>0.027</td><td>0.015</td><td>1.910</td><td>0.031</td><td>0.025</td><td>2.478</td></tr><tr><td>MonST3R [52]</td><td>0.021</td><td>0.006</td><td>1.142</td><td>0.025</td><td>0.021</td><td>2.120</td></tr><tr><td>CUT3R [43]</td><td>0.023</td><td>0.016</td><td>0.510</td><td>0.028</td><td>0.033</td><td>2.569</td></tr><tr><td>POMATO</td><td>0.020</td><td>0.010</td><td>0.509</td><td>0.037</td><td>0.016</td><td>1.782</td></tr></table>",
"bbox": [104, 250, 470, 337],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/48fc00ed0ccff1357aa20e08011e8b828d049473515dabed56bb834c4997282a.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [102, 391, 220, 444],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/98a876741c25930c1c2f4bfce1d1c178379109f82b67d15153cad3df741cec13.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [102, 445, 218, 497],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/c01e277c9d6d6d62313b376a0e50c866e3c5aa3c837fecebdb575154bd8f9543.jpg",
"image_caption": ["Input Images", "Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation will be degraded."],
"image_footnote": [],
"bbox": [102, 500, 218, 551],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/abf58e9ec06158973e58dbe4a60955efcc728a923015cf9b8a45e5acad73ffcb.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [222, 392, 470, 459],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/a00bf8bcacaf47797f494012a320e2a40fdd47220aa6b3eb0c87cdcafa2a834a.jpg",
"image_caption": ["3D Reconstruction with our Pointmap Matching."],
"image_footnote": [],
"bbox": [222, 459, 470, 546],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/80719c660669e2d285012f6663ec105a6137bf97e42c4107de2e9c454bfec2f8.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">Bonn [30]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr><tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr><tr><td>W/O Head3</td><td>0.040</td><td>0.015</td><td>1.721</td><td>29.10</td><td>29.62</td><td>16.94</td></tr><tr><td>W/ Head3</td><td>0.037</td><td>0.016</td><td>1.782</td><td>32.06</td><td>29.87</td><td>23.10</td></tr></table>",
"bbox": [104, 637, 468, 691],
"page_idx": 7
},
{
"type": "text",
"text": "Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks.",
"bbox": [89, 702, 482, 744],
"page_idx": 7
},
{
"type": "text",
"text": "cantly improves the RPE-rot metric, surpassing MonST3R by $55.4\\%$ and $13.3\\%$ on the TUM and Bonn datasets.",
"bbox": [89, 773, 482, 804],
"page_idx": 7
},
{
"type": "text",
"text": "4.5. Ablation Study",
"text_level": 1,
"bbox": [89, 816, 243, 832],
"page_idx": 7
},
{
"type": "text",
"text": "We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise",
"bbox": [89, 839, 483, 901],
"page_idx": 7
},
{
"type": "text",
"text": "images (first-stage training), one using a shorter temporal window of 6 frames, and another using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking. Further improvement is achieved when the temporal window length increases from 6 frames to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in explicit rigid transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. Clearly, removing the pointmap matching head (using only $\\mathrm{Head}_2$ ) leads to an inevitable performance drop, emphasizing explicit correspondence modeling for reliable long-term tracking.",
"bbox": [511, 252, 906, 585],
"page_idx": 7
},
{
"type": "text",
"text": "5. Discussion and Conclusion",
"text_level": 1,
"bbox": [511, 598, 761, 614],
"page_idx": 7
},
{
"type": "text",
"text": "We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, thereby mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance in tasks where both geometry and matching are critical, most notably 3D point tracking. The downstream temporal tasks including 3D point tracking, video depth estimation, and 3D reconstruction can be easily applied in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance.",
"bbox": [511, 623, 906, 849],
"page_idx": 7
},
{
"type": "text",
"text": "Acknowledgement. This work was supported by the National Natural Science Foundation of China (No. 62206244)",
"bbox": [511, 849, 905, 895],
"page_idx": 7
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [91, 89, 187, 104],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 3",
"[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. abs/2311.15127, 2023. 3",
"[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv, 2024. 3",
"[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8",
"[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. CVPR, 2023. 2",
"[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zilong Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4",
"[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv:1912.03858, 2019. 3",
"[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 224-236, 2018. 3",
"[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023. 3",
"[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on robot learning, pages 1-16. PMLR, 2017. 6, 2",
"[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv:2403.12013, 2024. 3",
"[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3",
"[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8",
"[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning, 2023. 6"
],
"bbox": [93, 114, 483, 900],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 3",
"[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3",
"[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3",
"[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3",
"[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv:2410.20220, 2024. 2",
"[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 7, 8",
"[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. DynamicStereo: Consistent dynamic depth from stereo videos. CVPR, 2023. 6, 2",
"[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Co-tracker: It is better to track together. In Proc. ECCV, 2024. 3",
"[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3",
"[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4):139-1, 2023. 3",
"[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv:2407.05921, 2024. 7",
"[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. European Conference on Computer Vision, 2024. 2, 6, 7, 8",
"[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17627-17638, 2023. 3",
"[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf:"
],
"bbox": [516, 92, 903, 900],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3",
"[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Trans. Mach. Learn. Research, 2024. 3",
"[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 7855-7862. IEEE, 2019. 6, 7, 8",
"[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv:2306.06362, 2023. 7, 8",
"[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4058-4066, 2016. 3",
"[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 12179-12188, 2021. 5",
"[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video pop-up: Monocular 3d reconstruction of dynamic scenes. In European conference on computer vision, pages 583-598. Springer, 2014. 3",
"[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International journal of computer vision, 80:72-91, 2008. 3",
"[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. abs/2406.01493, 2024. 3",
"[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. pages 573-580, 2012. 7, 8",
"[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part II 16, pages 402–419. Springer, 2020. 3",
"[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Neural Information Processing Systems, 2021. 2",
"[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2",
"[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 2, 6"
],
"bbox": [91, 92, 482, 898],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 2, 3",
"[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8",
"[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8",
"[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. pages 4909-4916, 2020. 6, 2",
"[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1",
"[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2024. 2, 3, 7",
"[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv:2403.06090, 2024. 3",
"[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv:2411.08373, 2024. 2",
"[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3",
"[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv:2406.09414, 2024. 2, 3",
"[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arXiv:2410.03825, 2024. 2, 3, 6, 7, 8, 1",
"[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation. 2024. 2",
"[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 6, 7, 8, 2"
],
"bbox": [516, 92, 903, 825],
"page_idx": 9
},
{
"type": "text",
"text": "POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction",
"text_level": 1,
"bbox": [166, 85, 831, 130],
"page_idx": 10
},
{
"type": "text",
"text": "Supplementary Material",
"bbox": [380, 141, 614, 162],
"page_idx": 10
},
{
"type": "text",
"text": "A. Pointmap Matching for Global Alignment.",
"text_level": 1,
"bbox": [89, 178, 475, 195],
"page_idx": 10
},
{
"type": "text",
"text": "Given a sequence of video frames, the target of global alignment is to project all pairwise estimated pointmaps to the same global world coordinates. DUSt3R constructs a connectivity pairwise graph and aims to minimize the reprojection error for each image pair globally where the dynamic regions are supposed to be separated from the static regions. To this end, MonST3R [52] further introduces an assistant optical flow network [46] to help mask the dynamic regions and provide a pseudo label of 2D matching for minimizing the re-projection error in static regions. However, the introduced assistant model will introduce inevitable domain gaps and additional computation costs. Besides, the optical flow model is tailored for matching within two adjacent frames, suffering an obvious degeneration with the large view displacement. In POMATO, for an image pair $\\{\\mathbf{I}^i,\\mathbf{I}^j\\}$ , the dynamic mask $\\mathbf{D}^{j,i}$ is calculated by comparing the difference between $\\mathbf{X}^{j,i}$ and $\\mathbf{X}_m^{j,i}$ :",
"bbox": [89, 205, 483, 465],
"page_idx": 10
},
{
"type": "equation",
"text": "\n$$\n\\mathbf{D}^{j,i} = \\left\\| \\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i} \\right\\| > \\alpha, \\tag{9}\n$$\n",
"text_format": "latex",
"bbox": [191, 479, 482, 497],
"page_idx": 10
},
{
"type": "text",
"text": "where $\\alpha$ is a dynamic threshold defined as $3 \\times \\mathrm{median}(\\|\\mathbf{X}_m^{j,i} - \\mathbf{X}^{j,i}\\|)$ .",
"bbox": [89, 513, 482, 544],
"page_idx": 10
},
{
"type": "text",
"text": "Given the updated camera intrinsic $\\tilde{K}$ after an iteration of optimization, the target matching 2D coordinates $\\mathbf{F}_m^{j,i} \\in \\mathbb{R}^{H \\times W \\times 2}$ can be calculated as $\\mathbf{F}_m^{j,i} = p(\\tilde{\\mathbf{K}}\\mathbf{X}_m^{j,i})$ where $p$ is a mapping from 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be modified with our dynamic mask and 2D matching coordinates. Details about the optical flow loss are referred to MonST3R [52].",
"bbox": [89, 546, 482, 667],
"page_idx": 10
},
{
"type": "text",
"text": "B. Fast 3D Reconstruction with video POMATO",
"text_level": 1,
"bbox": [89, 688, 482, 722],
"page_idx": 10
},
{
"type": "text",
"text": "Given a sequence of images less than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images to the coordinate of the key frame as discussed in the Sec.3.4. Here, we provide more visualization results of this feed-forward manner and demonstrate the effectiveness of introducing the temporal motion module. As shown in Fig.8, directly applying the pairwise reconstruction will suffer from an obvious scale shift among different frames. After the temporal motion module, the consistency within the video sequence obtains an obvious enhancement.",
"bbox": [89, 734, 482, 900],
"page_idx": 10
},
{
"type": "text",
"text": "C. Training Data Details",
"text_level": 1,
"bbox": [513, 178, 723, 195],
"page_idx": 10
},
{
"type": "text",
"text": "The details about the training datasets can be found in Tab.6. The finetuning procedure of POMATO was conducted exclusively using synthetic training datasets.",
"bbox": [511, 203, 903, 250],
"page_idx": 10
},
{
"type": "text",
"text": "D. More Visualizations on Dynamic Scenes",
"text_level": 1,
"bbox": [511, 262, 875, 280],
"page_idx": 10
},
{
"type": "text",
"text": "We provide more visualizations in Fig. 9 and Fig. 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory.",
"bbox": [511, 287, 905, 364],
"page_idx": 10
},
{
"type": "table",
"img_path": "images/5ae28c25f828dac5cf5304f158000a3d4b15aaa5c22767f9cce24e599bcd63ea.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Dataset</td><td>Domain</td><td>Scene Type</td><td># of Frames</td><td># of Scenes</td><td>Dynamics</td><td>Ratio</td></tr><tr><td>PointOdyssey [54]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>200k</td><td>131</td><td>Realistic</td><td>57.1%</td></tr><tr><td>TartanAir [45]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>100k</td><td>163</td><td>None</td><td>14.3%</td></tr><tr><td>DynamicReplica [21]</td><td>Synthetic</td><td>Indoors</td><td>145k</td><td>524</td><td>Realistic</td><td>14.3%</td></tr><tr><td>ParallelDomain4D [40]</td><td>Synthetic</td><td>Outdoors</td><td>750k</td><td>15015</td><td>Driving</td><td>8.6%</td></tr><tr><td>Carla [10]</td><td>Synthetic</td><td>Outdoors</td><td>7k</td><td>5</td><td>Driving</td><td>5.7%</td></tr></table>",
"bbox": [156, 170, 841, 276],
"page_idx": 11
},
{
"type": "text",
"text": "Table 6. An overview of all training datasets and sample ratio. All datasets provide both camera pose, depth, and most of them include dynamic objects.",
"bbox": [89, 287, 906, 316],
"page_idx": 11
},
{
"type": "image",
"img_path": "images/133a5be75fe9852f50d9dc75a3e28f6a3f8ed363159786f123db3e08abb2c86a.jpg",
"image_caption": ["Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images less than temporal window length, our POMATO can directly obtain a global pointmap under the key frame coordinate."],
"image_footnote": [],
"bbox": [96, 484, 906, 787],
"page_idx": 11
},
{
"type": "image",
"img_path": "images/33c6bad035555d5e4e9fdaf94cf6b764db4cf1f575afef326383c07bccb07da0.jpg",
"image_caption": ["Figure 9. Compared with MonST3R, our POMATO can provide more complete dynamic masks and consistent geometry."],
"image_footnote": [],
"bbox": [109, 141, 890, 824],
"page_idx": 12
},
{
"type": "image",
"img_path": "images/1553dd9fea27bacf18cff9e88695799cf7d92dd8244a549641fdbd79c3b38df9.jpg",
"image_caption": ["Figure 10. MonST3R suffers obvious degeneration when the view displacement is large as reflected by the erroneous pose estimation while POMATO can still provide a consistent camera trajectory."],
"image_footnote": [],
"bbox": [101, 133, 890, 813],
"page_idx": 13
}
]
data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2504_05xxx/2504.05692/996e8671-5341-4c85-8ef1-45152320354c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1303f8d0c47942ef0efb7001b1259f90c18c30bc3b374614b7913fcbcd2d50df
size 12579857
data/2025/2504_05xxx/2504.05692/full.md
ADDED
@@ -0,0 +1,329 @@
# POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction

Songyan Zhang $^{1*}$ Yongtao Ge $^{2,3*}$ Jinyuan Tian $^{2*}$ Guangkai Xu $^{2}$ Hao Chen $^{2\boxtimes}$ Chen Lv $^{1}$ Chunhua Shen $^{2}$

<sup>1</sup>Nanyang Technological University, Singapore <sup>2</sup>Zhejiang University, China <sup>3</sup>The University of Adelaide, Australia



Figure 1. 3D reconstruction from an arbitrary dynamic video with POMATO. Without relying on external modules, POMATO can directly perform 3D reconstruction along with temporal 3D point tracking and dynamic mask estimation.





# Abstract

Recent approaches to 3D reconstruction in dynamic scenes primarily rely on the integration of separate geometry estimation and matching modules, where the latter plays a critical role in distinguishing dynamic regions and mitigating the interference caused by moving objects. Furthermore, the matching module explicitly models object motion, enabling the tracking of specific targets and advancing motion understanding in complex scenarios. Recently, the proposed representation of pointmap in DUSt3R suggests a potential solution to unify both geometry estimation and matching in 3D space, effectively reducing computational overhead by eliminating the need for redundant auxiliary modules. However, it still struggles with ambiguous correspondences in dynamic regions, which limits reconstruction performance in such scenarios. In this work, we present POMATO, a unified framework for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. Specifically, our method first learns an explicit matching relationship by mapping RGB pixels across different views to 3D pointmaps within a unified coordinate system. Furthermore, we introduce a temporal motion module for dynamic motions that ensures scale consistency across different frames and enhances performance in 3D reconstruction tasks requiring both precise geometry and reliable matching, most notably 3D point tracking. We show the effectiveness of our proposed POMATO by demonstrating the remarkable performance across multiple downstream tasks, including video depth estimation, 3D point tracking, and pose estimation. Code and models are publicly available at https://github.com/wyddmw/POMATO.



Figure 2. Ambiguity in 3D point matching in dynamic scenes with DUSt3R. Given representative corresponding pixels of background (orange) and moving foreground (red) in two different views, DUSt3R outputs a pair of 3D points within the same coordinate system. In static regions, identical pixels share the same 3D coordinates, which provides an accurate matching relationship in 3D space; but in moving regions, the 3D coordinates are inconsistent for corresponding pixels across views, leading to ambiguous 3D matching relationships.



# 1. Introduction

Image-based 3D reconstruction is a fundamental task in computer vision with a wide range of applications including SLAM [39], robotics [19, 49], autonomous driving [53], and novel view synthesis [5]. While substantial progress has been achieved in static 3D reconstruction [16, 23, 26, 44, 51], dynamic scenes remain a major hurdle due to complexities like non-rigid motion and deformation, which may hamper the learning of local structure and camera motion, thereby complicating accurate 3D reconstruction for dynamic scenes. These scenarios require explicit modeling of both scene geometry and object motion. Moreover, downstream reconstruction tasks, such as 3D point tracking, demand precise geometry estimation and robust matching across views. To effectively distinguish dynamic regions, it is essential to establish reliable correspondences between different frames. Some pioneering works have attempted to address dynamic motion by incorporating additional auxiliary matching modules, such as optical flow [42, 52] or 2D tracking [47]. However, these approaches may suffer from domain gaps and accumulated errors between modules, limiting their effectiveness. A unified framework that seamlessly integrates geometry estimation and matching for dynamic 3D reconstruction remains a critical and underexplored challenge.

Recently, DUSt3R [44] proposes a promising solution to address this challenge. It introduces the concept of a pointmap that assigns each pixel in an image to a corresponding 3D coordinate. The network utilizes a standard transformer-based encoder-decoder architecture and receives a pair of images as input. The system incorporates two parallel decoders to predict pointmaps for each view within the same coordinate system. However, this representation is limited to static matching and struggles in dynamic scenes, as illustrated in Fig. 2.

To address this problem, we present POMATO, a unified network for dynamic 3D reconstruction by marrying POintmap MAtching with Temporal mOtion. We argue that with iterative cross-attention modules across different views, matching features are well preserved in the decoder tokens. We thus introduce an auxiliary pointmap matching head to learn explicit correspondences. Specifically, for each pixel in the second view, the pointmap matching head predicts the corresponding 3D coordinates of its counterpart in the first view, under the shared coordinate system. Our proposed pointmap-based matching representation enables the establishment of explicit correspondences in 3D space, which can be directly leveraged for motion analysis, especially the estimation of dynamic regions. Moreover, we further extend our POMATO to handle 4D video sequences by introducing a temporal motion module that enhances the learning of temporal motions. This motion module promotes scale consistency across different frames and improves performance in tasks where both accurate geometry and reliable matching are paramount, most notably 3D point tracking. Compared with recent temporal 3D reconstruction methods [41, 43] based on an autoregression manner where the previous frames are blocked from the recently added frames, our temporal motion module is based on the self-attention mechanism along the temporal dimension, facilitating a comprehensive interaction across all frames. Our POMATO is trained in a two-stage manner. In the first stage, we use pairwise input images to learn fundamental geometry and matching capabilities. In the second stage, we extend the input to sequential video frames and incorporate the temporal motion module, enabling the model to effectively capture motions over time.
Our contributions can be summarized as threefold: First, we propose a novel approach that unifies the fundamental geometry estimation and motion understanding for dynamic 3D reconstruction into a single network by incorporating the representation of pointmap matching. Second, we introduce a temporal motion module to facilitate the interactions of motion features along the temporal dimension, which significantly improves the performance in tasks where both accurate geometry and precise matching are required for video sequential input, most notably 3D point tracking. Third, we demonstrate promising performance on 3D vision tasks, including video depth estimation, 3D point tracking, and camera pose estimation.

# 2. Related Work

Geometry estimation refers to the process of determining the spatial properties and structures from different forms of visual data. Direct recovery of 3D geometry from a single RGB image is by nature an ill-posed problem. Many recent works [3, 16, 23, 51] have tried to leverage strong pre-trained models to learn generalizable depthmaps from large-scale real and synthetic datasets to solve ambiguities. For example, Marigold [23], Geowizard [11], and GenPercept [48] aim at leveraging the generative priors from pre-trained diffusion models by finetuning them on synthetic datasets. Depthanything V2 [51] proposes to estimate a scale-and-shift invariant disparity map by finetuning the DINOv2 [29] model on synthetic datasets and large-scale pseudo labels. Depth Pro [3] further proposes a FOV head to estimate the metric depthmap from a single image without relying on camera intrinsics as input. Due to the scale ambiguity in monocular depth estimation models, ChronoDepth [36], DepthCrafter [17], and Depth-any-video [50] propose to learn temporally consistent depthmaps by leveraging the priors from a video generative model, i.e. SVD [2]. In another line of research, multi-view stereo reconstruction (MVS) methods seek to reconstruct visible surfaces from multiple viewpoints. Traditional MVS [12] and SfM pipelines break the reconstruction pipeline into several sub-problems, e.g., feature extraction [8], image matching [1, 27], triangulation, and bundle adjustment [7]. The chain is complicated and accumulates noise at every single step, thus often resulting in unsatisfactory performance in complex real-world scenes. Recognizing the limitations of previous MVS methods, the seminal work DUSt3R [44] proposes a 3D pointmap representation and trains a network on large-scale data to regress dense and accurate pointmaps from a pair of images. The camera intrinsics and relative camera poses can be implicitly inferred from the two-view pointmaps. However, it still can not handle reconstruction for dynamic scenes. MonST3R [52] directly finetuned the original DUSt3R model on synthetic datasets that contain dynamic scenes.

Motion representation. Optical flow is a commonly used representation for 2D motion. RAFT [38] is a representative work for pairwise optical flow estimation, which employs a 4D cost volume and recurrently estimates the optical flow. Some follow-up methods further extend it to multi-frame (3-5 frames) settings, which is still insufficient for long-range tracking. To resolve the problem, Particle Video [35] represents video motion by using a set of particles. Each particle is an image point sample with a long-duration trajectory and other properties. Particle videos have two key advantages over optical flow: (1) persistence through occlusions, and (2) multi-frame temporal context. Some recent works, PIPs [15], TAPIR [9] and Cotracker [22], have renewed interest in this representation and show promising long-term 2D point tracking results. Recognizing the advantage of the point representation, SpatialTracker [47] lifts the 2D points into 3D and performs tracking in the 3D space. Though it can handle occlusions and enhance 3D tracking accuracy, it still relies on a separate monocular depth estimator, which prevents it from performing 3D point tracking in an end-to-end fashion.

Multi-view dynamic reconstruction. Our work is closely connected to multi-view dynamic 3D reconstruction techniques. Early works [32, 34] take the straightforward approach of first pre-segmenting the scene into different regions, each corresponding to a single rigid part of an object, and then applying the rigid-SfM technique to each of the regions. Some of the recent Neural Radiance Fields (NeRF) [28] and Gaussian Splatting [24] based methods have achieved state-of-the-art results. However, most of these methods require simultaneous multi-view video inputs or require predefined templates [18]. Shape of motion [42] proposes a new dynamic scene representation that represents the dynamic scene as a set of persistent 3D Gaussians, and optimizes the representation from a monocular video by leveraging monocular depth estimation priors and 2D track estimates across frames.

# 3. Method

# 3.1. Preliminary

Figure 3. Overview of our training pipeline. (1) Stage I: built upon the DUSt3R [44] architecture, we introduce a third regression point-matching head, $\mathrm{Head}_3$ , which is in parallel to $\mathrm{Head}_2$ for explicit pointmap matching in 3D space. For each pixel in the second view, the output pointmap coordinate is the 3D point map of the corresponding pixel in the first view. (2) Stage II: we introduce a temporal fusion module in the three heads that enables multi-style sequential input for learning temporal motions.

The overview of our POMATO is demonstrated in Fig. 3. We adopt the definition of pointmap $\mathbf{X} \in \mathbb{R}^{H \times W \times 3}$ in DUSt3R [44] as a dense 2D field of 3D points where each point corresponds to its respective RGB pixel. Given a pair of input images $\mathbf{I}^1, \mathbf{I}^2 \in \mathbb{R}^{H \times W \times 3}$ from two different views, a weight-sharing ViT first extracts the corresponding features $\mathbf{F}^1, \mathbf{F}^2$ for each view. Two parallel branches are employed to decode the geometric structures and enhance the feature alignment via cross-attention in decoder modules, following a regression head to estimate pointmaps $\mathbf{X}^{1,1}, \mathbf{X}^{2,1} \in \mathbb{R}^{H \times W \times 3}$ along with a confidence map $\mathbf{C}^{1,1}, \mathbf{C}^{2,1} \in \mathbb{R}^{H \times W}$ for each image view. Generally, $\mathbf{X}^{n,m}$ indicates the pointmap $\mathbf{X}^n$ from camera $n$ expressed in camera $m$ 's coordinate frame, which is obtained by a rigid transformation:

$$
\mathbf{X}^{n,m} = \mathbf{P}_m \mathbf{P}_n^{-1} h\left(\mathbf{X}^n\right), \tag{1}
$$

where $\mathbf{P}_m, \mathbf{P}_n \in \mathbb{R}^{3 \times 4}$ are world-to-camera poses for camera $m$ and camera $n$ , respectively, and $h(\mathbf{X}^n)$ is a homogeneous mapping for the 3D coordinates in the camera coordinate frame of camera $n$ .
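To make the pointmap convention concrete, here is a minimal NumPy sketch of the rigid transformation in Eq. (1). The function name and the 3x4 `[R | t]` pose layout are illustrative assumptions of ours, not code from the POMATO release:

```python
import numpy as np

def transform_pointmap(X_n: np.ndarray, P_n: np.ndarray, P_m: np.ndarray) -> np.ndarray:
    """Express pointmap X^n (H, W, 3) of camera n in camera m's frame, per Eq. (1).

    P_n, P_m are 3x4 world-to-camera poses assumed in [R | t] form.
    """
    H, W, _ = X_n.shape

    def to_4x4(P: np.ndarray) -> np.ndarray:
        # Promote a 3x4 pose to 4x4 homogeneous form so it can be inverted.
        T = np.eye(4)
        T[:3, :] = P
        return T

    T_n, T_m = to_4x4(P_n), to_4x4(P_m)
    # h(X^n): append a homogeneous 1 to every 3D point.
    pts = np.concatenate([X_n.reshape(-1, 3), np.ones((H * W, 1))], axis=1)
    # X^{n,m} = P_m P_n^{-1} h(X^n): camera n -> world -> camera m.
    pts_m = (T_m @ np.linalg.inv(T_n) @ pts.T).T[:, :3]
    return pts_m.reshape(H, W, 3)
```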
Decoder 1 and its regression head estimate the 3D points for $\mathbf{I}^1$ in its own coordinate system, while Decoder 2 and its regression head are responsible for estimating pixel-wise 3D coordinates for $\mathbf{I}^2$ in $\mathbf{I}^1$ 's coordinate system after a rigid transformation of global rotation and translation. In the following, we first introduce our POMATO with pairwise input images and then extend it to video sequence input with our temporal motion module.

# 3.2. Pointmap Matching with Pairwise Input

As discussed before, the definition of $\mathbf{X}^{2,1}$ depicts a rigid camera transformation that is too ambiguous to reflect explicit matching relationships in dynamic regions. To tackle this, we propose to formulate an explicit pointmap matching $\mathbf{X}_m^{2,1} \in \mathbb{R}^{H \times W \times 3}$ that maps dense RGB pixels of $\mathbf{I}^2$ to 3D coordinates of corresponding pixels in $\mathbf{I}^1$ under the first image's coordinate system. Given a 2D query pixel at $(x_2, y_2)$ in $\mathbf{I}^2$ and its corresponding pixel at $(x_1, y_1)$ in $\mathbf{I}^1$, the matched pointmap at $(x_2, y_2)$ in $\mathbf{I}^2$ is:
$$
\mathbf{X}_m^{2,1}\left(x_2, y_2\right) = \mathbf{X}^{1,1}\left(x_1, y_1\right), \tag{2}
$$

where $(x,y)$ indicates the coordinates of the 2D grid. For the representative dynamic point (red) in Fig. 2, the pointmap matching result is the 3D coordinate of point A in the coordinate system of the first image. As shown in Fig. 3, $\mathbf{X}_m^{2,1}$ and $\mathbf{X}^{1,1}$ are supposed to match perfectly in 3D space on the premise of neglecting occluded regions. We argue that the set of decoder tokens from the second branch preserves abundant matching information with iterative cross-attentions, so we introduce a matching head with the same architecture as $\mathrm{Head}_1$ and $\mathrm{Head}_2$ . The supervision for pointmap matching $\mathbf{X}_m^{2,1}$ still follows the 3D regression loss, which is defined as the Euclidean distance:

$$
\mathcal{L}_{\mathrm{m}} = \left\| \frac{1}{z_m} \mathbf{X}_m^{2,1} - \frac{1}{\bar{z}_m} \bar{\mathbf{X}}_m^{2,1} \right\|, \tag{3}
$$

where $\bar{\mathbf{X}}_m^{2,1}$ is the ground truth pointmap matching, which can be obtained following Eq. 2 on the 2D tracking dataset with the depth and camera information. $z_{m},\bar{z}_{m}$ are the same norm factor defined in DUSt3R. The matching confidence $\mathbf{C}_m^{2,1}$ is also learned following the confidence loss for $\mathrm{Head}_1$ and $\mathrm{Head}_2$ within valid regions:

$$
\mathcal{L}_{\mathrm{mconf}} = \mathbf{C}_m^{2,1} \mathcal{L}_{\mathrm{m}} - \alpha \log \mathbf{C}_m^{2,1} \tag{4}
$$
The final loss $\mathcal{L}$ of our POMATO for pairwise input is a combination of the predefined DUSt3R loss $\mathcal{L}_{\mathrm{DUSt3R}}$, matching loss $\mathcal{L}_{\mathrm{m}}$, and matching confidence loss $\mathcal{L}_{\mathrm{mconf}}$. When training our POMATO for pairwise input images at the first stage, the parameters in the encoder are frozen.
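As a rough illustration of Eq. (3) and Eq. (4), the following hedged PyTorch sketch computes the confidence-weighted matching loss. The normalization factor follows the DUSt3R-style mean point distance; all names and the default `alpha` are our illustrative assumptions, not the authors' implementation:

```python
import torch

def matching_loss(X_m: torch.Tensor, X_m_gt: torch.Tensor,
                  conf: torch.Tensor, valid: torch.Tensor,
                  alpha: float = 0.2) -> torch.Tensor:
    """Confidence-weighted pointmap-matching loss in the spirit of Eq. (3)-(4).

    X_m, X_m_gt: (H, W, 3) predicted / ground-truth matched pointmaps.
    conf: (H, W) matching confidence C_m, assumed strictly positive
          (DUSt3R parameterizes it as 1 + exp(.)).
    valid: (H, W) boolean mask of pixels with ground truth.
    """
    # DUSt3R-style norm factor z: mean distance of valid points to the origin.
    z = X_m[valid].norm(dim=-1).mean()
    z_gt = X_m_gt[valid].norm(dim=-1).mean()
    # Eq. (3): Euclidean distance between scale-normalized pointmaps.
    l_m = (X_m / z - X_m_gt / z_gt).norm(dim=-1)
    # Eq. (4): confidence-weighted regression with a log-confidence regularizer.
    l_mconf = conf * l_m - alpha * torch.log(conf)
    return l_mconf[valid].mean()
```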
# 3.3. Dynamic Mask Estimation

Taking advantage of the explicit pointmap matching head, our POMATO can directly perform dynamic mask estimation without introducing an assistant module such as an optical flow model, which avoids the additional computation cost and the potential domain gap. For an image pair $\{\mathbf{I}^i,\mathbf{I}^j\}$ along with the estimation of $\mathbf{X}^{j,i}$ from $\mathrm{Head}_2$ and $\mathbf{X}_{m}^{j,i}$ from $\mathrm{Head}_3$, the dynamic mask $\mathbf{D}^{j,i}$ can be obtained by comparing the difference between $\mathbf{X}^{j,i}$ and $\mathbf{X}_{m}^{j,i}$ :

$$
\mathbf{D}^{j,i} = \left\| \mathbf{X}_m^{j,i} - \mathbf{X}^{j,i} \right\| > \alpha, \tag{5}
$$

where $\alpha$ is a dynamic threshold defined as $3 \times \mathrm{median}(\|\mathbf{X}_m^{j,i} - \mathbf{X}^{j,i}\|)$. The explicit dynamic mask can be incorporated into the global alignment process to minimize the interference of moving objects for pose estimation and 3D reconstruction. Details on the incorporation of dynamic masks for global alignment are provided in the supplementary materials.
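Eq. (5) reduces to a few tensor operations. A minimal PyTorch sketch, assuming both pointmaps are `(H, W, 3)` tensors already expressed in the first view's coordinate frame:

```python
import torch

def dynamic_mask(X_m: torch.Tensor, X: torch.Tensor, scale: float = 3.0) -> torch.Tensor:
    """Dynamic-region mask per Eq. (5): pixels where the matched pointmap
    (Head3) and the rigidly transformed pointmap (Head2) disagree."""
    dist = (X_m - X).norm(dim=-1)     # ||X_m^{j,i} - X^{j,i}||, shape (H, W)
    alpha = scale * dist.median()     # dynamic threshold: 3 x median distance
    return dist > alpha               # True on moving regions
```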
| 104 |
+
|
| 105 |
+
# 3.4. Temporal Motion Module
|
| 106 |
+
|
| 107 |
+
With the fundamental capability of geometric estimation and pointmap matching for pairwise images, we follow [6] and extend our POMATO to 4D video sequences by inserting a transformer-based motion module into the vanilla DPT head to construct the "temporal DPT head", which is illustrated in Fig.4. For a set of decoder tokens $\mathbf{G} \in \mathbb{R}^{B,T,N,C}$
|
| 108 |
+
|
| 109 |
+

|
| 110 |
+
Figure 4. Architecture of our temporal motion module. We insert a transformer-based motion module (in shallow yellow) into the vanilla DPT [33] head to enhance the temporal consistency.
|
| 111 |
+
|
| 112 |
+
where $B, T, N, C$ represent the batch size, window length of a video sequence, token number, and token dimension, respectively, we merge the token number dimension into the batch axis and apply the motion module which consists of two blocks of standard multi-head self-attention modules and feed-forward networks along the temporal dimension $T$ . To reduce the computation cost, the temporal motion modules are applied to features of low resolution.
# 3.5. Downstream Temporal Tasks

Given a video sequence of $T$ frames $\mathbf{I}^{t_1},\mathbf{I}^{t_2},\ldots ,\mathbf{I}^{t_T}$, we construct a task-specific set of stereo image pairs. As illustrated in Fig. 5, the flexible construction of input pairs, combined with the proposed temporal motion module and pointmap matching head, enables POMATO to seamlessly address downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction. The keyframe selection strategy and input formulation for each task are detailed below.
In addition to the default regression losses for $\mathrm{Head}_1$ and $\mathrm{Head}_2$, and the predefined losses in Eq. 3 and Eq. 4 for $\mathrm{Head}_3$, we further introduce a temporal consistency loss, $\mathcal{L}_{\mathrm{t}}$, which is described in detail below.
Figure 5. Inference pipelines for point tracking, video depth, and multi-view reconstruction. $t_k$ indicates the keyframe. With the help of the motion module and flexible input construction, POMATO can be easily applied to downstream temporal tasks.

3D Point Tracking. As illustrated at the top of Fig. 5, the keyframe is set to the first image of the global video sequence and fed to the proposed $\mathrm{Head}_3$ to obtain the pointmap matching result of each query point (initialized on the first image) in the coordinate system of each reference frame, $\{\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\mathbf{X}_{m}^{t_{1},t_{3}},\dots ,\mathbf{X}_{m}^{t_{1},t_{T}}\}$, while the set of reference frames $\{\mathbf{I}^{t_1},\mathbf{I}^{t_2},\mathbf{I}^{t_3},\dots ,\mathbf{I}^{t_T}\}$ is fed to $\mathrm{Head}_1$ to obtain the pointmap in each ego coordinate system. The dense tracking results can be further sparsified by indexing the 2D coordinates. When running inference on a video longer than $T$ frames, a simple sliding-window approach with an overlap of four frames is adopted to enhance the consistency between adjacent video windows. The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ for tracking is:

$$
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {m} ^ {t _ {1} , t _ {i}}}{z _ {m} ^ {T}} - \frac {\bar {\mathbf {X}} _ {m} ^ {t _ {1} , t _ {i}}}{\bar {z} _ {m} ^ {T}} \right\| + \left\| \frac {\mathbf {X} ^ {t _ {i} , t _ {i}}}{z ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {6}
$$
where $z_{m}^{T} = \mathrm{norm}\left(\mathbf{X}_{m}^{t_{1},t_{1}},\mathbf{X}_{m}^{t_{1},t_{2}},\dots,\mathbf{X}_{m}^{t_{1},t_{T}}\right)$ and $\bar{z}_{m}^{T} = \mathrm{norm}\left(\bar{\mathbf{X}}_m^{t_1,t_1},\bar{\mathbf{X}}_m^{t_1,t_2},\dots,\bar{\mathbf{X}}_m^{t_1,t_T}\right)$; the factors $z^{T}$ and $\bar{z}^{T}$ are defined analogously over the $\mathrm{Head}_1$ outputs.
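The sketch below (ours) mirrors Eq. 6 under the simplifying assumption that $\mathrm{norm}(\cdot)$ is the mean point distance over the whole window; the paper inherits its exact definition from DUSt3R.

```python
import torch

def tracking_consistency_loss(Xm_seq, Xm_gt_seq, X_seq, X_gt_seq):
    """Sketch of Eq. 6: window-level, scale-normalized regression for
    tracking. Each argument is a (T, H, W, 3) tensor stacking per-frame
    predictions (or ground truths) of one temporal window."""

    def norm(x):
        # Window-wide normalization factor: mean point distance over all frames.
        return x.norm(dim=-1).mean()

    term_m = (Xm_seq / norm(Xm_seq) - Xm_gt_seq / norm(Xm_gt_seq)).norm(dim=-1)
    term_g = (X_seq / norm(X_seq) - X_gt_seq / norm(X_gt_seq)).norm(dim=-1)
    return (term_m + term_g).mean()
```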
Video Depth Estimation. As shown in the middle of Fig. 5, the input video sequence is formulated as a set of identical image pairs $\{(\mathbf{I}^{t_1},\mathbf{I}^{t_1}),(\mathbf{I}^{t_2},\mathbf{I}^{t_2}),\dots,(\mathbf{I}^{t_T},\mathbf{I}^{t_T})\}$ and fed to $\mathrm{Head}_1$ and $\mathrm{Head}_2$, whose predictions coincide: $\{\mathbf{X}^{t_1,t_1},\mathbf{X}^{t_2,t_2},\dots,\mathbf{X}^{t_T,t_T}\}$. We use the output of $\mathrm{Head}_1$ as the final video depth estimation. The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ is defined as:
$$
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} _ {1} ^ {t _ {i} , t _ {i}}}{z _ {1} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\| + \left\| \frac {\mathbf {X} _ {2} ^ {t _ {i} , t _ {i}}}{z _ {2} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {i}}}{\bar {z} ^ {T}} \right\|, \tag {7}
$$
where $\mathbf{X}_1^{t_i,t_i}$ and $\mathbf{X}_2^{t_i,t_i}$ denote the outputs of $\mathrm{Head}_1$ and $\mathrm{Head}_2$, respectively, and $\bar{\mathbf{X}}^{t_i,t_i}$ is the ground-truth pointmap.
3D Reconstruction. Assisted by the temporal motion module, redundant post-processing operations such as global alignment can be omitted, allowing the reconstructed 3D point cloud to be obtained in a feed-forward manner. As shown at the bottom of Fig. 5, the keyframe is set to the last frame $\mathbf{I}^{t_T}$ within the temporal window of length $T$ and is fed to $\mathrm{Head}_1$, yielding the set $\{\mathbf{X}^{t_T,t_T},\mathbf{X}^{t_T,t_T},\dots,\mathbf{X}^{t_T,t_T}\}$. All the reference frames are input to $\mathrm{Head}_2$ so that the target pointmaps $\{\mathbf{X}^{t_1,t_T},\mathbf{X}^{t_2,t_T},\dots,\mathbf{X}^{t_T,t_T}\}$ are aligned in the coordinate system of the keyframe. The temporal consistency loss $\mathcal{L}_{\mathrm{t}}$ is:
$$
\mathcal {L} _ {\mathrm {t}} = \frac {1}{T} \sum_ {i = 1} ^ {T} \left\| \frac {\mathbf {X} ^ {t _ {T} , t _ {T}}}{z _ {1} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {T} , t _ {T}}}{\bar {z} _ {1} ^ {T}} \right\| + \left\| \frac {\mathbf {X} ^ {t _ {i} , t _ {T}}}{z _ {2} ^ {T}} - \frac {\bar {\mathbf {X}} ^ {t _ {i} , t _ {T}}}{\bar {z} _ {2} ^ {T}} \right\| \tag {8}
$$
We further freeze the parameters of $\mathrm{Decoder}_1$ and $\mathrm{Decoder}_2$ when training the temporal downstream tasks in the second stage. In our work, the temporal window length $T$ is set to 12. Additional explorations of the temporal length can be found in Sec. 4.
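The following sketch summarizes the task-specific pair construction of this section; the function name is ours, and the orientation of each pair is inferred from the superscript convention that a pair $(\mathbf{I}^a, \mathbf{I}^b)$ yields $\mathbf{X}^{a,a}$ from $\mathrm{Head}_1$ and $\mathbf{X}^{b,a}$ from $\mathrm{Head}_2$.

```python
def build_pairs(frames, task):
    """Sketch of the task-specific pair construction for one temporal
    window. A pair (I_a, I_b) is assumed to yield X^{a,a} from Head_1 and
    X^{b,a} / X_m^{b,a} from Head_2 / Head_3."""
    T = len(frames)
    if task == "tracking":
        # Keyframe t_1 as second view: Head_3 gives X_m^{t_1, t_i},
        # Head_1 gives the ego pointmap X^{t_i, t_i}.
        return [(frames[i], frames[0]) for i in range(T)]
    if task == "video_depth":
        # Identical pairs (I^{t_i}, I^{t_i}).
        return [(frames[i], frames[i]) for i in range(T)]
    if task == "reconstruction":
        # Keyframe t_T as first view: Head_2 aligns X^{t_i, t_T}
        # in the keyframe's coordinate system.
        return [(frames[-1], frames[i]) for i in range(T)]
    raise ValueError(f"unknown task: {task}")
```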
<table><tr><td rowspan="2">Alignment</td><td rowspan="2">Method</td><td rowspan="2">Optim.</td><td rowspan="2">Onl.</td><td colspan="2">Sintel [4]</td><td colspan="2">BONN [30]</td><td colspan="2">KITTI [13]</td></tr><tr><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td></tr><tr><td rowspan="6">Per-sequence scale</td><td>DUSt3R-GA [44]</td><td>✓</td><td></td><td>0.656</td><td>45.2</td><td>0.155</td><td>83.3</td><td>0.144</td><td>81.3</td></tr><tr><td>MASt3R-GA [26]</td><td>✓</td><td></td><td>0.641</td><td>43.9</td><td>0.252</td><td>70.1</td><td>0.183</td><td>74.5</td></tr><tr><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.378</td><td>55.8</td><td>0.067</td><td>96.3</td><td>0.168</td><td>74.4</td></tr><tr><td>Spann3R [41]</td><td></td><td>✓</td><td>0.622</td><td>42.6</td><td>0.144</td><td>81.3</td><td>0.198</td><td>73.7</td></tr><tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.421</td><td>47.9</td><td>0.078</td><td>93.7</td><td>0.118</td><td>88.1</td></tr><tr><td>POMATO</td><td></td><td>✓</td><td>0.416</td><td>53.6</td><td>0.074</td><td>96.1</td><td>0.085</td><td>93.3</td></tr><tr><td rowspan="3">Per-sequence scale & shift</td><td>MonST3R-GA [52]</td><td>✓</td><td></td><td>0.335</td><td>58.5</td><td>0.063</td><td>96.4</td><td>0.104</td><td>89.5</td></tr><tr><td>CUT3R [43]</td><td></td><td>✓</td><td>0.466</td><td>56.2</td><td>0.111</td><td>88.3</td><td>0.075</td><td>94.3</td></tr><tr><td>POMATO</td><td></td><td>✓</td><td>0.345</td><td>57.9</td><td>0.072</td><td>96.5</td><td>0.084</td><td>93.4</td></tr></table>

Table 1. Video depth evaluation. We report scale-invariant and scale-&-shift-invariant depth accuracy on the Sintel [4], Bonn [30], and KITTI [13] datasets. Methods requiring global alignment are marked “GA”, while “Optim.” and “Onl.” indicate optimization-based and online methods, respectively. The best and second-best results in each category are bold and underlined, respectively.

# 4. Experiments

# 4.1. Experimental Details

Training data. We train our network on a mixture of five datasets: PointOdyssey [54], TartanAir [45], ParallelDomain4D [40], DynamicReplica [21], and Carla (0.9.15) [10]. The number of samples and the sampling ratio of each dataset can be found in the supplementary materials. All datasets include pixel-accurate ground-truth depth as well as camera intrinsics and extrinsics, and they cover a wide variety of dynamic scenes across both indoor and outdoor environments. Among them, PointOdyssey and DynamicReplica provide additional 2D trajectory annotations for dynamic objects, which can be used to construct pointmap matching ground truth following Eq. 2. All datasets are used to supervise geometry learning on $\mathrm{Head}_1$ and $\mathrm{Head}_2$, while only PointOdyssey, DynamicReplica, and TartanAir are used to train the proposed pointmap matching head.
Training and inference details. Our model architecture is based on the publicly available DUSt3R [44] model, utilizing the same backbone consisting of a ViT-Large encoder and a ViT-Base decoder. To fully leverage MonST3R's geometry estimation capabilities in dynamic scenes, we initialize our model with the publicly available MonST3R checkpoint. The newly introduced pointmap matching head is initialized from the pretrained $\mathrm{Head}_2$ weights of MonST3R, and the temporal motion module is initialized following [14]. We train our network for 10 epochs with a cosine learning rate schedule and an initial learning rate of 1e-4. In the first stage, which involves pairwise training, we use a batch size of 16 on 4 A100 (40 GB) GPUs. In the second stage, where the temporal motion module is introduced, the batch size is set to 4 with a fixed temporal window length of 12. During each training iteration, we randomly sample one of the downstream tasks (3D point tracking, video depth estimation, or 3D reconstruction) to construct the input pairs and apply the corresponding loss function.
# 4.2. Video Depth Estimation

Following MonST3R [52] and CUT3R [43], we rescale all predictions from the same video to align them, using two forms of alignment: per-sequence scale alignment and per-sequence scale-and-shift alignment. This allows us to measure both per-frame depth quality and inter-frame depth consistency. We employ the proposed motion module for video depth estimation in a feed-forward manner as described in Sec. 3.5 and compare our method against several DUSt3R variants, including DUSt3R [44], MASt3R [26], MonST3R [52], Spann3R [41], and CUT3R [43]. Given 6 frames at $288 \times 512$ resolution on an NVIDIA 4070 GPU, POMATO reconstructs the 3D point cloud in 0.7 seconds, whereas global-alignment-based methods such as MonST3R require 5.8 seconds. As shown in Tab. 1, our method performs comparably to the global alignment (GA)-based MonST3R [52] on the Sintel [4] and BONN [30] datasets, while surpassing it on the KITTI dataset.
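For reference, the sketch below illustrates the two alignment protocols; the estimators (median scaling, and a least-squares scale and shift fitted over all frames of a sequence) are common choices and our assumptions, not necessarily the exact ones used here.

```python
import numpy as np

def align_sequence(pred, gt, shift=False):
    """Sketch of per-sequence alignment for video depth evaluation: a
    single scale (and optionally shift) is fitted over ALL frames of a
    sequence, so inter-frame consistency is measured as well.

    pred, gt: arrays of valid predicted / ground-truth depths over the
    whole sequence.
    """
    pred, gt = pred.ravel(), gt.ravel()
    if shift:
        # Least-squares fit of gt ~= s * pred + b over the sequence.
        A = np.stack([pred, np.ones_like(pred)], axis=1)
        s, b = np.linalg.lstsq(A, gt, rcond=None)[0]
        return s * pred + b
    # Scale-only alignment via median scaling.
    return (np.median(gt) / np.median(pred)) * pred
```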
<table><tr><td rowspan="2">Method</td><td colspan="2">PointOdyssey [54]</td><td colspan="2">ADT [31]</td><td colspan="2">PStudio [20]</td><td colspan="2">Average</td></tr><tr><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td><td>L-12</td><td>L-24</td></tr><tr><td>SpatialTracker* [47]</td><td>20.46</td><td>20.71</td><td>21.64</td><td>20.67</td><td>30.41</td><td>25.87</td><td>24.17</td><td>22.42</td></tr><tr><td>DUSt3R [44]</td><td>19.03</td><td>19.03</td><td>29.02</td><td>25.55</td><td>9.72</td><td>6.50</td><td>19.26</td><td>17.03</td></tr><tr><td>MASt3R [26]</td><td>16.58</td><td>17.35</td><td>27.36</td><td>26.46</td><td>11.78</td><td>8.09</td><td>18.57</td><td>17.30</td></tr><tr><td>MonST3R [52]</td><td>27.31</td><td>27.92</td><td>28.30</td><td>26.13</td><td>16.50</td><td>11.06</td><td>24.03</td><td>21.70</td></tr><tr><td>POMATO</td><td>33.20</td><td>33.58</td><td>31.57</td><td>28.22</td><td>24.59</td><td>19.79</td><td>29.79</td><td>27.20</td></tr></table>

Table 2. 3D tracking evaluation. We report the APD metric to evaluate 3D point tracking on the PointOdyssey [54], ADT [31], and PStudio [20] datasets. L-12 and L-24 indicate tracking within the temporal length of 12 frames and 24 frames, respectively.
Figure 6. Qualitative comparison on dynamic scenes. Compared to MonST3R, our POMATO provides more reliable motion masks, 3D point tracking, and reconstruction.

Besides, we consistently outperform the state-of-the-art online method, CUT3R [43], across various settings. These results underscore the effectiveness of our approach, specifically (1) the joint learning of geometry and pointmap matching, and (2) the temporal motion module.
# 4.3. 3D Point Tracking

For the 3D point tracking task, we use the Aria Digital Twin (ADT) [31] and Panoptic Studio (PStudio) [20] benchmarks from the TAPVid-3D [25] dataset, along with the validation set of the PointOdyssey [54] dataset. We report the Average Percent Deviation (APD) metric, which quantifies the average percentage of points within a threshold relative to the ground-truth depth and thus serves as a direct measure of tracking accuracy. We reformulate the datasets by projecting all query points within a temporal window to the first frame, and report tracking results for temporal lengths of 12 and 24 frames. As shown in Tab. 2, POMATO achieves the best performance on both the PointOdyssey and ADT datasets. It is worth mentioning that SpatialTracker [47] is a state-of-the-art network tailored for 3D point tracking that takes the ground-truth camera intrinsics as additional input; POMATO surpasses it on two datasets and improves the average APD metric by $23.3\%$ and $21.4\%$ for 12 and 24 frames, respectively. For DUSt3R-based methods, we use the output of $\mathrm{Head}_2$ as the tracking result; evidently, their ambiguous matching representation limits their ability to handle this fine-grained 3D reconstruction task in dynamic scenes.
# 4.4. Camera Pose Estimation

Following DUSt3R-based methods, we perform global alignment with the model trained in the first stage on the Bonn [30] and TUM [37] datasets. The sampling stride is set to 5 for the Bonn dataset and 3 for the TUM dataset. Compared with the optical-flow-assisted global alignment in MonST3R, the dynamic mask is computed according to Eq. 5, while the 2D pseudo label is replaced by projecting the pointmap matching results to 2D coordinates with the estimated camera intrinsics. We report the Absolute Translation Error (ATE), Relative Translation Error (RPE trans), and Relative Rotation Error (RPE rot). The evaluation results over 40 frames are reported in Tab. 4. Notably, POMATO obtains overall state-of-the-art performance and significantly improves the RPE rot metric, surpassing MonST3R by $55.4\%$ and $13.3\%$ on the TUM and Bonn datasets, respectively.
<table><tr><td rowspan="3">Temporal Length</td><td colspan="6">Video Depth</td><td colspan="3">Tracking (12 Frames)</td></tr><tr><td colspan="2">Sintel [4]</td><td colspan="2">Bonn [30]</td><td colspan="2">KITTI [13]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr><tr><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>Abs Rel ↓</td><td>δ<1.25 ↑</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr><tr><td>Pair-wise</td><td>0.548</td><td>46.2</td><td>0.087</td><td>94.0</td><td>0.113</td><td>89.5</td><td>32.06</td><td>29.87</td><td>23.10</td></tr><tr><td>6 frames</td><td>0.436</td><td>51.3</td><td>0.076</td><td>95.9</td><td>0.085</td><td>93.5</td><td>32.69</td><td>30.93</td><td>24.52</td></tr><tr><td>12 frames</td><td>0.416</td><td>53.6</td><td>0.075</td><td>96.1</td><td>0.086</td><td>93.3</td><td>33.20</td><td>31.57</td><td>24.59</td></tr></table>

Table 3. Ablation study on the temporal motion module. The introduction of the temporal motion module brings a significant improvement. As the temporal window length enlarges from 6 frames to 12 frames, we obtain an overall consistent improvement.
<table><tr><td rowspan="2">Method</td><td colspan="3">TUM [37]</td><td colspan="3">Bonn [30]</td></tr><tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td></tr><tr><td>DUSt3R [44]</td><td>0.025</td><td>0.013</td><td>2.361</td><td>0.030</td><td>0.025</td><td>2.522</td></tr><tr><td>MASt3R [26]</td><td>0.027</td><td>0.015</td><td>1.910</td><td>0.031</td><td>0.025</td><td>2.478</td></tr><tr><td>MonST3R [52]</td><td>0.021</td><td>0.006</td><td>1.142</td><td>0.025</td><td>0.021</td><td>2.120</td></tr><tr><td>CUT3R [43]</td><td>0.023</td><td>0.016</td><td>0.510</td><td>0.028</td><td>0.033</td><td>2.569</td></tr><tr><td>POMATO</td><td>0.020</td><td>0.010</td><td>0.509</td><td>0.037</td><td>0.016</td><td>1.782</td></tr></table>

Table 4. Pose estimation. Our method achieves the overall best performance and improves the RPE rot metric significantly.

Figure 7. Effectiveness of our pointmap matching head. Without explicitly filtering out the motion area, both pose and geometry estimation are degraded. (Panels: input images; 3D reconstruction with our pointmap matching.)

<table><tr><td rowspan="2">Method</td><td colspan="3">Bonn [30]</td><td>PointOdyssey [54]</td><td>ADT [31]</td><td>PStudio [20]</td></tr><tr><td>ATE ↓</td><td>RPE trans ↓</td><td>RPE rot ↓</td><td>APD ↑</td><td>APD ↑</td><td>APD ↑</td></tr><tr><td>W/O Head3</td><td>0.040</td><td>0.015</td><td>1.721</td><td>29.10</td><td>29.62</td><td>16.94</td></tr><tr><td>W/ Head3</td><td>0.037</td><td>0.016</td><td>1.782</td><td>32.06</td><td>29.87</td><td>23.10</td></tr></table>

Table 5. Ablation study on the effectiveness of the pointmap matching head. The comparisons are reported on the pose estimation and 3D point tracking tasks.
# 4.5. Ablation Study
We conduct extensive ablation studies to evaluate the effectiveness of the temporal motion module and the proposed pointmap matching head. As shown in Table 3, we report results for three models: one trained with only pairwise images (first-stage training), one using a shorter temporal window of 6 frames, and one using the default temporal window length of 12 frames. Incorporating temporal consistency yields substantial improvements across all datasets for video depth estimation and 3D point tracking, and a further improvement is achieved when the temporal window length increases from 6 to 12 frames. In Table 5, we evaluate the effectiveness of the pointmap matching head. While it introduces only a modest improvement in the ATE metric, we attribute this to the limited motion and minimal viewpoint variation in the indoor evaluation dataset. As illustrated in Fig. 7, under challenging in-the-wild conditions with significant motion and rapid viewpoint changes, removing the pointmap matching head introduces ambiguity in the explicit rigid-transformation estimation, resulting in a clear degradation in performance. To further demonstrate the impact of the pointmap matching head on 3D point tracking, we conduct tracking experiments over 12 frames using the pairwise input setup. Removing the pointmap matching head (using only $\mathrm{Head}_2$) leads to an inevitable performance drop, emphasizing the importance of explicit correspondence modeling for reliable long-term tracking.
# 5. Discussion and Conclusion

We introduce POMATO, a unified framework for geometry estimation and motion understanding in dynamic scenes. By leveraging the proposed pointmap matching head, our method effectively distinguishes moving regions, mitigating the interference introduced by dynamic objects. The temporal motion module further facilitates the learning of temporal dynamics across frames, enhancing scale consistency and improving performance on tasks where both geometry and matching are critical, most notably 3D point tracking. Downstream temporal tasks, including 3D point tracking, video depth estimation, and 3D reconstruction, can all be addressed in a feed-forward manner. In future work, we plan to scale up training with more dynamic reconstruction and matching datasets to further enhance 3D reconstruction and tracking performance.
Acknowledgement. This work was supported by the National Natural Science Foundation of China (No. 62206244).
# References
[1] Daniel Barath, Dmytro Mishkin, Luca Cavalli, Paul-Edouard Sarlin, Petr Hruby, and Marc Pollefeys. Affineglue: Joint matching and robust estimation. arXiv preprint arXiv:2307.15381, 2023. 3

[2] Andreas Blattmann, Tim Dockhorn, Sumith Kulal, Daniel Mendelevitch, Maciej Kilian, Dominik Lorenz, Yam Levi, Zion English, Vikram Voleti, Adam Letts, Varun Jampani, and Robin Rombach. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127, 2023. 3

[3] Aleksei Bochkovskii, Amael Delaunoy, Hugo Germain, Marcel Santos, Yichao Zhou, Stephan R. Richter, and Vladlen Koltun. Depth pro: Sharp monocular metric depth in less than a second. arXiv preprint, 2024. 3

[4] Daniel J. Butler, Jonas Wulff, Garrett B. Stanley, and Michael J. Black. A naturalistic open source movie for optical flow evaluation. In ECCV, pages 611-625, 2012. 6, 8

[5] Ang Cao and Justin Johnson. Hexplane: A fast representation for dynamic scenes. In CVPR, 2023. 2

[6] Sili Chen, Hengkai Guo, Shengnan Zhu, Feihu Zhang, Zilong Huang, Jiashi Feng, and Bingyi Kang. Video depth anything: Consistent depth estimation for super-long videos. arXiv preprint arXiv:2501.12375, 2025. 4

[7] Yu Chen, Yisong Chen, and Guoping Wang. Bundle adjustment revisited. arXiv preprint arXiv:1912.03858, 2019. 3

[8] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In CVPR Workshops, pages 224-236, 2018. 3

[9] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In ICCV, pages 10061-10072, 2023. 3

[10] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. Carla: An open urban driving simulator. In Conference on Robot Learning, pages 1-16. PMLR, 2017. 6, 2

[11] Xiao Fu, Wei Yin, Mu Hu, Kaixuan Wang, Yuexin Ma, Ping Tan, Shaojie Shen, Dahua Lin, and Xiaoxiao Long. Geowizard: Unleashing the diffusion priors for 3d geometry estimation from a single image. arXiv preprint arXiv:2403.12013, 2024. 3

[12] Yasutaka Furukawa, Carlos Hernández, et al. Multi-view stereo: A tutorial. Foundations and Trends® in Computer Graphics and Vision, 9(1-2):1-148, 2015. 3

[13] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? The KITTI vision benchmark suite. In CVPR, pages 3354-3361, 2012. 6, 8

[14] Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning, 2023. 6

[15] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, pages 59-75. Springer, 2022. 3

[16] Mu Hu, Wei Yin, Chi Zhang, Zhipeng Cai, Xiaoxiao Long, Hao Chen, Kaixuan Wang, Gang Yu, Chunhua Shen, and Shaojie Shen. Metric3d v2: A versatile monocular geometric foundation model for zero-shot metric depth and surface normal estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2, 3

[17] Wenbo Hu, Xiangjun Gao, Xiaoyu Li, Sijie Zhao, Xiaodong Cun, Yong Zhang, Long Quan, and Ying Shan. Depthcrafter: Generating consistent long depth sequences for open-world videos. In CVPR, 2025. 3

[18] Mustafa Işık, Martin Rünz, Markos Georgopoulos, Taras Khakhulin, Jonathan Starck, Lourdes Agapito, and Matthias Nießner. Humanrf: High-fidelity neural radiance fields for humans in motion. ACM Transactions on Graphics (TOG), 42(4):1-12, 2023. 3

[19] Muhammad Zubair Irshad, Mauro Comi, Yen-Chen Lin, Nick Heppert, Abhinav Valada, Rares Ambrus, Zsolt Kira, and Jonathan Tremblay. Neural fields in robotics: A survey. arXiv preprint arXiv:2410.20220, 2024. 2

[20] Hanbyul Joo, Tomas Simon, Xulong Li, Hao Liu, Lei Tan, Lin Gui, Sean Banerjee, Timothy Scott Godisart, Bart Nabbe, Iain Matthews, Takeo Kanade, Shohei Nobuhara, and Yaser Sheikh. Panoptic studio: A massively multiview system for social interaction capture. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2017. 7, 8

[21] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Dynamicstereo: Consistent dynamic depth from stereo videos. In CVPR, 2023. 6, 2

[22] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Co-tracker: It is better to track together. In ECCV, 2024. 3

[23] Bingxin Ke, Anton Obukhov, Shengyu Huang, Nando Metzger, Rodrigo Caye Daudt, and Konrad Schindler. Repurposing diffusion-based image generators for monocular depth estimation. In CVPR, 2024. 2, 3

[24] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM TOG, 42(4), 2023. 3

[25] Skanda Koppula, Ignacio Rocco, Yi Yang, Joe Heyward, João Carreira, Andrew Zisserman, Gabriel Brostow, and Carl Doersch. Tapvid-3d: A benchmark for tracking any point in 3d. arXiv preprint arXiv:2407.05921, 2024. 7

[26] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. In ECCV, 2024. 2, 6, 7, 8

[27] Philipp Lindenberger, Paul-Edouard Sarlin, and Marc Pollefeys. Lightglue: Local feature matching at light speed. In ICCV, pages 17627-17638, 2023. 3

[28] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3

[29] Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, and Marc Szafraniec et al. DINOv2: Learning robust visual features without supervision. Transactions on Machine Learning Research, 2024. 3

[30] Emanuele Palazzolo, Jens Behley, Philipp Lottes, Philippe Giguere, and Cyril Stachniss. Refusion: 3d reconstruction in dynamic environments for rgb-d cameras exploiting residuals. In IROS, pages 7855-7862. IEEE, 2019. 6, 7, 8

[31] Xiaqing Pan, Nicholas Charron, Yongqian Yang, Scott Peters, Thomas Whelan, Chen Kong, Omkar Parkhi, Richard Newcombe, and Carl Yuheng Ren. Aria digital twin: A new benchmark dataset for egocentric 3d machine perception. arXiv preprint arXiv:2306.06362, 2023. 7, 8

[32] Rene Ranftl, Vibhav Vineet, Qifeng Chen, and Vladlen Koltun. Dense monocular depth estimation in complex dynamic scenes. In CVPR, pages 4058-4066, 2016. 3

[33] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. In ICCV, pages 12179-12188, 2021. 5

[34] Chris Russell, Rui Yu, and Lourdes Agapito. Video pop-up: Monocular 3d reconstruction of dynamic scenes. In ECCV, pages 583-598. Springer, 2014. 3

[35] Peter Sand and Seth Teller. Particle video: Long-range motion estimation using point trajectories. International Journal of Computer Vision, 80:72-91, 2008. 3

[36] Jiahao Shao, Yuanbo Yang, Hongyu Zhou, Youmin Zhang, Yujun Shen, Matteo Poggi, and Yiyi Liao. Learning temporally consistent video depth from video diffusion priors. arXiv preprint arXiv:2406.01493, 2024. 3

[37] Jürgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. In IROS, pages 573-580, 2012. 7, 8

[38] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow. In ECCV, pages 402-419. Springer, 2020. 3

[39] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. In NeurIPS, 2021. 2

[40] Basile Van Hoorick, Rundi Wu, Ege Ozguroglu, Kyle Sargent, Ruoshi Liu, Pavel Tokmakov, Achal Dave, Changxi Zheng, and Carl Vondrick. Generative camera dolly: Extreme monocular dynamic novel view synthesis. arXiv preprint arXiv:2405.14868, 2024. 6, 2

[41] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 2, 6

[42] Qianqian Wang, Vickie Ye, Hang Gao, Jake Austin, Zhengqi Li, and Angjoo Kanazawa. Shape of motion: 4d reconstruction from a single video. arXiv preprint arXiv:2407.13764, 2024. 2, 3

[43] Qianqian Wang, Yifei Zhang, Aleksander Holynski, Alexei A. Efros, and Angjoo Kanazawa. Continuous 3d perception model with persistent state, 2025. 2, 6, 7, 8

[44] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. DUSt3R: Geometric 3D vision made easy. In CVPR, pages 20697-20709, 2024. 2, 3, 4, 6, 7, 8

[45] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. TartanAir: A dataset to push the limits of visual SLAM. In IROS, pages 4909-4916, 2020. 6, 2

[46] Yihan Wang, Lahav Lipson, and Jia Deng. SEA-RAFT: Simple, efficient, accurate RAFT for optical flow. In ECCV, 2024. 1

[47] Yuxi Xiao, Qianqian Wang, Shangzhan Zhang, Nan Xue, Sida Peng, Yujun Shen, and Xiaowei Zhou. Spatialtracker: Tracking any 2d pixels in 3d space. In CVPR, 2024. 2, 3, 7

[48] Guangkai Xu, Yongtao Ge, Mingyu Liu, Chengxiang Fan, Kangyang Xie, Zhiyue Zhao, Hao Chen, and Chunhua Shen. Diffusion models trained with large data are transferable visual models. arXiv preprint arXiv:2403.06090, 2024. 3

[49] Yueming Xu, Haochen Jiang, Zhongyang Xiao, Jianfeng Feng, and Li Zhang. Dg-slam: Robust dynamic gaussian splatting slam with hybrid pose optimization. arXiv preprint arXiv:2411.08373, 2024. 2

[50] Honghui Yang, Di Huang, Wei Yin, Chunhua Shen, Haifeng Liu, Xiaofei He, Binbin Lin, Wanli Ouyang, and Tong He. Depth any video with scalable synthetic data. arXiv preprint arXiv:2410.10815, 2024. 3

[51] Lihe Yang, Bingyi Kang, Zilong Huang, Zhen Zhao, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything v2. arXiv preprint arXiv:2406.09414, 2024. 2, 3

[52] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arXiv:2410.03825, 2024. 2, 3, 6, 7, 8, 1

[53] Guosheng Zhao, Chaojun Ni, Xiaofeng Wang, Zheng Zhu, Xueyang Zhang, Yida Wang, Guan Huang, Xinze Chen, Boyuan Wang, Youyi Zhang, Wenjun Mei, and Xingang Wang. Drivedreamer4d: World models are effective data machines for 4d driving scene representation, 2024. 2

[54] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. PointOdyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023. 6, 7, 8, 2
# POMATO: Marrying Pointmap Matching with Temporal Motions for Dynamic 3D Reconstruction

Supplementary Material
# A. Pointmap Matching for Global Alignment

Given a sequence of video frames, the goal of global alignment is to project all pairwise estimated pointmaps into the same global world coordinates. DUSt3R constructs a pairwise connectivity graph and minimizes the re-projection error for each image pair globally, where dynamic regions should be separated from static regions. To this end, MonST3R [52] introduces an auxiliary optical flow network [46] to help mask the dynamic regions and to provide a pseudo label of 2D matching for minimizing the re-projection error in static regions. However, the auxiliary model brings inevitable domain gaps and additional computation costs. Besides, optical flow models are tailored for matching between two adjacent frames and degrade markedly under large view displacement. In POMATO, for an image pair $\{\mathbf{I}^i,\mathbf{I}^j\}$, the dynamic mask $\mathbf{D}^{j,i}$ is calculated by comparing the difference between $\mathbf{X}^{j,i}$ and $\mathbf{X}_m^{j,i}$:
$$
\mathbf {D} ^ {j, i} = \left\| \mathbf {X} _ {m} ^ {j, i} - \mathbf {X} ^ {j, i} \right\| > \alpha , \tag {9}
$$
where $\alpha$ is a dynamic threshold defined as $3 \times \mathrm{median}(\|\mathbf{X}_m^{j,i} - \mathbf{X}^{j,i}\|)$.

Given the updated camera intrinsics $\tilde{\mathbf{K}}$ after an iteration of optimization, the target matching 2D coordinates $\mathbf{F}_m^{j,i} \in \mathbb{R}^{H \times W \times 2}$ can be calculated as $\mathbf{F}_m^{j,i} = p(\tilde{\mathbf{K}}\mathbf{X}_m^{j,i})$, where $p$ is the mapping from 3D camera coordinates to 2D pixel coordinates. The optical flow loss proposed in MonST3R can thus be adapted with our dynamic mask and 2D matching coordinates; we refer to MonST3R [52] for details of the optical flow loss.
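A sketch of the projection $p$ (the helper name is ours), assuming $\mathbf{X}_m$ is expressed in the target camera's coordinates and $\tilde{\mathbf{K}}$ is a 3x3 intrinsic matrix:

```python
import torch

def project_to_2d(X_m, K):
    """F_m = p(K X_m): project a (H, W, 3) pointmap, given in the target
    camera's coordinates, to (H, W, 2) pixel coordinates."""
    uvw = torch.einsum('ij,hwj->hwi', K, X_m)            # apply intrinsics
    return uvw[..., :2] / uvw[..., 2:3].clamp(min=1e-6)  # perspective divide
```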
# B. Fast 3D Reconstruction with Video POMATO

Given a sequence of images no longer than the temporal window length of 12 frames, dynamic 3D reconstruction can be obtained by directly estimating the pointmaps of all reference images in the coordinate system of the keyframe, as discussed in Sec. 3.5. Here, we provide more visualization results of this feed-forward manner and demonstrate the effectiveness of the temporal motion module. As shown in Fig. 8, directly applying pairwise reconstruction suffers from an obvious scale shift across frames; with the temporal motion module, the consistency within the video sequence is clearly enhanced.
# C. Training Data Details

Details of the training datasets can be found in Tab. 6. The finetuning of POMATO was conducted exclusively on synthetic training data.
# D. More Visualizations on Dynamic Scenes

We provide more visualizations in Fig. 9 and Fig. 10. MonST3R suffers obvious degeneration when the view displacement is large, as reflected by its erroneous pose estimation, while POMATO still provides a consistent camera trajectory.
<table><tr><td>Dataset</td><td>Domain</td><td>Scene Type</td><td># of Frames</td><td># of Scenes</td><td>Dynamics</td><td>Ratio</td></tr><tr><td>PointOdyssey [54]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>200k</td><td>131</td><td>Realistic</td><td>57.1%</td></tr><tr><td>TartanAir [45]</td><td>Synthetic</td><td>Indoors & Outdoors</td><td>100k</td><td>163</td><td>None</td><td>14.3%</td></tr><tr><td>DynamicReplica [21]</td><td>Synthetic</td><td>Indoors</td><td>145k</td><td>524</td><td>Realistic</td><td>14.3%</td></tr><tr><td>ParallelDomain4D [40]</td><td>Synthetic</td><td>Outdoors</td><td>750k</td><td>15015</td><td>Driving</td><td>8.6%</td></tr><tr><td>Carla [10]</td><td>Synthetic</td><td>Outdoors</td><td>7k</td><td>5</td><td>Driving</td><td>5.7%</td></tr></table>

Table 6. Overview of all training datasets and sampling ratios. All datasets provide camera poses and depth, and most include dynamic objects.
Figure 8. Fast 3D reconstruction with our temporal motion module. Given a sequence of images no longer than the temporal window length, POMATO can directly obtain a global pointmap in the keyframe coordinate system.

Figure 9. Compared with MonST3R, our POMATO provides more complete dynamic masks and consistent geometry.

Figure 10. MonST3R suffers obvious degeneration when the view displacement is large, as reflected by the erroneous pose estimation, while POMATO still provides a consistent camera trajectory.