Chelsea707 commited on
Commit
d5f0c5f
·
verified ·
1 Parent(s): 6bd46a1

MinerU Batch 2b42d067-1025-45f5-9390-f481a398959a (Part 1/8)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_content_list.json +0 -0
  3. data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_model.json +0 -0
  4. data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf +3 -0
  5. data/2025/2503_12xxx/2503.12933/full.md +478 -0
  6. data/2025/2503_12xxx/2503.12933/images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg +3 -0
  7. data/2025/2503_12xxx/2503.12933/images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg +3 -0
  8. data/2025/2503_12xxx/2503.12933/images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg +3 -0
  9. data/2025/2503_12xxx/2503.12933/images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg +3 -0
  10. data/2025/2503_12xxx/2503.12933/images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg +3 -0
  11. data/2025/2503_12xxx/2503.12933/images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg +3 -0
  12. data/2025/2503_12xxx/2503.12933/images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg +3 -0
  13. data/2025/2503_12xxx/2503.12933/images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg +3 -0
  14. data/2025/2503_12xxx/2503.12933/images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg +3 -0
  15. data/2025/2503_12xxx/2503.12933/images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg +3 -0
  16. data/2025/2503_12xxx/2503.12933/images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg +3 -0
  17. data/2025/2503_12xxx/2503.12933/images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg +3 -0
  18. data/2025/2503_12xxx/2503.12933/images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg +3 -0
  19. data/2025/2503_12xxx/2503.12933/images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg +3 -0
  20. data/2025/2503_12xxx/2503.12933/images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg +3 -0
  21. data/2025/2503_12xxx/2503.12933/images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg +3 -0
  22. data/2025/2503_12xxx/2503.12933/layout.json +0 -0
  23. data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_content_list.json +1741 -0
  24. data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_model.json +0 -0
  25. data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf +3 -0
  26. data/2025/2503_12xxx/2503.12937/full.md +380 -0
  27. data/2025/2503_12xxx/2503.12937/images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg +3 -0
  28. data/2025/2503_12xxx/2503.12937/images/1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg +3 -0
  29. data/2025/2503_12xxx/2503.12937/images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg +3 -0
  30. data/2025/2503_12xxx/2503.12937/images/31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg +3 -0
  31. data/2025/2503_12xxx/2503.12937/images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg +3 -0
  32. data/2025/2503_12xxx/2503.12937/images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg +3 -0
  33. data/2025/2503_12xxx/2503.12937/images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg +3 -0
  34. data/2025/2503_12xxx/2503.12937/images/6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg +3 -0
  35. data/2025/2503_12xxx/2503.12937/images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg +3 -0
  36. data/2025/2503_12xxx/2503.12937/images/89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg +3 -0
  37. data/2025/2503_12xxx/2503.12937/images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg +3 -0
  38. data/2025/2503_12xxx/2503.12937/images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg +3 -0
  39. data/2025/2503_12xxx/2503.12937/images/cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg +3 -0
  40. data/2025/2503_12xxx/2503.12937/images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg +3 -0
  41. data/2025/2503_12xxx/2503.12937/images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg +3 -0
  42. data/2025/2503_12xxx/2503.12937/images/f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg +3 -0
  43. data/2025/2503_12xxx/2503.12937/layout.json +0 -0
  44. data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_content_list.json +877 -0
  45. data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_model.json +1004 -0
  46. data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf +3 -0
  47. data/2025/2503_12xxx/2503.12952/full.md +165 -0
  48. data/2025/2503_12xxx/2503.12952/images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg +3 -0
  49. data/2025/2503_12xxx/2503.12952/images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg +3 -0
  50. data/2025/2503_12xxx/2503.12952/images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg +3 -0
.gitattributes CHANGED
@@ -1656,3 +1656,11 @@ data/2025/2503_13xxx/2503.13861/24ebc1ba-af7b-4d3e-a5d9-ba11158e223d_origin.pdf
1656
  data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1657
  data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf filter=lfs diff=lfs merge=lfs -text
1658
  data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1656
  data/2025/2503_13xxx/2503.13881/f8ea68e7-ed15-4c1e-a85c-7872bf8b0c7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1657
  data/2025/2503_13xxx/2503.13933/abcf6c14-6474-4c8a-adec-45f736d3be15_origin.pdf filter=lfs diff=lfs merge=lfs -text
1658
  data/2025/2503_14xxx/2503.14350/f51fef62-e3ca-4f33-b47c-8b3a779fe535_origin.pdf filter=lfs diff=lfs merge=lfs -text
1659
+ data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf filter=lfs diff=lfs merge=lfs -text
1660
+ data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf filter=lfs diff=lfs merge=lfs -text
1661
+ data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf filter=lfs diff=lfs merge=lfs -text
1662
+ data/2025/2503_12xxx/2503.12972/66f6c6f6-89fc-4fc3-8936-54da38b8d574_origin.pdf filter=lfs diff=lfs merge=lfs -text
1663
+ data/2025/2503_13xxx/2503.13107/91b69939-46cd-4d75-af9c-6d9f6dccd58b_origin.pdf filter=lfs diff=lfs merge=lfs -text
1664
+ data/2025/2503_13xxx/2503.13139/d822d65e-325f-4d59-9422-a1e3da2df18a_origin.pdf filter=lfs diff=lfs merge=lfs -text
1665
+ data/2025/2503_13xxx/2503.13195/86c7a688-40c1-4477-ae2d-59118e3bfc36_origin.pdf filter=lfs diff=lfs merge=lfs -text
1666
+ data/2025/2506_12xxx/2506.12103/2704209e-62a6-4e6a-a67a-5d895054a41f_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_12xxx/2503.12933/52570aa5-5dd9-4c45-a08c-c1948885d88d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:652712b520275a0f7ffc619f56020cbc7f5e570f45a5a65bd0dd4384d5e22141
3
+ size 2787992
data/2025/2503_12xxx/2503.12933/full.md ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Empath-D: VR-based Empathetic App Design for Accessibility
2
+
3
+ Wonjung Kim\* wjkim@nclab.kaist.ac.kr KAIST
4
+
5
+ Kenny Tsu Wei Choo kenny.choo.2012@smu.edu.sg Singapore Management University
6
+
7
+ Youngki Lee
8
+ youngkilee@smu.edu.sg
9
+ Singapore Management University
10
+
11
+ Archan Misra
12
+
13
+ archanm@smu.edu.sg
14
+
15
+ Singapore Management University
16
+
17
+ Rajesh Krishna Balan
18
+
19
+ rajesh@smu.edu.sg
20
+
21
+ Singapore Management University
22
+
23
+ # ABSTRACT
24
+
25
+ With app-based interaction increasingly permeating all aspects of daily living, it is essential to ensure that apps are designed to be inclusive and are usable by a wider audience such as the elderly, with various impairments (e.g., visual, audio and motor). We propose Empath-D, a system that fosters empathetic design, by allowing app designers, in-situ, to rapidly evaluate the usability of their apps, from the perspective of impaired users. To provide a truly authentic experience, Empath-D carefully orchestrates the interaction between a smartphone and a VR device, allowing the user to experience simulated impairments in a virtual world while interacting naturally with the app, using a real smartphone. By carefully orchestrating the VR-smartphone interaction, Empath-D tackles challenges such as preserving low-latency app interaction, accurate visualization of hand movement and low-overhead perturbation of I/O streams. Experimental results show that user interaction with Empath-D is comparable (both in accuracy and user perception) to real-world app usage, and that it can simulate impairment effects as effectively as a custom hardware simulator.
26
+
27
+ # CCS CONCEPTS
28
+
29
+ - Human-centered computing $\rightarrow$ Systems and tools for interaction design; Ubiquitous and mobile computing systems and tools; Accessibility design and evaluation methods; Accessibility systems and tools; Ubiquitous and mobile computing design and evaluation methods;
30
+
31
+ # KEYWORDS
32
+
33
+ empathetic design; accessibility; mobile design; virtual reality; multi-device, distributed user interfaces
34
+
35
+ # ACM Reference Format:
36
+
37
+ Wonjung Kim, Kenny Tsu Wei Choo, Youngki Lee, Archan Misra, and Rajesh Krishna Balan. 2018. Empath-D: VR-based Empathetic App Design for Accessibility. In MobiSys '18: The 16th Annual International Conference on Mobile
38
+
39
+ *This work was done while the author was on an internship at Singapore Management University
40
+
41
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
42
+
43
+ MobiSys '18, June 10-15, 2018, Munich, Germany
44
+
45
+ $\odot$ 2018 Association for Computing Machinery.
46
+
47
+ ACM ISBN 978-1-4503-5720-3/18/06...$15.00
48
+
49
+ https://doi.org/10.1145/3210240.3210331
50
+
51
+ ![](images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg)
52
+ Figure 1: Overview of Empath-D
53
+
54
+ Systems, Applications, and Services, June 10-15, 2018, Munich, Germany. ACM, New York, NY, USA, 13 pages. https://doi.org/10.1145/3210240.3210331
55
+
56
+ # 1 INTRODUCTION
57
+
58
+ Digital interactions have become increasingly commonplace and immersive. We now constantly interact with our personal devices and computing-enhanced ambient objects (such as coffeemakers, home automation systems and digital directories), while engaging in everyday activities, such as commuting, shopping or exercising. Given the ubiquity of such interactions, it is important to ensure that the associated computing interfaces remain accessible to segments of the population, such as the elderly, who suffer from various impairments. The global elderly population is projected to reach $16.7\%$ by 2050 [33], and such users suffer disproportionately from impairments (e.g., vision) that hinder accessibility.
59
+
60
+ To support more accessible design, our earlier work [11] introduced the vision of Empath-D, which uses a virtual reality (VR) device to provide mobile application/object designers with a realistic emulation of the interaction experience that impaired users would encounter. In this work, we present the design, implementation and validation of the Empath-D system inspired by this vision. Empath-D's goal is to allow unimpaired application designers to step into the shoes of impaired users and rapidly evaluate the usability of alternative prototypes. While we shall principally focus on empathetic evaluation of mobile applications (apps), Empath-D's design is generic enough to permit emulation of other real-world interactions-e.g., how an elderly user with cataracts and hearing loss would experience a traffic-light controlled pedestrian intersection.
61
+
62
+ Empath-D's $^1$ key idea is to present the user with an impairment-augmented view of the smartphone interface (or other digital objects) in a virtual world, while allowing the non-impaired user to perform natural interactions, using a physical smartphone, with a real-world instance of the smartphone app. At a high-level, Empath-D works as follows (see Figure 1): The (unimpaired) user uses a physical smartphone to perform real-world interactions (such as scrolls, taps or gestures) with the app, while wearing a VR device. The results of such interactions are projected instantaneously through the I/O interfaces (e.g., screen, speaker) of a 'virtual smartphone' visible in the VR display, but only after those I/O streams have been appropriately degraded by the specified impairment. For example, in Figure 1, the virtual phone's display (and the world view) has been appropriately vignetted, to mimic the experience of a user suffering from glaucoma.
63
+
64
+ Key Challenges: To mimic impairments with adequate fidelity and usability, Empath-D must support the following features:
65
+
66
+ - Fast, Accurate Multi-device Operation: Empath-D utilizes a split-interaction paradigm: a user interacts with an app using a real-world handheld smartphone, while perceiving (viewing, hearing) the app responses through the VR interface. To faithfully replicate the real-world experience, this split-mode interaction must have tight time coupling and visual fidelity (of the virtual phone's screen), comparable to direct interactions with a standalone smartphone.
67
+ - Real-time Tracking: To preserve a user's perception of naturalistic interactions, Empath-D must not only capture explicit phone events, but also mirror physical actions taken by the user (e.g., swinging the phone around or having one's hand hover over the phone). Thus, Empath-D must also track and render, in real-time, the orientation/location of both the phone and the user's hand within the VR device's field-of-view.
68
+ - Lightweight Impairment Execution: To preserve the feel of natural interaction, Empath-D must insert the impairment-specific perturbations into the input/output streams with imperceptible latency or computational overhead (e.g., no reduction in video frame rate).
69
+
70
+ Key Contributions: We make the following major contributions:
71
+
72
+ - 3-Tier Virtualisation Model: We design a novel 3-tier architecture where (i) the real-world smartphone serves merely as a tracker, forwarding user interaction events (e.g., screen touch and gestures) to a computationally powerful intermediary, after which (ii) the intermediary device perturbs those events by blending in specific input impairments (e.g., hand tremors) and passes them to an app instance running on a smartphone emulator, and finally (iii) the VR device receives the redirected outputs from this app instance and renders an appropriately-impaired (by blending in the output impairments) virtual world, including a virtual smartphone.
73
+ Real-time Hand and Phone Tracking: We use an RGB-Depth camera, mounted on the head-worn VR device, to track the outline of a user's hand, and subsequently perform a lightweight but realistic 3-D rendering of the hand on the VR
74
+
75
+ display. We also use fiducial marker tracking [14] by the camera to track the position/orientation of the real-world smartphone. We demonstrate our ability to achieve both high-fidelity (pointing error $\leq 5\,mm$ ) and low-latency (end-to-end delays below 120 msec) hand tracking and display.
76
+
77
+ - Usability of Virtualized Phone, in Use Environments: We show that Empath-D is not just usable, but that user performance (absent impairments) using Empath-D's virtual smartphone is equivalent to real-world interaction with a smartphone. In addition, we allow usability testing of apps in their use environments, a key enabler for design of mobile applications which may be used anywhere. Our Samsung Gear VR-based prototype has end-to-end latency low-enough (only 96.3 msec of latency, excluding the mobile app emulation) to permit faithful reproduction of direct smartphone usage.
78
+ - Validation of Impairment Fidelity and Overall System: We implement two distinct vision (glaucoma & cataract) and one audio (high-frequency hearing loss) impairment in our Empath-D prototype. We then conduct a set of studies using the vision impairments, where 12 participants perform a series of standardised activities (e.g., add an alarm), using both our Empath-D prototype (test) and a commercial hardware vision impairment simulator (control) and establish that the performance of users is equivalent across the test and control groups. Finally, we conduct a small-scale study to provide preliminary evidence that our empathetic approach allows developers to design accessible mobile UIs faster and better.
79
+
80
+ # 2 THE EMPATH-D VISION
81
+
82
+ We use an example to illustrate the use of Empath-D:
83
+
84
+ Designing for Visual Impairment. Alice is designing a mobile app that automatically magnifies text from real environments seen through its rear camera to aid people who suffer from cataracts (a condition that dims and blurs vision). Alice starts Empath-D and is presented with a web interface that allows her to customise impairments (e.g., specify the intensity of visual blur). After customising the environment, Alice clicks in the Empath-D web interface to (1) compile the environment to her phone used for VR display (VR-phone)<sup>2</sup> and (2) connect an input/output service to a separate phone (IO-phone). She then plugs the VR-phone into the VR headset.
85
+
86
+ Alice then compiles her Android app, and runs it in the Android emulator. She puts on the VR headset and holds the IO-phone in her hands. A virtual smartphone (Virt-phone) shows up in VR, tracking the real-world motion of the IO-phone. Alice now navigates through the virtual world, experiencing it as an "impaired user, with cataracts". She holds up IO-phone on a street corner (in the real world), and notices that the magnified text (as seen in the virtual phone in the virtual world) is not clear enough to be legible to a cataract-impaired user. She can now iteratively and rapidly modify her app, recompile it, and execute it in the Android emulator, until she is satisfied with the output. This scenario demonstrates the ease-of-use for Empath-D, with no need for special instrumentation of the app.
87
+
88
+ # 3 SYSTEM OVERVIEW
89
+
90
+ # 3.1 Design Goals and Implications
91
+
92
+ Empath-D has the following key goals, which directly influence the salient implementation choices.
93
+
94
+ - Holistic emulation of impairments: For a truly empathetic experience, the app designer must perceive the effects of impairments not just while using the mobile app, but throughout her immersion in the virtual world. Consider a user, suffering from cataract, who is interacting with her smartphone while attending a dimly lit dinner gathering. Simply blurring the phone display, while leaving the background illumination and focus unchanged, might not replicate challenges in visual contrast that an impaired user would face in reality. This requirement precludes the straightforward use of I/O redirection techniques such as Rio [8], which can potentially perturb the I/O streams of only the mobile device. Instead, the impairment must be applied holistically, to the entire virtual world.
95
+ - Realistic emulation of smartphone and mobile apps in the virtual world: Empath-D aims at realistically emulating mobile apps within the virtual world rendered by a commodity VR headset. Realistic emulation of mobile apps imposes two requirements. (a) First, the virtual smartphone should have sufficient visual resolution, corresponding to typical usage where the smartphone is held $\approx 30\mathrm{cm}$ away from the eye. We shall see (in Section 6.3) that this requirement, coupled with differences in display resolutions between smartphones and VR devices, requires careful magnification of the virtual smartphone to provide legibility without hampering usage fidelity. (b) Second, the user should not perceive any lag between her user input and the rendered view of the app, seen through the VR device. Quantitatively, we thus require that the task completion time, experienced by a user interacting with the emulated application in the virtual world, should be comparable to real-world app usage on a real smartphone.
96
+ - Use of unmodified app For easy and low-overhead adoption by app designers, Empath-D should support the emulation of mobile applications using the original, unmodified binaries (e.g., .apk for Android). Empath-D's requirement to support empathetic emulation without app modifications implies that app designers would be able to adopt Empath-D with minimal impact to existing development practices.
97
+ - Low-latency, accurate finger tracking: This goal is an extension of the holistic emulation objective. In the real-world, users utilise instantaneous visual feedback and proprioception to move their fingers around the smartphone display, even when they are hovering but not actually touching the display. To ensure consistency between the user's tactile, visual and proprioceptive perceptions of her hand movement, Empath-D should also realistically render, in the virtual world, the user's hand movements and any changes in the position/orientation of the real-world smartphone, without any perceptible lag. In Section 6, we shall see how the Empath-D implementation meets these stringent performance bounds.
98
+
99
+ ![](images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg)
100
+ Figure 2: Empath-D architecture
101
+
102
+ - Light-weight, effective emulation of impairments: Empath-D will need to emulate impairments, at different levels of severity. For high-fidelity empathetic emulation, the insertion of such impairments in the I/O streams of the smartphone should not generate any additional artefacts (e.g., increased latency, reduction in display refresh rate, etc.).
103
+
104
+ # 3.2 System Overview
105
+
106
+ We now present the overview of the Empath-D system (illustrated in Figure 2).
107
+
108
+ Using Empath-D in VR. To immersively evaluate the application, the developer (or the tester) starts by installing her developed application binaries (i.e., Android .apks) to run on the emulated smartphone. The developer then adjusts the profile settings for the impairment using Empath-D's web dashboard and selects a use case scenario (e.g., in office, in the street, etc.). She holds her physical smartphone and puts on the VR headset, earphones (when hearing impairments are involved) and experiences the immersive reality (where she can use the app - now mapped onto the physical smartphone - with the configured impairment under the designated use case scenario) that Empath-D generates. She then tests out various interfaces and functionalities of the app in the immersive VR environments.
109
+
110
+ Components of Empath-D. Empath-D runs across three different physical devices: a physical smartphone, a computer, and a VR device (see Figure 2).
111
+
112
+ Smartphone: In Empath-D, the user interacts with the app using a real smartphone held in her hand. Interestingly, this smartphone does not run the app itself, but functions as a tracking device, helping to preserve the user's realistic sense of smartphone interaction. The smartphone simply redirects the user interaction events (e.g., touch events such as clicks and swipes on the display and motion events captured by inertial sensors) to the computer, which is in
113
+
114
+ charge of the app emulation. This smartphone also displays a fiducial marker array [14] on its display, to help in efficient, real-time tracking of the phone's location.
115
+
116
+ Computer: The computer is at the heart of Empath-D's ability to fuse the real and virtual world. It consists of two major components: Phone and Hand Tracker and Mobile Emulator, as well as a Web Dashboard (see Figure 6), which allows the user to select the impairment profile to be applied. In addition, as we shall discuss shortly, this computer may run an Impairment Generator cum Virtual World Renderer. Key functions include:
117
+
118
+ - The Phone and Hand Tracker uses images captured by the VR headset-mounted camera to track the position and pose of the smartphone (relative to the VR device), and create the virtual phone image at the correct position in the virtual world. It also uses the same camera to track the user's hand, as it interacts with the smartphone, and then renders it in the virtual world.
119
+ - The Mobile Emulator executes the app being tested, using the redirected stream of user interaction events transmitted by the smartphone. The resulting visual output of the app is then transmitted as a sequence of images to the VR device, where these images will be integrated into the virtual phone object; likewise, audio output (if any) is directly streamed to the VR device.
120
+
121
+ The overall Empath-D framework includes an Impairment Generator that is typically applied as one or more filters over the Virtual World Renderer (an engine such as Unity [44]), which is responsible for combining various virtual objects and rendering the virtual world. The Impairment Generator effectively perturbs/modifies the audio/video feeds of the virtual world, before they are displayed on the VR device. For example, to emulate cataracts, it applies an appropriate 'blurring/dimming' filter on the video feed; similarly to emulate high-frequency hearing loss (an audio impairment), this generator will apply a low-pass filter on the output audio stream. These two components are placed inside a dotted-line rectangle in Figure 2, to reflect the reality that these components run on either the Computer or the VR device, depending on whether the VR device is tethered or not. In untethered VR devices (such as the Samsung Gear VR), the Impairment Generator and the Virtual World Renderer run on the VR device itself. In contrast, with tethered devices such as the HTC Vive, these components run on the computer, which typically offers higher graphics quality, higher frame rates and faster execution.
122
+
123
+ VR Device: Finally, the VR device is used to display the synthesised virtual world to the user. This synthesis involves the fusion of the virtual smartphone, the user's hand and the ambient virtual world, all subject to the impairment filter.
124
+
125
+ # 4 VR-BASED EMULATION OF MOBILE INTERACTION
126
+
127
+ Empath-D follows a split-interaction paradigm: for realistic immersion, Empath-D renders the visual and audio output of the target app in the virtual world (i.e., via VR headset's display and speakers), while allowing the user to interact naturalistically with a real-world physical phone. A major challenge in this paradigm is to enable natural, low-latency tracking and display of the real-world motion of both the phone and the user's hands, so as to ensure consistency
128
+
129
+ across the user's visual, tactile and proprioceptive experience. We achieve this by performing three distinct steps: (a) smartphone tracking, (b) hand tracking, and (c) hand rendering in VR, using an RGB-Depth (RGB-D) camera mounted on the VR headset. Empath-D first tracks the position and orientation of the physical smartphone and synchronises the position of the virtual phone to the physical smartphone (See Section 4.1). Separately, Empath-D also captures fingers in the real world and displays them at the correct position (relative to the virtual smartphone) in the virtual world (See Section 4.2 and 4.3).
130
+
131
+ ![](images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg)
132
+ Figure 3: Tracking physical phone with fiducial markers
133
+
134
+ Empath-D uses the headset-mounted RGB-D camera to capture the colour image along with the depth values, relative to the camera. The camera's position is always fixed, relative to the user's head. Its three axes are thus aligned to a user's head: $z$ -axis to the user's forward (gaze) direction, and $x$ and $y$ axes capturing the vertical and horizontal displacement.
135
+
136
+ # 4.1 Tracking the physical smartphone
137
+
138
+ Empath-D uses fiducial markers, displayed on the physical smartphone's screen, to localise the smartphone efficiently. It takes a colour image as an input, and returns the transformation relative to the camera's coordinate system: translation and rotation, i.e., x, y, z, roll, pitch, yaw from the RGB-D camera's coordinate system. We employ a technique proposed and detailed in [14].
139
+
140
+ The Empath-D Hand Tracker component tracks the physical phone using markers captured by the camera. Each marker, displayed on the phone screen, has a distinct pattern. The tracker knows the position of each marker (e.g., top-left, top-right, bottom-left and bottom-right) in the physical smartphone screen's coordinate system. The system first detects these markers in a given colour image, identifying them based on their unique patterns (see Figure 3). In particular, the system recognises the coordinates of each of the four corners of each marker. Moreover, the system knows the true size of, and separation between, each marker. It then uses an object pose estimation algorithm (provided by openCV's solvePnP function [6]), along with the array of fiducial marker points, to compute the 3-D position and orientation of the smartphone. Past
141
+
142
+ Algorithm 1 Hand Segmentation
143
+ 1: Input: $T\gets$ Phone's translation (3-D vector)
144
+ 2: Input: $R\gets$ Phone's orientation $(3\times 3$ rotation matrix),
145
+ 3: Input: $F\gets$ RGBD Frame, 2-D array that each entry $F_{i,j}$ holds a color value and 3-D position relative to the camera.
146
+ 4: Input: $V\gets$ 3-D region of interest (relative to the phone)
147
+ 5: Output: fgMask, 2D bool array whose dimension equals to $F$
148
+ 6:
149
+ 7: fgMask[i,j] $\leftarrow$ false for all $(i,j)$
150
+ 8: for point $(i,j)$ in $F$ do
151
+ 9: if $(i,j)$ in screen_border then
152
+ 10: /\* Case A: Blue background segmentation \*/
153
+ 11: fgMask[i,j] $\leftarrow 1 - Blue(F_{i,j}) + 0.5\cdot Red(F_{i,j}) > \tau$
154
+ 12: else
155
+ 13: /\* Case B: Depth-based segmentation \*/
156
+ 14: posphone $\leftarrow$ $R^{-1}\cdot (Position(F_{i,j}) - T)$
157
+ 15: fgMask[i,j] $\leftarrow$ (posphone $\in V$ )
158
+ 16: end if
159
+ 17: end for
160
+ 18: return fgMask
161
+
162
+ results [14] show that this technique can compute an object's position and orientation with sub-cm level accuracy.
163
+
164
+ This fiducial marker-based algorithm would fail under two conditions: (a) when the markers are occluded by the user's hand, and (b) if the ambient illumination levels are too low or too high, reducing the contrast level of the markers. To tackle (a), the smartphone screen uses an entire array of markers displayed across the screen, thereby ensuring correct smartphone tracking as long as some part of the phone is visible. Contrast concerns are not particularly relevant in our scenario, as we assume that the user is testing the app in a regularly lit work/office environment.
165
+
166
+ # 4.2 Hand Segmentation
167
+
168
+ Empath-D uses the frames captured by the RGB-D camera to track and segment the user's hand. For each frame, we extract the segment (polygon of pixels) that represents the user's hand, and render that segment in the virtual world. As the goal of hand-tracking is to provide the user with a natural view of her smartphone interactions, we restrict the tracking technique to a 3-D region of interest (ROI) that is centred at the phone, with a depth of $2\,cm$ and a planar boundary of $6\,cm$ . In other words, we only track the hand while it is $\leq 2\,cm$ away from the smartphone screen, and within $\leq 6\,cm$ of the smartphone edges.
169
+
170
+ A straightforward approach is to apply a depth-based segmentation strategy, where we first isolate only the foreground points which lie within a depth $= 2cm$ of the smartphone surface. However, we empirically observed that, due to the glossy surface of the smartphone, such depth estimation was inaccurate for points located on the smartphone's screen. Accordingly, we implemented two separate segmentation methods (detailed in Algorithm 1): (case A) a colour-based segmentation approach to identify points which are directly over the smartphone, and (case B) a depth-based approach to identify points which are near, but not over, the smartphone's
171
+
172
+ ![](images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg)
173
+ Figure 4: Mesh of hand
174
+
175
+ ![](images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg)
176
+ Figure 5: Empath-D hand segmentation
177
+
178
+ screen. We apply the colour-based segmentation to the points inside the screen's border (thick orange contour in Figure 3) and the depth-based approach to the points outside.
179
+
180
+ Colour-based segmentation: We adopt the colour-based technique proposed in [41]. The approach tests RGB values to segment foreground (hand) from background, coloured in blue. In our scenario, we target human skin as the foreground. Human skin has a property common in all races: its R value has about twice the value of G and B ( $R \approx 2G \approx 2B$ ). Given the property of human skin, we obtain a formula that discriminates the foreground from the background whose $B$ value is 1 (line 11 in Algorithm 1). $\tau$ is a user-tunable threshold which allows it to adapt to different lighting conditions.
181
+
182
+ However, note that, to enable tracking of the phone, the phone's screen cannot be completely blue, but will need to contain the array of fiducial markers. We tackle both problems simultaneously by using blue ( $R = 0$ , $G = 0$ , $B = 1$ ) to colour the markers, over a cyan ( $R = 0$ , $G = 1$ , $B = 1$ ) background. Here we modified only $G$ value, which is unused in the colour-based segmentation.
183
+
184
+ Points outside the smartphone's screen are segmented using the depth-based approach. After identifying the points corresponding to the user's hand, the system translates these points to 3-D coordinates in the camera's coordinate system, using the associated depth values.
185
+
186
+ # 4.3 Rendering the hand in the virtual world
187
+
188
+ After detecting the hand segment, the Empath-D system renders it in the virtual world. The system passes the tracked hands to the Virtual World Renderer, sharing the (i) 3D structure of the hands (surface mesh), (ii) colour image of the RGB-D frame (texture), and (iii) mapping between the surface mesh and the colour image (UV map). In common rendering engines (e.g. Unity), the 3D structure of the hand is represented by a triangle mesh-i.e., a set of vertices, constituting individual small triangles. The mesh is rendered at the same location as the user's hand in the real world. As the user's hand is localised in the coordinates of the RGB-D depth camera, the location is offset by an additional depth value (7cm in our implementation), to reflect the additional distance between the centre of the user's eyes and the depth camera. An important characteristic of our algorithm is that we render the actual image of the user's hands over this triangle mesh. Figure 4 illustrates the Delaunay
189
+
190
+ # Empath-D Dashboard
191
+
192
+ cataract (blur and contrast reduction)
193
+
194
+ enabled*
195
+
196
+ enabled
197
+
198
+ Blur intensity*
199
+
200
+ 0.1
201
+
202
+ Contrast reduction intensity
203
+
204
+ 1
205
+
206
+ triangulation of a set of points. The mesh is combined with the hand's image (Figure 5), and rendered in the VR display. Extracting and rendering the actual image of the user's finger enhances the immersive feeling of real-life smartphone navigation in the virtual world.
207
+
208
+ The complexity of the mesh-i.e., the number of vertices (or triangles) in the rendered hand-is an important parameter in the rendering process. A larger number of vertices captures the contours of the hand more precisely, resulting in a more life-like image. However, this also results in added rendering latency in the rendering engine. To support the twin objectives of low-latency and life-like rendering, we utilise a sub-sampling technique to construct the mesh. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a 32-fold downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the row and column axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We shall show, in Section 6, how our prototype Empath-D implementation uses this technique to achieve our twin objectives.
209
+
210
+ # 5 IMPAIRMENT SIMULATION
211
+
212
+ Empath-D aims to enable evaluation of the usability of app designs under visual, auditory and haptic impairment simulation. Realistic simulation of various impairments in the VR world is the essential requirement to achieve this goal.
213
+
214
+ There has been a thread of research to simulate impairments through physical simulator devices [1, 13, 29, 39, 49]. For instance, Zimmerman et al. use goggles and enclosing materials to simulate low vision impairments [49]. These hardware simulators generalise the impairment of interest and enable simulation of specific aspects of the impairment pathology rather than emulate exactly how an impairment is. However, impairments can vary greatly between individuals. For instance, glaucoma generally progresses in deterioration from the periphery towards the centre of vision, but in reality, it comes in different shapes and severity, affecting usability of applications in different ways. Existing physical impairment simulators simply approximate this as a central circle of
215
+
216
+ ![](images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg)
217
+ Figure 6: Screenshot of Empath-D impairment configuration dashboard
218
+
219
+ ![](images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg)
220
+ Figure 7: Simulated cataract (left) and simulated glaucoma (right)
221
+
222
+ clarity, with blur through to the periphery. Empath-D is advantageous over existing physical simulators in the following ways: it allows 1) impairments to be customised, 2) simultaneous manifestation of multiple impairments, and 3) the addition of new impairments easily. Figure 6 shows the web interface for designers to customise impairments for the target user group.
223
+
224
+ # 5.1 Simulating Visual Impairments
225
+
226
+ Vision is the dominant sensory system by which humans perceive the world, and is a key focus for Empath-D. Vision impairment is one of the most common causes of accessibility problems that comes with age. Common vision impairments include cataracts, glaucoma, and age-related macular degeneration. Such vision impairments present as reduced visual acuity, loss of central/peripheral vision, or decreased contrast sensitivity. It is widely studied that these symptoms can affect the interaction with various desktop and mobile applications; for example, humans use peripheral vision to pre-scan text ahead of their point of focus. As the peripheral vision narrows, the scanning becomes less effective, which slows reading [23]. In this work, we examine and simulate two commonly found visual impairments - cataracts and glaucoma.
227
+
228
+ Our approach is to apply an image effect at the "eye" (i.e., a camera pair of view renderers) of the VR scene. From this camera pair, the image effect will apply to all other objects in the scene (e.g., smartphone, fingers, scene), just as how impaired users would experience it. We employed various image filters for different impairments, which 1) provide realism of impairments to help designers to find out usability issues and take corrective actions, and 2) have small computational overhead not to add noticeable delays to our entire emulation.
229
+
230
+ The approach is flexible and lightweight. Impairment simulator's intensity is configurable at runtime. The image effects are applied at the last stage of the rendering pipeline. Glaucoma presents functionally as a loss in peripheral vision. To simulate glaucoma, we use a vignette with a clear inner circle, blurred inner-outer circle, and black extending outwards from the outer circle (see Figure 7). Cataracts presents functionally as reduced visual acuity and reduced contrast sensitivity. We use a blur filter to simulate reduced visual acuity, and a contrast reduction filter to simulate reduced contrast sensitivity (see Figure 7).
231
+
232
+ Table 1: Hardware of Empath-D
233
+
234
+ <table><tr><td>VR headset</td><td>Samsung Gear VR [5]</td></tr><tr><td>VR smartphone</td><td>Samsung Galaxy S7 [4]</td></tr><tr><td>RGB-D camera</td><td>Intel RealSense SR300 [20]</td></tr><tr><td>PC</td><td>CPU: 4 cores, 3.4 GHz
235
+ RAM: 16 GB
236
+ GPU: GeForce GTX 1080 [32]</td></tr><tr><td>Physical IO smartphone</td><td>Samsung Galaxy S5 [40]</td></tr></table>
237
+
238
+ The functional aspects of vision impairments are straightforward to create in VR, which give Empath-D high extendability to implement other types of visual impairments. While we just described two impairments pertaining to our studies, it is easy to create other impairments such as colour filters to simulate colour blindness. However, we leave the effect of eye movements on impairments as future work. Since eye-tracking is currently not supported in Empath-D, a user will need to move his head to achieve the same effect.
239
+
240
+ # 5.2 Simulating Other Modalities
241
+
242
+ We discuss how other modalities may be simulated in Empath-D.
243
+
244
+ Hand Tremors. Hand tremors are a common symptom of Parkinson's disease or Essential tremor and make it hard for one to precisely point on a touchscreen. A hand tremor may be characterised by the frequency and amplitude of oscillatory movement. Since we present virtual representations of the user's hand (i.e., as a 3D mesh) to enable his interaction with the virtual mobile phone, Empath-D similarly perturbs this 3D mesh in VR to create hand tremors. While a user may physically not experience hand movement, the visual perturbation would be sufficient to hinder accurate touch to simulate hand tremors.
245
+
246
+ Hearing Loss. High-frequency hearing loss is a common symptom for the elderly population. People diagnosed with high-frequency hearing loss are unable to hear sounds between $2,000\mathrm{Hz}$ and 8,000 Hz. These people often struggle to understand or keep up with daily conversations (missing consonants in higher registers, such as the letters F and S or female voices). Empath-D applies a bandpass filter over the output sound of the target application to diminish the sound signals between $2\mathrm{kHz}$ and $8\mathrm{kHz}$ and plays the filtered audio feed through the VR device.
247
+
248
+ # 6 IMPLEMENTATION
249
+
250
+ # 6.1 Hardware
251
+
252
+ We implemented our current Empath-D prototype using the hardware described in Table 1. We used the Samsung Gear VR fitted with the Samsung Galaxy S7 as the VR headset. We used the Intel RealSense SR300 RGB-D camera for finger tracking, selecting this among alternatives as: 1) its small size and low weight allowed us to easily attach it to the VR headset, and 2) its minimum sensing range is low enough to permit hand tracking at a distance of $30\mathrm{cm}$ . We employed the Samsung Galaxy S5 as the physical I/O device, and a powerful laptop (4 core 3.4 GHz CPU, 16GB RAM) as the intermediary device. The choice of the VR headset itself was deliberate. We chose a Samsung Gear VR headset (an untethered
253
+
254
+ ![](images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg)
255
+ Figure 8: Rendering frame rate under varying virtual display resolution (width : height = 9 : 16, default resolution of Android emulator is 1080x1920)
256
+
257
+ smartphone-powered VR device) over more powerful PC-tethered VR devices such as the HTC Vive or Oculus Rift. This was mainly because PC-tethered devices such as HTC Vive use IR lasers to localise the headset, which interferes with the IR laser emitted by the RGB-D camera used for depth sensing in hand tracking.
258
+
259
+ # 6.2 Rendering an Emulated App
260
+
261
+ We used empirical studies to determine an appropriate screen resolution and frame rate to render the emulated app (and the smartphone) in the VR headset. Empath-D obtains screenshots of its mobile emulator using the Android virtual display [35] and transmits these screenshots over WiFi to the Gear VR device. The overhead of transmitting and rendering these emulated screenshots is proportional to their resolution. The default 1080p resolution could sustain a frame rate of only 18 fps, which causes visible jerkiness. To reduce this overhead, we reduced the resolution (using setDisplayProjection() method), and applied differential transmissions, sending a screenshot only when the emulated app's display changes.
262
+
263
+ Figure 8 shows the experimental results on the tradeoff between the resolution and the rendering frame rate, obtained while playing a video to ensure continuous change of the screen content. The frame rate saturates at $57~\text{fps}$ , at a screen resolution of $485\times 863$ . Moreover, through another user study (described next) to understand the minimum resolution to read an app's contents, we empirically verified that the participants had no issues in reading the app's content at the resolution of $485\times 863$ . Hence, we choose this resolution as our default, although this setting can be modified (e.g., we can pick a higher resolution, and a lower frame rate, for an app with mostly static content).
264
+
265
+ If Empath-D displays the virtual smartphone at its original size in the virtual world (portrait position), its display becomes illegible. For example, the Samsung Galaxy S7 (in the Gear VR) has a resolution of $2560 \times 1440$ and an $\approx 101^{\circ}$ horizontal field of view yielding a horizontal pixel density of $\approx 25.3$ pixels/degree. When a virtual phone is held at $30\mathrm{cm}$ away, the horizontal pixel density drops below 25.3 pixels/degree due to downsampling of the virtual phone screen as seen through the VR display. This presents a problem for viewing the content of the virtual phone - in particular, text - as its pixel density is significantly lower than when viewing a physical
266
+
267
+ ![](images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg)
268
+ Figure 9: Readable font size of the virtual smartphone at a magnification ratio
269
+
270
+ phone. For instance, the Galaxy S5 gives $\approx 89.4$ pixels/degree at $30\mathrm{cm}$ distance.
271
+
272
+ We tackle this issue by scaling up the virtual phone's size by a factor that ensures that the phone's display text remains legible. To determine this factor, we recruited three participants and asked them to record the minimum readable font sizes, while showing them a virtual smartphone (at a distance of $30~\mathrm{cm}$ ) with various magnification ratios (increased by 0.1 from 1.0 to 2.7). Figure 9 shows that participants could read text with the font size= 12sp (the commonly used minimum font size for mobile apps) for magnification factors $\geq 1.5$ . Accordingly, we used 1.5 as the default magnification ratio for the smartphone and its display. We also proportionately scaled up the user's rendered hand. User studies (Section 7) show that users found this configuration highly usable.
273
+
274
+ # 6.3 Rendering Virtual Hand
275
+
276
+ As discussed in Section 4.3, the rendering latency of the virtual hand is proportional to the number of vertices in the Delaunay triangulation-based mesh. To reduce the latency, we apply a nonuniform sampling approach. Specifically, Empath-D preserves all the points on the edges of the segment, to preserve the precise contours of the hand. However, it performs a downsampling of the interior points (prior to constructing the Delaunay triangulation), along both the $x$ and $y$ axes, to reduce the computational time significantly, without materially affecting the reconstructed hand image. We empirically determined the sampling rate $X$ , by varying $X$ and measuring both (i) the processing latency and (ii) the SSIM [12, 46] (Structural SIMilarity; a metric of perceived image quality) of the hand images, using 200 RGB-D frames. Figure 10 shows the results. Without any subsampling ( $X = 0\%$ ), the rendering latency is 311.1 msec, which is too high for our responsiveness goal. We empirically downsample the internal pixels by a factor of 32 ( $X = 99.9\%$ ), i.e., choosing every $32^{nd}$ pixel on the grid. This results in a latency of 26.9 msec, while keeping the SSIM = 0.976, a level indistinguishable with the original as perceived by a human.
277
+
278
+ # 6.4 Environment Emulation
279
+
280
+ To enable holistic evaluation of app interactions, Empath-D emulates not just the virtual phone, but the entire virtual world as well. In our current implementation, we emulated a crowded Urban Street environment, which includes crosswalks, traffic lights, pedestrians and commonplace roadside obstacles. To further mimic real-world
281
+
282
+ ![](images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg)
283
+ Figure 10: Rendering latency vs. image quality of the virtual hand
284
+
285
+ movement, our implementation allows the user to navigate the virtual world by (i) rotating her head (this uses the head tracking ability of the VR device), and (ii) by 'walking in place', using the technique proposed in [45] as this does not require any additional hardware on the VR device.
286
+
287
+ # 6.5 VR Manager
288
+
289
+ This component currently executes on the VR smartphone, and is responsible for combining the output of the various components (Hand Tracker, Phone Tracker and Virtual Phone) in the virtual world. This component, implemented as a Unity application, renders these various components. This component is also responsible for applying the impairments on the output of the virtual world. The image effects simulating low vision impairments are defined as a script, Shaders in Unity.
290
+
291
+ # 7 EVALUATION
292
+
293
+ We now present a mix of system and user experiments to evaluate the performance and efficacy of our Empath-D implementation. Besides micro-benchmark studies, we conducted two experiments to capture user interaction with Empath-D. In Experiment 1, we examine the performance of Empath-D vs. a real-world smartphone, in the absence of any impairments. In Experiment 2, we consider an impairment-augmented version of Empath-D, comparing the performance of users against the use of commercial impairment simulation hardware.
294
+
295
+ # 7.1 Micro-benchmark Performance of Empath-D
296
+
297
+ We measured the overall latency of Empath-D, both in terms of the delay in reflecting touch interactions in the virtual world and in terms of the hand tracking delay.
298
+
299
+ 7.1.1 End-to-end Latency of Touch Interaction. As a measure of the overall responsiveness of Empath-D, we computed the latency between a touch input, on the physical smartphone, and the resulting change in the content of the virtual smartphone, rendered in the VR display. To measure this, we utilised a high framerate camera (operating at 240 fps) to concurrently record both the screen of the physical smartphone and the virtual phone (displayed in the VR). The phone screen is coloured green initially, and was programmed to turn red as soon as it received a touch input. We repeated the
300
+
301
+ ![](images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg)
302
+ Figure 11: Overhead of impairment simulation
303
+
304
+ measurement 23 times, capturing (via the video frames) the time gap between (i) the physical smartphone screen turning red and (ii) the virtual smartphone turning red in the VR display. The end-to-end latency is 237.70 msec ( $SD = 20.43$ ).
305
+
306
+ By monitoring the intermediary computer, we obtained the breakdown of this delay: (i) smartphone responsiveness (the time from the user touching the screen till the time the phone transmits the touch event to the computer) $= 0.3$ msec $(SD = 0.16)$ ; (ii) computer emulation responsiveness (the time from receiving the touch event till the time the screenshot of the modified display is sent to the VR device) $= 141.37$ msec $(SD = 6.6)$ , and (iii) the VR responsiveness (the time from receiving the screenshot till it is rendered on the VR display) $= 10.46$ msec $(SD = 8.36)$ . The remaining latency ( $\approx 87$ msec) can be attributed as the WiFi network latency. These micro-measurements suggest that the default Android emulator used in our studies was the dominant component of the latency. The default Android emulator is known to be fairly slow, and multiple third party emulators (e.g., Genymotion [16]) are reported to provide significantly lower latency. Accordingly, we anticipate that this overall latency can be reduced to $\leq 150$ msec, without any significant architectural modification of Empath-D.
307
+
308
+ 7.1.2 End-to-end Latency of Virtual Hand. We also evaluated the latency between the physical movement of the user's hand and the rendering of this movement in the VR display. To capture this time difference, we displayed a small circle, at a specific point on the display, on both the smartphone and the virtual phone. Users were instructed to swipe a finger on the screen to reach the circle. We measured, over 20 experiments, the time (no. of frames from the previously used high framerate camera) between the occlusion of the circle on the physical phone and the resulting occlusion in the virtual phone, computing an average latency of $117.46\mathrm{msec}$ ( $SD = 20.44$ ). Additionally, we measured the component delays of this rendering process as: (i) reading an RGBD frame: $4.90\mathrm{msec}$ ( $SD = 0.58$ ); (ii) phone tracking: $4.56\mathrm{msec}$ ( $SD = 0.25$ ); (iii) hand tracking: $8.0\mathrm{msec}$ ( $SD = 1.58$ ), and (iv) the VR responsiveness (the time from receiving the hand mesh till it is rendered on the VR display): $26.99\mathrm{msec}$ ( $SD = 5.22$ ). The remaining latency, attributable to the WiFi network, is $\approx 73\mathrm{msec}$ , consistent with the measurements reported above.
309
+
310
+ # 7.2 Study Design for Usability Experiments
311
+
312
+ We then conducted user studies on the usability and real-world fidelity of our Empath-D implementation. The user study (approved
313
+
314
+ Table 2: Study Tasks and Conditions in Experiment 1
315
+
316
+ <table><tr><td>Task</td><td>Cond-ition</td><td>Impairment</td><td>Simulator Type</td><td>Enviro-nment</td></tr><tr><td rowspan="6">T1-T4</td><td>A</td><td>none</td><td>none</td><td>Real</td></tr><tr><td>B</td><td>Cataracts</td><td>Physical</td><td>Real</td></tr><tr><td>C</td><td>none</td><td>none</td><td>Virtual</td></tr><tr><td>D</td><td>Cataracts</td><td>Virtual</td><td>Virtual</td></tr><tr><td>E</td><td>Glaucoma</td><td>Physical</td><td>Real</td></tr><tr><td>F</td><td>Glaucoma</td><td>Virtual</td><td>Virtual</td></tr></table>
317
+
318
+ Table 3: Smartphone Interaction Tasks in Experiment 1
319
+
320
+ <table><tr><td>Task Type</td><td>Task Code</td><td>Task Description</td></tr><tr><td rowspan="3">Everyday Phone Use</td><td>T1</td><td>Perform a Calculation</td></tr><tr><td>T2</td><td>Add an Alarm</td></tr><tr><td>T3</td><td>Search, Save Image on Browser</td></tr><tr><td>Controlled Pointing</td><td>T4</td><td>Number Search and Point</td></tr></table>
321
+
322
+ by our institution's IRB) consisted of 12 users (9 males) with no pre-existing uncorrected vision impairments. Users were aged 24-39, with a mean age of 30.3 years $(\mathrm{SD} = 5)$ .
323
+
324
+ Study Tasks and Measures. We adopted a repeated measures design, with participants counterbalanced for condition order (see Table 2 for the conditions). Participants were asked to perform four different tasks split into two task types; everyday phone use, and controlled pointing (see Table 3). Users were asked to perform all tasks using two-handed interaction, holding the phone at a distance that they normally would during daily use. We chose two-handed interaction to eliminate the phone balancing that is typical in one-handed interaction given the typical size of today's smartphones.
325
+
326
+ T1-T3 are everyday tasks users perform on a smartphone. They cover smartphone touch interaction of taps, swipes, and long press, on UI widgets such as keyboards, buttons and scrolling content. Users were asked to experience performing these tasks under six conditions, including under impairments (both using the physical hardware and the VR device). At the end of all three tasks (T1-T3), users completed the NASA-TLX[18] survey to indicate their perceived workload during task performance. T4, on the other hand, is a controlled pointing task experiment. Participants were given a stimulus number and then asked to click on the button with the corresponding number, as quickly and as precisely as they could. (See Figure 12 for a screenshot of the application used in this task.) Users repeated this task 80 times in succession, for each of the six conditions (A-F; see Table 2). We recorded the touch times and positions with the task app. We conducted a short semi-structured interview at the end of the study to understand users' experiences with, and perceptions of, the physical and virtual impairment simulations.
327
+
328
+ Instruments: We compared Empath-D with a commercial physical impairment simulator [13]. To calibrate for visual acuity, we adapted a test similar to a Snellen eye test chart [42] - showing rows of letters with each lower row having a smaller font size. We first used the physical impairment simulator to obtain the minimum acceptable font size. Using the same test page in the VR, we applied
329
+
330
+ 11
331
+
332
+ <table><tr><td>27</td><td>16</td><td>13</td></tr><tr><td>25</td><td>18</td><td>15</td></tr><tr><td>23</td><td>20</td><td>26</td></tr><tr><td>12</td><td>14</td><td>17</td></tr><tr><td>21</td><td>11</td><td>19</td></tr><tr><td>22</td><td>10</td><td>24</td></tr></table>
333
+
334
+ Figure 12: Screenshot of a test application for the pointing task
335
+
336
+ the impairment and gradually adjusted the severity until we hit the minimum acceptable font size. To calibrate the inner circle of clarity for glaucoma, we implemented an app that allows us to adjust the diameter of a coloured circle. We then used the physical impairment simulator for glaucoma, and adjusted the coloured circle to the point in which the circle reaches the fringe for clarity. We then calibrated the virtual glaucoma simulation in a similar manner. Three independent measurements for visual acuity and circle of clarity were taken from the research team and averaged to determine the final calibration parameters of font size $= 12$ sp and diameter $= 60$ mm.
337
+
338
+ # 7.3 Empath-D vs. Physical Smartphone
339
+
340
+ We first investigate whether the VR-based interaction is a sufficiently faithful replica of the real-world interaction that a user would have with a regular smartphone, in the absence of any impairments.
341
+
342
+ Touch Accuracy: In all six conditions, users were able to achieve high levels of button touch accuracy (see Table 4), with the accuracy being $98.8\%$ ( $SD = 1.67$ ) when the users interacted unimpaired with the VR device. Comparing the accuracies between the physical smartphone and the VR device, we noted that the VR condition had an accuracy of $99.12\%$ ( $SD = 1.32$ ) (across all 6 conditions), whereas the use of the physical smartphone provided $100\%$ accuracy. In terms of the location accuracy, we noted a difference of $2.28 \, \text{mm}$ ( $SD = 2.98$ ) between the use of Empath-D vs. a physical smartphone. This difference is well within the uncertainty associated with finger touch interactions, and thus demonstrates that user performance was equivalent across both Empath-D and a physical smartphone.
343
+
344
+ Perceived Workload: NASA-TLX scores indicated that the users did perceive significant differences in their workload using Empath-D, compared to use of the physical smartphone ( $Z = 2.824$ , $p = 0.005 < 0.05$ ). This does suggest that navigating an app within the VR device does require greater cognitive effort than simply interacting with a regular smartphone. However, it is difficult to
345
+
346
+ Table 4: Accuracy of Button Touch Across All Users
347
+
348
+ <table><tr><td>Impairment</td><td>Environment</td><td>Accuracy (SD) %</td></tr><tr><td rowspan="2">None</td><td>Physical</td><td>100</td></tr><tr><td>Virtual</td><td>98.79 (1.67)</td></tr><tr><td rowspan="2">Cataracts</td><td>Physical</td><td>100</td></tr><tr><td>Virtual</td><td>99.09 (1.36)</td></tr><tr><td rowspan="2">Glaucoma</td><td>Physical</td><td>100</td></tr><tr><td>Virtual</td><td>99.49 (0.82)</td></tr></table>
349
+
350
+ decipher whether this difference is due to Empath-D-specific issues, or a general lack of familiarity with VR devices.
351
+
352
+ We additionally investigated the subjective feedback captured by the semi-structured interview. $83\%$ (10) of the users reported perceiving increased latency while using Empath-D, while 2 users indicated that they felt no noticeable latency difference. However, all 12 users indicated that the performance of Empath-D was "acceptable", and they would be able to use the Empath-D system for testing the usability of apps, as long as the apps do not require extremely low-latency interactions. (3 users indicated that the system might not be usable for testing real-time games.)
353
+
354
+ # 7.4 Empath-D vs. Hardware Impairment Simulators
355
+
356
+ We now study the performance of Empath-D vis-a-vis impairments generated using commercially available hardware. Figure 11 shows the overhead of Empath-D under impairment conditions, demonstrating that Empath-D is able to operate without significant performance loss even in the presence of impairments.
357
+
358
+ Touch Accuracy: Table 4 enumerates the accuracy for the pointing task (T4) for two distinct impairments (Cataract & Glaucoma), for both the VR-based Empath-D system and the hardware impairment simulator. We see that, in the Cataract condition, Empath-D had a mean accuracy of $99.09\%$ , which is virtually indistinguishable from that of the hardware device ( $100\%$ ). A similar pattern was observed for the Glaucoma impairment ( $99.49\%$ for Empath-D vs. $100\%$ for Hardware). In terms of the location accuracy, we noted a difference of $1.7 \, \text{mm}$ ( $SD = 1.9$ ) (for Cataract) and $1.2 \, \text{mm}$ ( $SD = 1.6$ ) (for Glaucoma) between the use of Empath-D vs. the impairment hardware. Once again, this difference is well within the uncertainty associated with finger touch interactions. These results provide strong evidence that Empath-D is able to emulate impairment conditions that are equivalent to that of dedicated, commercial hardware.
359
+
360
+ Perceived Workload: The numerical TLX scores indicated that there was no significant difference for Cataracts; however, the difference for Glaucoma was significant $(Z = 3.061$ , $p = 0.002 < 0.05)$ with users indicating a higher perceived workload for the VR device.
361
+
362
+ # 7.5 Motion sickness
363
+
364
+ At the end of the user study, we asked each participant if they felt discomfort or unwell. Only two of the twelve participants reported slight motion sickness while using Empath-D. Motion sickness may
365
+
366
+ arise from: (1) the use of the VR display itself, and (2) the latency from Empath-D. However, it is difficult to separate the two.
367
+
368
+ The effects of motion sickness are notably minor in our current prototype of Empath-D. The nature of our experimentation intensifies the use of the VR display, whereas practical use of Empath-D is likely to be more interspersed between app redesigns. We further discuss how we may improve on latency in Section 9.2 to reduce motion sickness that may result from the latency of Empath-D.
369
+
370
+ # 8 RELATED WORK
371
+
372
+ Designing for Inclusiveness. Newell et al. [31] pointed out that traditional user-centred design techniques provide little guidance for designing interfaces for elderly and disabled users due to the large variation amongst the type and degree of impairments. They also highlighted that the standard guidelines for designing disabled-friendly UIs are too general [30] and lacked empathy for users. For instance the WCAG 2.0 lists that the use of colour "is not used as the only visual means of conveying information, indicating an action, prompting a response or distinguishing a visual element". This requires interpretation by the designer into specific designs in their application. Over the years, various accessibility design guidelines (such as WCAG 2.0 [3], IBM Accessibility Checklist [38], US Section 508 Standards [2]) and tools (aChecker [15]) have been proposed and refined. However, the problems pointed out by Newell remain unsolved to a large extent, which hinders elaborate design for a target user group with a specific impairment.
373
+
374
+ Simulated Design. There exists prior work on helping UI designers design better interfaces for people suffering from vision impairments. Higuchi et al. [19] proposed a tool to simulate the visual capabilities of the elderly for the design of control panels, while Mankoff et al. [26] developed a tool to simulate a user with visual and motor impairments on the desktop screen. SIMVIZ [9, 47] uses the Oculus Rift VR device to simulate visual impairments to examine reading text on a smartphone. For audio modalities, Werfel et al. [47] simulated hearing ailments by using a pair of microphones with equalised headphones.
375
+
376
+ Different from prior works, Empath-D uses VR as the medium for immersive evaluation to 1) flexibly support wider groups of impaired users, and 2) allow naturalistic interactions with a mobile phone in a virtual environment. This novel approach supports ecological validity in testing applications and is key for mobile apps which go beyond the static settings of previous work.
377
+
378
+ While previous work has focused on simulation in single modality (visual or auditory), Empath-D is able to flexibly combine modalities to support any application type, ailment (visual, auditory, motor) and usage environment.
379
+
380
+ System Support for Accessibility. Modern mobile OSes provide accessibility support; in particular, it allows users with far-sightedness to increase fonts and users with blindness to interact through vocal interfaces. Also, Zhong et al. enhanced Android accessibility for users with hand tremor by reducing fine pointing and steady tapping [48]. We believe Empath-D will significantly expand basic accessibility support of commodity devices and accelerate the design and deployment of various accessibility add-ons for different impaired users.
381
+
382
+ Testing of Mobile Applications. Recently there have been many systems, such as VanarSena [37], AMC [22], Puma [17], DynoDroid [25], DECAF [24], AppsPlayground [36], for automatically testing and identifying various types of UI and systems bugs in mobile applications. Empath-D takes a different approach in that we do not detect bugs after the application is developed and deployed. Instead, we allow the designer to test early iterations of the designs rapidly. In this way, we hope to reduce the pain of having to make significant UI changes at the end of the design cycle - or worse, end with an application that cannot be used effectively by the target impaired demographic.
383
+
384
+ # 9 DISCUSSION
385
+
386
+ Our current studies indicate the considerable promise of Empath-D, as a mechanism for rapid and empathetic evaluation of app usability. We now discuss some additional studies and issues that we intend to explore further.
387
+
388
+ # 9.1 User study with Designers
389
+
390
+ We conducted a short user study with two mobile app developers to qualitatively examine Empath-D in actual use. Both developers have previously worked to create an Android mobile application, which was used as the baseline for the study. The developers were tasked with redesigning the mobile app for the glaucoma-impaired under two conditions: 1) without Empath-D, but with materials describing glaucoma and showing functionally accurate examples of glaucoma, and 2) with the same materials, and Empath-D. Both developers agreed that Empath-D helped them improve their designs over the baseline condition. The developers reported that Empath-D allowed them to improve their designs in two ways: 1) they can focus their attention on re-designing particular problematic parts of the UI, and 2) they are able to appropriately calibrate their modifications (for instance increasing the font size may help, but text that is too large will also cause glaucoma sufferers to visually scan more, causing fatigue).
391
+
392
+ # 9.2 Dealing with Latency Issues
393
+
394
+ Our experimental studies indicate that users are able to utilise Empath-D effectively for "conventional" apps—i.e., those that typically involve sporadic interaction by users with UI elements, such as buttons and keyboards. The current end-to-end latency (of $\approx$ 200 msec) is not an impediment for high-fidelity evaluation of such apps. However, the participants also indicated that this latency (lag between user actions and rendering in the VR display) would pose a problem for highly latency-sensitive applications, such as games. At present, it is thus appropriate to state that Empath-D potentially needs additional optimisations to support such applications. The most obvious improvement would be to replace the default Android emulator with a faster, custom emulation engine—this is likely to reduce $\approx$ 100 msec of the delay budget.
395
+
396
+ The current implementation streams JPEG images (hand, emulator's screen) from the intermediary computer to the VR smartphone. We plan to adopt a low-latency video streaming codec such as H.265 HEVC [43], which would help reduce networking and rendering latency. OS-level optimisations (e.g., preemptive priority
397
+
398
+ for inter-component messages) may be needed to support even lower latency.
399
+
400
+ Recently, several works have proposed techniques for achieving high-quality VR experience on mobile devices [7, 10, 21]. Empath-D could borrow some techniques to improve latency and video quality.
401
+
402
+ # 9.3 User Performance with VR Devices
403
+
404
+ Moreover, our user studies also indicated that the time for performing tasks (T1-T4) was marginally higher when using the VR environment, compared to the direct use of a real-world smartphone. More specifically, for the pointing task T4, there was an average difference of 654 msec in the task completion time using Empath-D, compared to the smartphone. In addition, anecdotal comments suggest that continued use of the VR device, for longer-lived sessions, might pose additional usability challenges. For example, a couple of users indicated some minor muscle fatigue, most likely as a result of using a 'heavy' VR device. It is an open question whether these issues will be mitigated over time, as VR devices become lighter and more ergonomic, and as users have greater familiarity with the use of VR devices.
405
+
406
+ # 9.4 Advanced Uses of Empath-D
407
+
408
+ Our current implementation of Empath-D supports the virtualisation of certain output modalities (specifically the display and audio) of the emulated app. The vision of Empath-D can be extended to create other richer interaction modes, often blending virtual and augmented reality (AR) settings. As an example, certain emulation conditions may need to generate and integrate synthetic sensor traces, to replace the real sensor traces from the smartphone—e.g., to mimic the user's movement in locations, such as forests and mountains, the phone's real GPS trace would need to be replaced by a synthetic GPS trace as in [27, 28]. Similarly, in some cases, the app itself might need to take inputs from the VR world—e.g., if the app was being used to magnify certain objects embedded in the VR world. While such use cases can be supported, they will require enhancements to the current Empath-D framework, and it is likely that the implementation may surface additional challenges, in terms of computational overhead and latency.
409
+
410
+ # 9.5 Developing Impairment Filters and Profiles
411
+
412
+ To demonstrate the viability of Empath-D, we focused on demonstrating the ability to simulate visual impairments and in particular cataracts and glaucoma. As we explored, these impairments have functional aspects that are commonly employed to characterise them, such as visual acuity or contrast sensitivity, and are often accompanied by standard tests such as the Snellen eye test chart [42] and Pelli-Robson contrast sensitivity chart [34] respectively. From examining the commercial physical impairment simulator and our experimentation, we believe that Empath-D has the ability to functionally simulate other impairments.
413
+
414
+ We recognise two important directions that Empath-D needs to address to improve impairment simulation and use. First, impairment filters have to be developed in concert with medical professionals who are subject matter experts in the areas of the specific pathologies. This helps to develop a library of impairment filters. Second,
415
+
416
+ with verified impairment filters, we may create impairment profiles, which characterise groups of users with possibly overlapping requirements. For instance, a hypothetical impairment profile may calibrate for a demographic of a range of ages, sex, and percentage of the population who may have myopia and cataracts—both of which affect visual acuity. With impairment profiles, app developers may easily select and understand the demographic for which they are designing.
417
+
418
+ # 10 CONCLUSION
419
+
420
+ We presented the design and evaluation of Empath-D, a framework that allows app developers to 'step into the shoes' of impaired users, and perform an empathetic evaluation of their app interfaces. Our key idea is to utilise a virtual world (using a commodity VR device) to present an impaired view of the app's interface, while allowing the user to interact naturally with a real commodity smartphone in the physical world. Overcoming the current computational limitations (of the VR device and the Android emulator) required us to make careful system choices, such as (i) appropriate tradeoffs between the resolution and frame rate for rendering the virtual smartphone, (ii) subsampling of the mesh representing the user's hand and (iii) scaling up the size of the virtual smartphone to overcome the lower resolution of the VR device. User studies show that Empath-D is effective in (a) providing usability that is equivalent to using a real app (on a real smartphone), for applications that do not require ultra-low latency and (b) emulating impairments in a similar fashion to custom hardware devices. We believe that Empath-D can be a powerful new paradigm for effective bidirectional integration between real-world user actions and virtual worlds, and that this can enable additional immersive applications beyond just 'impairment emulation'.
421
+
422
+ # 11 ACKNOWLEDGEMENT
423
+
424
+ We are thankful to our shepherd Prof. Xia Zhou and all anonymous reviewers for their valuable reviews. This research is supported partially by Singapore Ministry of Education Academic Research Fund Tier 2 under research grant MOE2014-T2-1063, and by the National Research Foundation, Prime Minister's Office, Singapore under its IDM Futures Funding Initiative. All findings and recommendations are those of the authors and do not necessarily reflect the views of the granting agency, or SMU.
425
+
426
+ # REFERENCES
427
+
428
[1] [n. d.]. AGNES (Age Gain Now Empathy System). ([n. d.]). Retrieved 2018-04-13 from http://agelab.mit.edu/agnes-age-gain-now-empathy-system
429
+ [2] [n. d]. US Section 508 Standards. ([n. d]). Retrieved 2018-04-13 from https: //www.section508.gov/
430
+ [3] 2008. Web Content Accessibility Guidelines (WCAG) 2.0. (11 December 2008). Retrieved 2018-04-13 from https://www.w3.org/TR/WCAG20/
431
+ [4] 2016. Samsung Galaxy S7 Specifications. (2016). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/galaxy-s7/#!/spec
432
+ [5] 2017. Samsung Gear VR Specifications. (2017). Retrieved 2018-04-13 from http://www.samsung.com/global/galaxy/gear-vr/specs/
433
+ [6] 2018. SolvePnP, Camera Calibration and 3D Reconstruction, OpenCV. (2018). Retrieved 2018-04-13 from https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
434
+ [7] Omid Abari, Dinesh Bharadia, Austin Duffield, and Dina Katabi. 2017. Enabling High-Quality Untethered Virtual Reality. In 14th USENIX Symposium on Networked Systems Design and Implementation (NSDI 17). USENIX Association, Boston, MA, 531-544. https://www.usenix.org/conference/nsdi17/technical-sessions/presentation/abari
435
+
436
+ [8] Ardalan Amiri Sani, Kevin Boos, Min Hong Yun, and Lin Zhong. 2014. Rio: A System Solution for Sharing I/O Between Mobile Systems. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 259-272. https://doi.org/10.1145/2594368.2594370
437
+ [9] Halim Cagri Ates, Alexander Fiannaca, and Eelke Folmer. 2015. Immersive Simulation of Visual Impairments Using a Wearable See-through Display. In Proceedings of the Ninth International Conference on Tangible, Embedded, and Embodied Interaction (TEI '15). ACM, New York, NY, USA, 225-228. https://doi.org/10.1145/2677199.2680551
438
+ [10] Kevin Boos, David Chu, and Eduardo Cuervo. 2016. FlashBack: Immersive Virtual Reality on Mobile Devices via Rendering Memozoation. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '16). ACM, New York, NY, USA, 291-304. https://doi.org/10.1145/2906388.2906418
439
+ [11] Kenny Tsu Wei Choo, Rajesh Krishna Balan, Tan Kiat Wee, Jagmohan Chauhan, Archan Misra, and Youngki Lee. 2017. Empath-D: Empathetic Design for Accessibility. In Proceedings of the 18th International Workshop on Mobile Computing Systems and Applications (HotMobile '17). ACM, New York, NY, USA, 55-60. https://doi.org/10.1145/3032970.3032981
440
+ [12] Eduardo Cuervo, Alec Wolman, Landon P. Cox, Kiron Lebeck, Ali Razeen, Stefan Saroiu, and Madanlal Musuvathi. 2015. Kahawai: High-Quality Mobile Gaming Using GPU Offload. In Proceedings of the 13th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '15). ACM, New York, NY, USA, 121-135. https://doi.org/10.1145/2742647.2742657
441
+ [13] Marshall Flax. 2018. Low Vision Simulators. (2018). Retrieved 2018-04-13 from https://www.lowvisionsimulators.com/
442
+ [14] S. Garrido-Jurado, R. Mu noz Salinas, F.J. Madrid-Cuevas, and M.J. Marin-Jiménez. 2014. Automatic generation and detection of highly reliable fiducial markers under occlusion. Pattern Recognition 47, 6 (2014), 2280-2292. https://doi.org/10.1016/j.patcog.2014.01.005
443
+ [15] Greg Gay and Cindy Qi Li. 2010. AChecker: Open, Interactive, Customizable, Web Accessibility Checking. In Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A) (W4A '10). ACM, New York, NY, USA, Article 23, 2 pages. https://doi.org/10.1145/1805986.1806019
444
+ [16] Genymotion. [n. d.]. Genymotion Android Emulator. ([n. d.]). Retrieved 2018-04-13 from https://www.genymotion.com/
445
+ [17] Shuai Hao, Bin Liu, Suman Nath, William G.J. Halfond, and Ramesh Govindan. 2014. PUMA: Programmable UI-automation for Large-scale Dynamic Analysis of Mobile Apps. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 204-217. https://doi.org/10.1145/2594368.2594390
446
+ [18] Sandra G Hart and Lowell E Staveland. 1988. Development of NASA-TLX (Task Load Index): Results of empirical and theoretical research. In Advances in psychology. Vol. 52. Elsevier, 139-183.
447
+ [19] Kazunori Higuchi, Yasuo Sakaguchi, Kazuhiko Sugiyama, and Tomoaki Nakano. 1999. Simulating the human vision of elderly for designing control panels. In Systems, Man, and Cybernetics, 1999. IEEE SMC'99 Conference Proceedings. 1999 IEEE International Conference on, Vol. 5. IEEE, 703-708.
448
+ [20] Intel. 2016. Intel®RealSense™ Camera SR300 Product Specifications. (2016). Retrieved 2018-04-13 from https://ark.intel.com/products/92329/Intel-RealSense-Camera-SR300
449
+ [21] Zeci Lai, Y. Charlie Hu, Yong Cui, Linhui Sun, and Ningwei Dai. 2017. Furion: Engineering High-Quality Immersive Virtual Reality on Today's Mobile Devices. In Proceedings of the 23rd Annual International Conference on Mobile Computing and Networking (MobiCom '17). ACM, New York, NY, USA, 409-421. https://doi.org/10.1145/3117811.3117815
450
+ [22] Kyungmin Lee, Jason Flinn, T.J. Giuli, Brian Noble, and Christopher Peplin. 2013. AMC: Verifying User Interface Properties for Vehicular Applications. In Proceeding of the 11th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '13). ACM, New York, NY, USA, 1-12. https://doi.org/10.1145/2462456.2464459
451
+ [23] Gordon E Legge, Sing-Hang Cheung, Deyue Yu, Susana TL Chung, Hye-Won Lee, and Daniel P Owens. 2007. The case for the visual span as a sensory bottleneck in reading. Journal of Vision 7, 2 (2007), 9-9.
452
+ [24] Bin Liu, Suman Nath, Ramesh Govindan, and Jie Liu. 2014. DECAF: Detecting and Characterizing Ad Fraud in Mobile Apps. In 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI 14). USENIX Association, Seattle, WA, 57-70. https://www.usenix.org/conference/nsdi14/technical-sessions/presentation/liu_bin
453
+ [25] Aravind Machiry, Rohan Tahiliani, and Mayur Naik. 2013. Dynodroid: An Input Generation System for Android Apps. In Proceedings of the 2013 9th Joint Meeting on Foundations of Software Engineering (ESEC/FSE 2013). ACM, New York, NY, USA, 224-234. https://doi.org/10.1145/2491411.2491450
454
+ [26] Jennifer Mankoff, Holly Fait, and Ray Juang. 2005. Evaluating accessibility by simulating the experiences of users with vision or motor impairments. IBM Systems Journal 44, 3 (2005), 505-517.
455
+
456
+ [27] Chulhong Min, Seungchul Lee, Changhun Lee, Youngki Lee, Seungwoo Kang, Seungpyo Choi, Wonjung Kim, and Junehwa Song. 2016. PADA: Power-aware Development Assistant for Mobile Sensing Applications. In Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '16). ACM, New York, NY, USA, 946-957. https://doi.org/10.1145/2971648.2971676
457
+ [28] Chulhong Min, Youngki Lee, Chungkuk Yoo, Seungwoo Kang, Sangwon Choi, Pillsoon Park, Inseok Hwang, Younghyun Ju, Seungpyo Choi, and Junehwa Song. 2015. PowerForecaster: Predicting Smartphone Power Impact of Continuous Sensing Applications at Pre-installation Time. In Proceedings of the 13th ACM Conference on Embedded Networked Sensor Systems (SenSys '15). ACM, New York, NY, USA, 31-44. https://doi.org/10.1145/2809695.2809728
458
+ [29] Produkt + Projekt Wolfgang Moll. [n. d.]. Age simulation suit GERT - the GERontic Test suit. ([n. d.]). Retrieved 2018-04-13 from http://www.age-simulation-suit.com/
459
+ [30] Alan Newell and Peter Gregor. 1988. Human computer interaction for people with disabilities. (1988).
460
+ [31] Alan F Newell, Peter Gregor, Maggie Morgan, Graham Pullin, and Catriona Macaulay. 2011. User-sensitive inclusive design. Universal Access in the Information Society 10, 3 (2011), 235-243.
461
+ [32] Nvidia. 2016. GeForce GTX 1080 Specifications. (2016). Retrieved 2018-04-13 from https://www.geforce.com/hardware/Desktop-gpus/geforce-gtx-1080/ specifications
462
+ [33] National Institute on Aging. 2016. World's older population grows dramatically. (28 March 2016). Retrieved 2018-04-13 from https://www.nih.gov/news-events/news-releases/worlds-older-population-grows-dramatically
463
+ [34] DG Pelli, JG Robson, et al. 1988. The design of a new letter chart for measuring contrast sensitivity. In Clinical Vision Sciences. CiteSeer.
464
+ [35] Android Open Source Project. 2017. SurfaceFlinger and HardwareComposer. (March 2017). Retrieved 2018-04-13 from https://source.android.com/devices/ graphics/arch-sf-hwc
465
+ [36] Vaibhav Rastogi, Yan Chen, and William Enck. 2013. AppsPlayground: Automatic Security Analysis of Smartphone Applications. In Proceedings of the Third ACM Conference on Data and Application Security and Privacy (CODASPY '13). ACM, New York, NY, USA, 209-220. https://doi.org/10.1145/2435349.2435379
466
+ [37] Lenin Ravindranath, Suman Nath, Jitendra Padhye, and Hari Balakrishnan. 2014. Automatic and Scalable Fault Detection for Mobile Applications. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys '14). ACM, New York, NY, USA, 190-203. https://doi.org/10.1145/2594368.2594377
467
+ [38] IBM Accessibility Research. 2017. IBM Accessibility Checklist 7.0. (18 July 2017). Retrieved 2018-04-13 from http://www-03.ibm.com/able/guidelines/ci162/accessibility_checklist.html
468
+ [39] Justin B. Rousek, Sonja Koneczny, and M. Susan Hallbeck. 2009. Simulating Visual Impairment to Detect Hospital Wayfinding Difficulties. Proceedings of the Human Factors and Ergonomics Society Annual Meeting 53, 8 (Oct. 2009), 531-535.
469
+ [40] Samsung. 2014. Samsung Galaxy S5 Specifications. (2014). Retrieved 2018-04-13 from http://www.samsung.com/uk/smartphones/galaxy-s5-g900f/SM-G900FZKABTU/
470
+ [41] Alvy Ray Smith and James F. Blinn. 1996. Blue Screen Matting. In Proceedings of the 23rd Annual Conference on Computer Graphics and Interactive Techniques (SIGGRAPH '96). ACM, New York, NY, USA, 259-268. https://doi.org/10.1145/237170.237263
471
+ [42] Herman Snellen. 1873. Probebuchstaben zur bestimmung der sehscharfe. Vol. 1. H. Peters.
472
+ [43] G. J. Sullivan, J. R. Ohm, W. J. Han, and T. Wiegand. 2012. Overview of the High Efficiency Video Coding (HEVC) Standard. IEEE Transactions on Circuits and Systems for Video Technology 22, 12 (Dec 2012), 1649-1668. https://doi.org/10.1109/TCSVT.2012.2221191
473
+ [44] Unity Technologies. [n. d.]. Unity. ([n. d.]). Retrieved 2018-04-13 from https://unity3d.com/
474
+ [45] Sam Tregillus and Eelke Folmer. 2016. VR-STEP: Walking-in-Place Using Inertial Sensing for Hands Free Navigation in Mobile VR Environments. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). ACM, New York, NY, USA, 1250-1255. https://doi.org/10.1145/2858036.2858084
475
+ [46] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 2004. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing 13, 4 (2004), 600-612.
476
+ [47] Fabian Werfel, Roman Wiche, Jochen Feitsch, and Christian Geiger. 2016. Empathizing Audiovisual Sense Impairments: Interactive Real-Time Illustration of Diminished Sense Perception. In Proc. of AH.
477
+ [48] Yu Zhong, Astrid Weber, Casey Burkhardt, Phil Weaver, and Jeffrey P. Bigham. 2015. Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping. In Proceedings of the 12th Web for All Conference (W4A '15). ACM, New York, NY, USA, Article 29, 10 pages. https://doi.org/10.1145/2745555.2747277
478
+ [49] George J. Zimmerman. 1979. Zimmerman Low Vision Simulation Kit. (1979). Retrieved 2018-04-13 from http://www.lowvisionsimulationkit.com/
data/2025/2503_12xxx/2503.12933/images/061f389c91dff21d715784e13d2bb92b0ce1df9f862751e39e580c598af0aa23.jpg ADDED

Git LFS Details

  • SHA256: fcaba3c6dcbb8de1a57a4a4c117494d27f928f82fa0bce72820e8fa8c4f0feb1
  • Pointer size: 130 Bytes
  • Size of remote file: 28.3 kB
data/2025/2503_12xxx/2503.12933/images/11e5b5ede7aadf764283bae50216204001ff4629a008d7bccfe665359f2a217c.jpg ADDED

Git LFS Details

  • SHA256: c0c5c4462bca831f10dcdf223646541b9da9e663ce5f3404a34fc9a633c4bc6c
  • Pointer size: 129 Bytes
  • Size of remote file: 7.05 kB
data/2025/2503_12xxx/2503.12933/images/1dcb64b45b7edead9f0980680ddcb16e04ceb7b4ff22f89c3ac8e10075db7d63.jpg ADDED

Git LFS Details

  • SHA256: a21e5594a2cb23ed5285f89455716ea650da62248828707ce65816eb01a77a10
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
data/2025/2503_12xxx/2503.12933/images/2d0c925cd5b5c961e0517affb584f4170fe5ce6aae91f40748c0cd5792f14f77.jpg ADDED

Git LFS Details

  • SHA256: e48190ae57b97579fe5eb64d15acccdfcf8829d4f00105585a21ba0accd16568
  • Pointer size: 130 Bytes
  • Size of remote file: 26 kB
data/2025/2503_12xxx/2503.12933/images/2f232bd4a97c3fdf7f35dadee020a4b313e917af6e86f761c541d5c89d75b5f5.jpg ADDED

Git LFS Details

  • SHA256: 9fc3b2fb0fe686b984d84ef9279b5108ac7cb05c419aa3110698414894fbf183
  • Pointer size: 130 Bytes
  • Size of remote file: 26.6 kB
data/2025/2503_12xxx/2503.12933/images/6410f07cb949eca3b22ee97fdcbbad013a03f757c41dd81b3492a8c8d7258e5a.jpg ADDED

Git LFS Details

  • SHA256: 3586659b387b4418f8b1fae12f5d53b50bd092d455fb4b954c37ef2e64e341ec
  • Pointer size: 130 Bytes
  • Size of remote file: 32.1 kB
data/2025/2503_12xxx/2503.12933/images/7dc207ba8205ccdb334a933ebb68c6cfbb0d9d987fb5854960854ec9b06f67ca.jpg ADDED

Git LFS Details

  • SHA256: 6c4463ab469d5580d7e6da1bf020dc028263a0259da27ad9450f42f814d3fd9a
  • Pointer size: 130 Bytes
  • Size of remote file: 27.6 kB
data/2025/2503_12xxx/2503.12933/images/8638bc94a87df3220c56eaf20424b1d9b0ac79c22a95c4d941e476120acb7b7a.jpg ADDED

Git LFS Details

  • SHA256: 4b125913f12dd0e3e8ff35624572b9319ad189fbecfe0cb8b8265ef4fb3b85ab
  • Pointer size: 130 Bytes
  • Size of remote file: 15.4 kB
data/2025/2503_12xxx/2503.12933/images/afc2aa0c18463b9d417e5b5637078d0b8fe1fa5610f226957679066ce8a41372.jpg ADDED

Git LFS Details

  • SHA256: ebf4a8a028726b0ad3d9f2a6d16ed3d2a967e03d6626b3b9f65c29a876deb202
  • Pointer size: 130 Bytes
  • Size of remote file: 38.6 kB
data/2025/2503_12xxx/2503.12933/images/b377111a3890b5b21d82f0ce87f6713e12d99dbe61914f2f616d4f3b1f803405.jpg ADDED

Git LFS Details

  • SHA256: a244e3df8a6b7ef14d64981964acb1d16e7d4385e900f3508d875d6a9a65f69f
  • Pointer size: 129 Bytes
  • Size of remote file: 7.19 kB
data/2025/2503_12xxx/2503.12933/images/b3e9e489695a4a9946ad9fb6d24d204879d2ca00e03d4c16e01c599e24768049.jpg ADDED

Git LFS Details

  • SHA256: e204a6dd38b6d7243f7a58be68e8ed26e2248ed3ee58817a7f8a61d2f8f9ce62
  • Pointer size: 129 Bytes
  • Size of remote file: 6.2 kB
data/2025/2503_12xxx/2503.12933/images/bd83a371d793f92d12e8cfe908d31d13c777cba3ad2142f6936b839ce6a0da4b.jpg ADDED

Git LFS Details

  • SHA256: f5a732d5f9f432ae265dab358ac9a6e1ce0ae5c79d82b8b2221f1144829414eb
  • Pointer size: 130 Bytes
  • Size of remote file: 82.3 kB
data/2025/2503_12xxx/2503.12933/images/ed586f433128be2511fc2fdefb49a6de472fe84bdb9ff54f955321bebd4e5d7b.jpg ADDED

Git LFS Details

  • SHA256: c69a1457ef315cbcc1435120f6f6f51161092aaa5263215302f63406f3f5473e
  • Pointer size: 130 Bytes
  • Size of remote file: 20.4 kB
data/2025/2503_12xxx/2503.12933/images/f48189648e06342f7107918350098ce5353869f4381a97f5a5c27617bfa2b7bc.jpg ADDED

Git LFS Details

  • SHA256: 005015d4ea0f195c6f77d8fdde439feb602e4e235770eb588f1056a58b6cf565
  • Pointer size: 129 Bytes
  • Size of remote file: 9.78 kB
data/2025/2503_12xxx/2503.12933/images/f4fadf609d2c2cfe536a1d5dee2467428b592246da75a7f0e69d720eb5161084.jpg ADDED

Git LFS Details

  • SHA256: afff8327c795d8f885c6cb3ff65720a8d7c4f18e8961f21a2c2975da39ab3094
  • Pointer size: 130 Bytes
  • Size of remote file: 14.6 kB
data/2025/2503_12xxx/2503.12933/images/f63e1c9d8dc06359caf5a4b12930ff0b0d5eaf774c06aa36bff6f66e42c719c4.jpg ADDED

Git LFS Details

  • SHA256: e8828df63ef42a35458766e87be9444438ea54f1a9743f57967518af8883b80d
  • Pointer size: 130 Bytes
  • Size of remote file: 34.6 kB
data/2025/2503_12xxx/2503.12933/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_content_list.json ADDED
@@ -0,0 +1,1741 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 127,
8
+ 128,
9
+ 870,
10
+ 176
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore",
17
+ "bbox": [
18
+ 99,
19
+ 210,
20
+ 895,
21
+ 250
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract",
28
+ "text_level": 1,
29
+ "bbox": [
30
+ 248,
31
+ 282,
32
+ 326,
33
+ 299
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards the reasoning paths that contain necessary intermediate reasoning steps via a soft key-step matching technique, while StepRAR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link.",
40
+ "bbox": [
41
+ 89,
42
+ 316,
43
+ 483,
44
+ 680
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "1. Introduction",
51
+ "text_level": 1,
52
+ "bbox": [
53
+ 91,
54
+ 726,
55
+ 220,
56
+ 742
57
+ ],
58
+ "page_idx": 0
59
+ },
60
+ {
61
+ "type": "text",
62
+ "text": "Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT4) [37, 44, 46, 55]. For example, Mulberry [46] introduces CoMCTS, which utilizes multiple",
63
+ "bbox": [
64
+ 89,
65
+ 752,
66
+ 482,
67
+ 875
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "image",
73
+ "img_path": "images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg",
74
+ "image_caption": [
75
+ "Figure 1. For MLLMs, online reinforcement learning with outcome-level reward, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7b over MathVista."
76
+ ],
77
+ "image_footnote": [],
78
+ "bbox": [
79
+ 540,
80
+ 282,
81
+ 883,
82
+ 452
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "text",
88
+ "text": "models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data. However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are.",
89
+ "bbox": [
90
+ 511,
91
+ 652,
92
+ 906,
93
+ 776
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "text",
99
+ "text": "In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via actively selfexploring. The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM",
100
+ "bbox": [
101
+ 511,
102
+ 779,
103
+ 908,
104
+ 902
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "aside_text",
110
+ "text": "arXiv:2503.12937v2 [cs.AI] 4 Aug 2025",
111
+ "bbox": [
112
+ 22,
113
+ 284,
114
+ 60,
115
+ 710
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "page_footnote",
121
+ "text": "Correspondence to: Jiaxing Huang {jiaxing.huang@ntu.edu.sg}.",
122
+ "bbox": [
123
+ 112,
124
+ 886,
125
+ 457,
126
+ 901
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones.",
133
+ "bbox": [
134
+ 89,
135
+ 90,
136
+ 480,
137
+ 181
138
+ ],
139
+ "page_idx": 1
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods for MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues on MLLM reasoning learning, resulting in suboptimal performance. Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, whereas only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1.",
144
+ "bbox": [
145
+ 89,
146
+ 184,
147
+ 480,
148
+ 364
149
+ ],
150
+ "page_idx": 1
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Stepwise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR).",
155
+ "bbox": [
156
+ 89,
157
+ 367,
158
+ 480,
159
+ 532
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities.",
166
+ "bbox": [
167
+ 89,
168
+ 535,
169
+ 482,
170
+ 806
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and encour",
177
+ "bbox": [
178
+ 89,
179
+ 810,
180
+ 480,
181
+ 900
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "ages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need of process reward models. This significantly reduces computational overhead while maintaining fine-grained step-wise supervisions.",
188
+ "bbox": [
189
+ 511,
190
+ 90,
191
+ 903,
192
+ 196
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "text",
198
+ "text": "The main contributions of this work are threefold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need of process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series MLLMs that have superior reasoning capabilities. Forth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs.",
199
+ "bbox": [
200
+ 511,
201
+ 198,
202
+ 903,
203
+ 407
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "2. Related Work",
210
+ "text_level": 1,
211
+ "bbox": [
212
+ 511,
213
+ 424,
214
+ 653,
215
+ 440
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "2.1. Multimodal Large Language Model",
222
+ "text_level": 1,
223
+ "bbox": [
224
+ 511,
225
+ 450,
226
+ 823,
227
+ 465
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual contents across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input multiple modalities such as images [20, 21, 53], videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point cloud inputs beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability.",
234
+ "bbox": [
235
+ 511,
236
+ 473,
237
+ 903,
238
+ 760
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "2.2. MLLM Reasoning",
245
+ "text_level": 1,
246
+ "bbox": [
247
+ 511,
248
+ 771,
249
+ 692,
250
+ 787
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLM. Generally, current MLLM reasoning methods improve the reasoning capability of MLLM by generating high-quality chain-of-thoughts (CoT) data using powerful model (e.g., GPT-4) and performing supervised",
257
+ "bbox": [
258
+ 511,
259
+ 794,
260
+ 903,
261
+ 902
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "text",
267
+ "text": "fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLM and proposes CoMCTS which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. Different from these works, we design StepGRPO that enables MLLM to self-improve the reasoning ability with step-wise reward signals.",
268
+ "bbox": [
269
+ 89,
270
+ 90,
271
+ 480,
272
+ 242
273
+ ],
274
+ "page_idx": 2
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "2.3. Reinforcement Learning",
279
+ "text_level": 1,
280
+ "bbox": [
281
+ 89,
282
+ 251,
283
+ 316,
284
+ 267
285
+ ],
286
+ "page_idx": 2
287
+ },
288
+ {
289
+ "type": "text",
290
+ "text": "Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation.",
291
+ "bbox": [
292
+ 89,
293
+ 272,
294
+ 482,
295
+ 454
296
+ ],
297
+ "page_idx": 2
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. Recent methods have found that using a simple outcome-level rule-based reward function (i.e., the reasoning trajectories leading to correct answer are rewarded with higher score) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim for improving the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to stable training process and better reasoning capability.",
302
+ "bbox": [
303
+ 89,
304
+ 454,
305
+ 482,
306
+ 801
307
+ ],
308
+ "page_idx": 2
309
+ },
310
+ {
311
+ "type": "text",
312
+ "text": "3. Method",
313
+ "text_level": 1,
314
+ "bbox": [
315
+ 89,
316
+ 814,
317
+ 181,
318
+ 830
319
+ ],
320
+ "page_idx": 2
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "This section first presents the task formulation, and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). More details to be elaborated in the ensuing subsections.",
325
+ "bbox": [
326
+ 89,
327
+ 839,
328
+ 482,
329
+ 901
330
+ ],
331
+ "page_idx": 2
332
+ },
333
+ {
334
+ "type": "text",
335
+ "text": "3.1. Task Formulation",
336
+ "text_level": 1,
337
+ "bbox": [
338
+ 513,
339
+ 90,
340
+ 687,
341
+ 104
342
+ ],
343
+ "page_idx": 2
344
+ },
345
+ {
346
+ "type": "text",
347
+ "text": "In this paper, we consider a pre-trained MLLM and denote it as a policy model $\\pi_{\\theta}$ . Given a multimodal question $Q$ consisting of an image and a textual task instruction, i.e., $Q = \\{\\text{text}, \\text{image}\\}$ , the policy model $\\pi$ generates response $\\mathbf{c}$ with a step-by-step reasoning trajectory. Generally, this process can be formulated as a sequence of next token prediction actions, i.e., $\\mathbf{c} = (a_1, a_2, \\dots, a_t, \\dots, a_T)$ , where each action $a_t$ is sampled from the policy model $\\pi_{\\theta}$ and $T$ represents the maximum sequence length. After each action, the new state $s_{t+1}$ is determined by updating the current state $s_t$ with the newly generated action $a_t$ , i.e., $s_{t+1} = (s_t, a_t)$ , $1 \\leq t \\leq T$ .",
348
+ "bbox": [
349
+ 511,
350
+ 112,
351
+ 903,
352
+ 294
353
+ ],
354
+ "page_idx": 2
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "Considering this formulation, the objective of our task is to optimize the policy model $\\pi_{\\theta}$ such that it can select better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action $a_{t}$ at state $s_t$ is denoted as $r(s_t,a_t,s_{t + 1})$ . Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens.",
359
+ "bbox": [
360
+ 511,
361
+ 294,
362
+ 905,
363
+ 445
364
+ ],
365
+ "page_idx": 2
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "3.2. Step-wise Group Relative Policy Optimization",
370
+ "text_level": 1,
371
+ "bbox": [
372
+ 511,
373
+ 453,
374
+ 901,
375
+ 469
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1.",
382
+ "bbox": [
383
+ 511,
384
+ 474,
385
+ 905,
386
+ 611
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "3.2.1. Policy Warm-up",
393
+ "text_level": 1,
394
+ "bbox": [
395
+ 511,
396
+ 619,
397
+ 671,
398
+ 633
399
+ ],
400
+ "page_idx": 2
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper stepwise reasoning paths before reinforcement learning. During the warm-up phase, the policy model is fine-tuned using a multimodal dataset $D_{s}$ with Chain-of-Thought (CoT) reasoning path, where each data consists of a multimodal question $Q$ and a step-by-step reasoning path $\\tau$ , i.e., $D_{s} = \\{Q^{n}, \\tau^{n}\\}_{n=1}^{N}$ :",
405
+ "bbox": [
406
+ 511,
407
+ 638,
408
+ 903,
409
+ 760
410
+ ],
411
+ "page_idx": 2
412
+ },
413
+ {
414
+ "type": "equation",
415
+ "text": "\n$$\n\\mathcal {L} _ {\\text {w a r m - u p}} = - \\mathbb {E} _ {\\tau \\sim D _ {s}} [ \\sum_ {t = 1} ^ {T} \\log (\\pi_ {\\theta} (a _ {t} | s _ {t})) ]. \\tag {1}\n$$\n",
416
+ "text_format": "latex",
417
+ "bbox": [
418
+ 562,
419
+ 768,
420
+ 903,
421
+ 811
422
+ ],
423
+ "page_idx": 2
424
+ },
425
+ {
426
+ "type": "text",
427
+ "text": "3.2.2. Step-wise Online Policy Optimization",
428
+ "text_level": 1,
429
+ "bbox": [
430
+ 511,
431
+ 821,
432
+ 818,
433
+ 835
434
+ ],
435
+ "page_idx": 2
436
+ },
437
+ {
438
+ "type": "text",
439
+ "text": "This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 2, for each question $Q \\in D_{s}$ ,",
440
+ "bbox": [
441
+ 511,
442
+ 839,
443
+ 903,
444
+ 901
445
+ ],
446
+ "page_idx": 2
447
+ },
448
+ {
449
+ "type": "image",
450
+ "img_path": "images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg",
451
+ "image_caption": [
452
+ "Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?"
453
+ ],
454
+ "image_footnote": [],
455
+ "bbox": [
456
+ 99,
457
+ 88,
458
+ 205,
459
+ 146
460
+ ],
461
+ "page_idx": 3
462
+ },
463
+ {
464
+ "type": "image",
465
+ "img_path": "images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg",
466
+ "image_caption": [
467
+ "Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is $\\frac{\\text{frac}}{12} \\left\\{ \\begin{array}{l} 2 \\end{array} \\right\\} = 6$ . Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is $\\frac{\\text{frac}}{6} \\left\\{ \\begin{array}{l} 2 \\end{array} \\right\\} = 3$ . The final answer is 3.",
468
+ "(a) Step-wise Reasoning Accuracy Reward",
469
+ "Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model $\\pi_{\\theta}$ generates a group of reasoning paths $\\{\\mathbf{c}^i\\}_{i=1}^M$ and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage $\\hat{A}$ for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively."
470
+ ],
471
+ "image_footnote": [],
472
+ "bbox": [
473
+ 99,
474
+ 152,
475
+ 893,
476
+ 308
477
+ ],
478
+ "page_idx": 3
479
+ },
480
+ {
481
+ "type": "text",
482
+ "text": "Pre-extracted key steps with Augmentations:",
483
+ "text_level": 1,
484
+ "bbox": [
485
+ 106,
486
+ 330,
487
+ 259,
488
+ 354
489
+ ],
490
+ "page_idx": 3
491
+ },
492
+ {
493
+ "type": "list",
494
+ "sub_type": "text",
495
+ "list_items": [
496
+ "1. AD is a median; median is $AD$",
497
+ "2. equal area; ...",
498
+ "3. AE is half of AD; $AE = 1 / 2AD$",
499
+ "4. frac{12}{2} {2} = 6; $\\underline{12 / 2} = 6,\\dots$",
500
+ "5. E is the midpoint; ..",
501
+ "6. frac{6}{2} = 3; 6/2 = 3. ..."
502
+ ],
503
+ "bbox": [
504
+ 104,
505
+ 357,
506
+ 259,
507
+ 420
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "text",
513
+ "text": "Soft key-step matching :",
514
+ "text_level": 1,
515
+ "bbox": [
516
+ 274,
517
+ 330,
518
+ 405,
519
+ 342
520
+ ],
521
+ "page_idx": 3
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "Description: The image shows ...; #Rationale: The question asks for the area...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is $12/2 = 6$ , ..., and the area of triangle ABE is frac{6}{2} = 3. #The final answer is: 3. Step-wise Matching score: 3/6",
526
+ "bbox": [
527
+ 274,
528
+ 342,
529
+ 519,
530
+ 422
531
+ ],
532
+ "page_idx": 3
533
+ },
534
+ {
535
+ "type": "text",
536
+ "text": "(b) Step-wise Reasoning Validity Reward",
537
+ "text_level": 1,
538
+ "bbox": [
539
+ 527,
540
+ 314,
541
+ 746,
542
+ 325
543
+ ],
544
+ "page_idx": 3
545
+ },
546
+ {
547
+ "type": "text",
548
+ "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ # Step1 $\\rightarrow$ ... $\\rightarrow$ #Step $N\\rightarrow$ #Answer.",
549
+ "bbox": [
550
+ 532,
551
+ 330,
552
+ 885,
553
+ 343
554
+ ],
555
+ "page_idx": 3
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "i. Reasoning completeness",
560
+ "bbox": [
561
+ 532,
562
+ 345,
563
+ 668,
564
+ 356
565
+ ],
566
+ "page_idx": 3
567
+ },
568
+ {
569
+ "type": "text",
570
+ "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ #Answer. Missing reasoning steps",
571
+ "bbox": [
572
+ 532,
573
+ 357,
574
+ 877,
575
+ 369
576
+ ],
577
+ "page_idx": 3
578
+ },
579
+ {
580
+ "type": "text",
581
+ "text": "Description $\\rightarrow$ # Step1 $\\rightarrow$ ... $\\rightarrow$ #Step $N\\rightarrow$ #Answer. Missing rationale",
582
+ "bbox": [
583
+ 532,
584
+ 369,
585
+ 895,
586
+ 383
587
+ ],
588
+ "page_idx": 3
589
+ },
590
+ {
591
+ "type": "text",
592
+ "text": "ii. Reasoning logic",
593
+ "bbox": [
594
+ 532,
595
+ 386,
596
+ 629,
597
+ 397
598
+ ],
599
+ "page_idx": 3
600
+ },
601
+ {
602
+ "type": "text",
603
+ "text": "Description $\\rightarrow$ #Rationale $\\rightarrow$ #Answer $\\rightarrow$ #Step1... $\\rightarrow$ #StepN. X \n#Description $\\rightarrow$ #Step3 $\\rightarrow$ #Rationale $\\rightarrow$ ... $\\rightarrow$ #Step I $\\rightarrow$ #Answer X",
604
+ "bbox": [
605
+ 532,
606
+ 397,
607
+ 885,
608
+ 422
609
+ ],
610
+ "page_idx": 3
611
+ },
612
+ {
613
+ "type": "text",
614
+ "text": "the policy model $\\pi_{\\theta}$ first generates a group of $M$ reasoning trajectories via multiple rollouts, i.e., $\\{\\mathbf{c}^i\\}_{i=1}^M$ , where $\\mathbf{c}^i = (a_1^i, a_2^i, \\ldots, a_t^i, \\ldots, a_T^i)$ . After obtaining a group of $M$ reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated reasoning trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., step-wise reasoning accuracy (StepRAR) reward and step-wise reasoning validity reward (StepRVR).",
615
+ "bbox": [
616
+ 88,
617
+ 565,
618
+ 482,
619
+ 700
620
+ ],
621
+ "page_idx": 3
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse reward by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question $Q$ , we pre-extract a set of key reasoning steps $\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}$ from the corresponding reasoning path $\\tau$ in dataset $D_{s}$ . We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the core few words necessary for reasoning. Furthermore, we",
626
+ "bbox": [
627
+ 88,
628
+ 704,
629
+ 482,
630
+ 900
631
+ ],
632
+ "page_idx": 3
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. For example, a mathematical expression such as \" $\\frac{6}{3} = 2$ \" is augmented to \"6/3 = 2\" or \"6 divided by 3 equals 2\".",
637
+ "bbox": [
638
+ 511,
639
+ 565,
640
+ 906,
641
+ 655
642
+ ],
643
+ "page_idx": 3
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "With the extracted key reasoning steps $\\mathbf{v} = \\{v_{1}, v_{2}, \\ldots\\}$ and such soft marching mechanism, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., $k^{i} = |\\mathbf{v}_{\\text{match}}| / |\\mathbf{v}|$ . Then, StepRAR for $1 \\leq t \\leq T$ is defined as:",
648
+ "bbox": [
649
+ 511,
650
+ 655,
651
+ 906,
652
+ 731
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "equation",
658
+ "text": "\n$$\nr _ {a u c} ^ {i} \\left(s _ {t}, a _ {t}, s _ {t + 1}\\right) = \\left\\{ \\begin{array}{l l} 1 + \\alpha k ^ {i}, & \\operatorname {a n s} \\left(s _ {t + 1}\\right) = y, \\\\ \\alpha k ^ {i}, & \\operatorname {a n s} \\left(s _ {t + 1}\\right) \\neq \\text {n u l l}, \\neq y, \\\\ 0, & \\operatorname {a n s} \\left(s _ {t + 1}\\right) = \\text {n u l l}, \\end{array} \\right. \\tag {2}\n$$\n",
659
+ "text_format": "latex",
660
+ "bbox": [
661
+ 517,
662
+ 739,
663
+ 903,
664
+ 809
665
+ ],
666
+ "page_idx": 3
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "where $y$ is the ground-truth answer extracted from CoT reasoning path.",
671
+ "bbox": [
672
+ 511,
673
+ 810,
674
+ 903,
675
+ 840
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing",
682
+ "bbox": [
683
+ 511,
684
+ 840,
685
+ 905,
686
+ 901
687
+ ],
688
+ "page_idx": 3
689
+ },
690
+ {
691
+ "type": "text",
692
+ "text": "answers randomly.",
693
+ "bbox": [
694
+ 89,
695
+ 90,
696
+ 217,
697
+ 104
698
+ ],
699
+ "page_idx": 4
700
+ },
701
+ {
702
+ "type": "text",
703
+ "text": "Step-wise reasoning validity reward (StepRVR) aims to ensure that the generated paths adhere to a logically structured and coherent progression beyond the reasoning accuracy. Prior studies [44, 46] have demonstrated that structural reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as they encourage models to break down complex problems into multiple intermediate steps rather than direct answer generation.",
704
+ "bbox": [
705
+ 89,
706
+ 106,
707
+ 483,
708
+ 242
709
+ ],
710
+ "page_idx": 4
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness $\\delta^c$ and reasoning logic $\\delta^l$ . Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving image description and rationale analysis to establish context, a step-by-step reasoning process and a final answer. In addition to the reasoning completeness, reasoning logic ensures the reasoning path to follow a logical progression, where the background analysis must come before solution steps and the final answer should only appear after reasoning steps are complete.",
715
+ "bbox": [
716
+ 89,
717
+ 244,
718
+ 483,
719
+ 440
720
+ ],
721
+ "page_idx": 4
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "With these two criteria, we define StepRVR as",
726
+ "bbox": [
727
+ 109,
728
+ 441,
729
+ 418,
730
+ 455
731
+ ],
732
+ "page_idx": 4
733
+ },
734
+ {
735
+ "type": "equation",
736
+ "text": "\n$$\nr_{val}^{i}\\left(s_{t}, a_{t}, s_{t+1}\\right) = \\left\\{ \\begin{array}{ll} 1, & \\mathbb{I}\\left(\\delta^{c}\\left(s_{t+1}\\right)\\right) \\cdot \\mathbb{I}\\left(\\delta^{l}\\left(s_{t+1}\\right)\\right) = 1, \\\\ 0, & \\text{otherwise,} \\end{array} \\right. \\tag{3}\n$$\n",
737
+ "text_format": "latex",
738
+ "bbox": [
739
+ 96,
740
+ 469,
741
+ 483,
742
+ 521
743
+ ],
744
+ "page_idx": 4
745
+ },
746
+ {
747
+ "type": "text",
748
+ "text": "where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of generated responses.",
749
+ "bbox": [
750
+ 89,
751
+ 522,
752
+ 483,
753
+ 598
754
+ ],
755
+ "page_idx": 4
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "Optimization with the step-wise rewards. After obtaining two types of step-wise rewards, we compute the overall reward for each reasoning path as $r^i = r_{auc}^i + r_{val}^i$ , and repeatedly compute the rewards for all generated reasoning paths, i.e., $\\{r^1, r^2, \\dots, r^M\\}$ .",
760
+ "bbox": [
761
+ 89,
762
+ 599,
763
+ 483,
764
+ 675
765
+ ],
766
+ "page_idx": 4
767
+ },
768
+ {
769
+ "type": "text",
770
+ "text": "To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:",
771
+ "bbox": [
772
+ 89,
773
+ 676,
774
+ 482,
775
+ 705
776
+ ],
777
+ "page_idx": 4
778
+ },
779
+ {
780
+ "type": "equation",
781
+ "text": "\n$$\n\\hat{A}^{i} = \\frac{r^{i} - \\operatorname{mean}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}{\\operatorname{std}\\left(\\left\\{r^{1}, r^{2}, \\dots, r^{M}\\right\\}\\right)}, \\tag{4}\n$$\n",
782
+ "text_format": "latex",
783
+ "bbox": [
784
+ 166,
785
+ 717,
786
+ 482,
787
+ 753
788
+ ],
789
+ "page_idx": 4
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "where the mean group reward serves as the baseline, and $\\hat{A}^{i}$ measures how much better or worse $r^{i}$ is compared to other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:",
794
+ "bbox": [
795
+ 89,
796
+ 767,
797
+ 483,
798
+ 829
799
+ ],
800
+ "page_idx": 4
801
+ },
802
+ {
803
+ "type": "equation",
804
+ "text": "\n$$\n\\begin{array}{l} \\mathcal{L}_{\\text{StepRL}} = - \\underset{Q \\in D_{s}}{\\mathbb{E}} \\left[ \\frac{1}{M} \\sum_{i=1}^{M} \\left( \\frac{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\left[ \\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right) \\right]_{\\text{nograd}}} \\hat{A}^{i} \\right. \\right. \\tag{5} \\\\ \\left. \\left. - \\beta D_{KL}\\left(\\pi_{\\theta} \\,\\|\\, \\pi_{ref}\\right) \\right) \\right], \\\\ \\end{array}\n$$\n",
805
+ "text_format": "latex",
806
+ "bbox": [
807
+ 116,
808
+ 842,
809
+ 482,
810
+ 902
811
+ ],
812
+ "page_idx": 4
813
+ },
814
+ {
815
+ "type": "code",
816
+ "sub_type": "algorithm",
817
+ "code_caption": [
818
+ "Algorithm 1 Step-wise Group Relative Policy Optimization"
819
+ ],
820
+ "code_body": "Input: Policy model $\\pi_{\\theta}$ initialized by a pre-trained \nMLLM; a multimodal dataset $D_{s} = \\{Q^{n},\\tau^{n}\\}_{n = 1}^{N}$ \nOutput: Trained policy model $\\pi_{\\theta}$ \nPolicy warm-up: \nfor iter $= 1$ to $N$ do Sample $\\{Q,\\tau \\} \\in D_s$ Optimize policy model $\\pi_{\\theta}$ by Eq. 1 \nend for \nStep-wise online policy optimization: \nfor iter $= 1$ to $N$ do Sample $\\{Q,\\tau \\} \\in D_s$ Generate a group of reasoning paths $\\{\\mathbf{c}^i\\}_{i = 1}^M\\sim \\pi_\\theta$ Obtain step-wise rewards $\\{r^i\\}_{i = 1}^M$ by Eqs. 2-3 Obtain relative advantages $\\{\\hat{A}^i\\}_{i = 1}^M$ by Eq. 4 Optimize policy model $\\pi_{\\theta}$ by Eqs. 5-6 \nend for \nreturn policy model $\\pi_{\\theta}$",
821
+ "bbox": [
822
+ 516,
823
+ 108,
824
+ 903,
825
+ 372
826
+ ],
827
+ "page_idx": 4
828
+ },
829
+ {
830
+ "type": "text",
831
+ "text": "where KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:",
832
+ "bbox": [
833
+ 511,
834
+ 397,
835
+ 906,
836
+ 489
837
+ ],
838
+ "page_idx": 4
839
+ },
840
+ {
841
+ "type": "equation",
842
+ "text": "\n$$\nD_{KL}\\left(\\pi_{\\theta} \\,\\|\\, \\pi_{ref}\\right) = \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - \\log \\frac{\\pi_{ref}\\left(\\mathbf{c}^{i} \\mid Q\\right)}{\\pi_{\\theta}\\left(\\mathbf{c}^{i} \\mid Q\\right)} - 1. \\tag{6}\n$$\n",
843
+ "text_format": "latex",
844
+ "bbox": [
845
+ 521,
846
+ 498,
847
+ 906,
848
+ 532
849
+ ],
850
+ "page_idx": 4
851
+ },
852
+ {
853
+ "type": "text",
854
+ "text": "4. Experiment",
855
+ "text_level": 1,
856
+ "bbox": [
857
+ 513,
858
+ 541,
859
+ 638,
860
+ 558
861
+ ],
862
+ "page_idx": 4
863
+ },
864
+ {
865
+ "type": "text",
866
+ "text": "This section presents experiments including datasets and implementation details, main experimental results, ablation studies and discussion, respectively. More details are to be described in the ensuing subsections.",
867
+ "bbox": [
868
+ 511,
869
+ 566,
870
+ 906,
871
+ 627
872
+ ],
873
+ "page_idx": 4
874
+ },
875
+ {
876
+ "type": "text",
877
+ "text": "4.1. Datasets",
878
+ "text_level": 1,
879
+ "bbox": [
880
+ 511,
881
+ 636,
882
+ 614,
883
+ 650
884
+ ],
885
+ "page_idx": 4
886
+ },
887
+ {
888
+ "type": "text",
889
+ "text": "For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K data from Mulberry-260k as our training data. For evaluation, we adopt 8 widely-used multimodal benchmarks for comprehensively evaluating our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks from mathematical reasoning, chart understanding, visual hallucination and general visual understanding.",
890
+ "bbox": [
891
+ 511,
892
+ 657,
893
+ 906,
894
+ 839
895
+ ],
896
+ "page_idx": 4
897
+ },
898
+ {
899
+ "type": "text",
900
+ "text": "4.2. Implementation Details",
901
+ "text_level": 1,
902
+ "bbox": [
903
+ 511,
904
+ 848,
905
+ 730,
906
+ 863
907
+ ],
908
+ "page_idx": 4
909
+ },
910
+ {
911
+ "type": "text",
912
+ "text": "Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art",
913
+ "bbox": [
914
+ 511,
915
+ 869,
916
+ 906,
917
+ 900
918
+ ],
919
+ "page_idx": 4
920
+ },
921
+ {
922
+ "type": "table",
923
+ "img_path": "images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg",
924
+ "table_caption": [],
925
+ "table_footnote": [],
926
+ "table_body": "<table><tr><td>Method</td><td>MathVista</td><td>MMStar</td><td>Math-V</td><td>ChartQA</td><td>DynaMath</td><td>HallBench</td><td>MathVerse</td><td>MMEsum</td><td>MMReason</td><td>AVG</td></tr><tr><td colspan=\"11\">Closed-Source Model</td></tr><tr><td>GPT-4o [15]</td><td>63.8</td><td>63.9</td><td>30.3</td><td>85.7</td><td>63.7</td><td>55.0</td><td>39.4</td><td>2329</td><td>21.1</td><td>56.2</td></tr><tr><td>Claude-3.5 Sonnet [1]</td><td>67.7</td><td>62.2</td><td>-</td><td>90.8</td><td>64.8</td><td>55.0</td><td>-</td><td>1920</td><td>-</td><td>-</td></tr><tr><td colspan=\"11\">Open-Source Model</td></tr><tr><td>Cambrain-1-8B [38]</td><td>49.0</td><td>-</td><td>-</td><td>73.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>MM-1.5-7B [51]</td><td>47.6</td><td>-</td><td>-</td><td>78.6</td><td>-</td><td>-</td><td>-</td><td>1861</td><td>-</td><td>-</td></tr><tr><td>Idefics3-LLaMA3-8B [18]</td><td>58.4</td><td>55.9</td><td>-</td><td>74.8</td><td>-</td><td>-</td><td>-</td><td>1937</td><td>-</td><td>-</td></tr><tr><td>InternVL2-8B [8]</td><td>58.3</td><td>61.5</td><td>-</td><td>83.3</td><td>39.7</td><td>-</td><td>-</td><td>2210</td><td>-</td><td>-</td></tr><tr><td>MiniCPM-V-2.6-8B [48]</td><td>60.6</td><td>57.5</td><td>-</td><td>-</td><td>-</td><td>48.1</td><td>-</td><td>2348</td><td>-</td><td>-</td></tr><tr><td>DeepSeek-VL2-MOE-4.5B [43]</td><td>62.8</td><td>61.3</td><td>-</td><td>86.0</td><td>-</td><td>-</td><td>-</td><td>2253</td><td>11.5</td><td>-</td></tr><tr><td colspan=\"11\">Reasoning Model</td></tr><tr><td>LLaVA-CoT-11B [44]</td><td>54.8</td><td>57.6</td><td>-</td><td>-</td><td>-</td><td>47.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LLaVA-Reasoner-8B [55]</td><td>50.6</td><td>54.0</td><td>-</td><td>83.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Insight-V-8B 
[10]</td><td>49.8</td><td>57.4</td><td>-</td><td>77.4</td><td>-</td><td>-</td><td>-</td><td>2069</td><td>-</td><td>-</td></tr><tr><td>Mulberry-7B [46]</td><td>63.1</td><td>61.3</td><td>-</td><td>83.9</td><td>45.1</td><td>54.1</td><td>-</td><td>2396</td><td>11.8</td><td>-</td></tr><tr><td>LlamaV-o1-11B [37]</td><td>54.4</td><td>59.4</td><td>-</td><td>-</td><td>-</td><td>63.5</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Vision-R1-7B [14]</td><td>73.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LMM-R1 [30]</td><td>63.2</td><td>58.0</td><td>26.3</td><td>-</td><td>-</td><td>-</td><td>41.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>R1-ShareVL-7B [47]</td><td>75.4</td><td>67.0</td><td>29.5</td><td>-</td><td>-</td><td>-</td><td>52.8</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL-2B [41]</td><td>43.0</td><td>48.0</td><td>12.4</td><td>73.5</td><td>24.9</td><td>41.7</td><td>19.7</td><td>1872</td><td>7.7</td><td>37.5</td></tr><tr><td>R1-VL-2B (Ours)</td><td>52.1</td><td>49.8</td><td>17.1</td><td>75.2</td><td>29.4</td><td>44.0</td><td>26.2</td><td>2048</td><td>8.3</td><td>41.6</td></tr><tr><td>Qwen2-VL-7B [41]</td><td>58.2</td><td>60.7</td><td>16.3</td><td>83.0</td><td>42.1</td><td>50.6</td><td>32.5</td><td>2327</td><td>11.9</td><td>48.7</td></tr><tr><td>R1-VL-7B (Ours)</td><td>63.5</td><td>60.0</td><td>24.7</td><td>83.9</td><td>45.2</td><td>54.7</td><td>40.0</td><td>2376</td><td>12.5</td><td>52.1</td></tr><tr><td>Qwen2.5-VL-7B [2]</td><td>68.2</td><td>63.9</td><td>25.1</td><td>87.3</td><td>53.2</td><td>52.1</td><td>49.2</td><td>2347</td><td>17.3</td><td>55.5</td></tr><tr><td>R1-VL-7B* (Ours)</td><td>74.3</td><td>66.2</td><td>28.2</td><td>87.7</td><td>56.5</td><td>57.2</td><td>52.2</td><td>2395</td><td>17.9</td><td>58.4</td></tr></table>",
927
+ "bbox": [
928
+ 96,
929
+ 88,
930
+ 911,
931
+ 486
932
+ ],
933
+ "page_idx": 5
934
+ },
935
+ {
936
+ "type": "table",
937
+ "img_path": "images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg",
938
+ "table_caption": [
939
+ "Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs.* indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47]."
940
+ ],
941
+ "table_footnote": [],
942
+ "table_body": "<table><tr><td rowspan=\"2\">Warm-up</td><td colspan=\"2\">Step-wise reasoning rewards</td><td rowspan=\"2\">MathVista</td></tr><tr><td>StepRAR</td><td>StepRVR</td></tr><tr><td rowspan=\"2\">✓</td><td></td><td></td><td>58.2</td></tr><tr><td></td><td></td><td>61.2</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.4</td></tr><tr><td>✓</td><td></td><td>✓</td><td>61.9</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>63.5</td></tr></table>",
943
+ "bbox": [
944
+ 101,
945
+ 563,
946
+ 478,
947
+ 681
948
+ ],
949
+ "page_idx": 5
950
+ },
951
+ {
952
+ "type": "text",
953
+ "text": "Table 2. Ablation study of StepGRPO over Qwen2-VL-7B.",
954
+ "bbox": [
955
+ 109,
956
+ 691,
957
+ 462,
958
+ 705
959
+ ],
960
+ "page_idx": 5
961
+ },
962
+ {
963
+ "type": "text",
964
+ "text": "open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of $1\\mathrm{e}^{-5}$ for Qwen2-VL-2B and $5\\mathrm{e}^{-6}$ for Qwen2-VL-7B, respectively.",
965
+ "bbox": [
966
+ 89,
967
+ 733,
968
+ 482,
969
+ 809
970
+ ],
971
+ "page_idx": 5
972
+ },
973
+ {
974
+ "type": "text",
975
+ "text": "For the step-wise online policy optimization phase, we perform 4 rollouts per question $(M = 4)$ and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to $L = 1024$ , ensuring that the model can generate complete reasoning paths. Both the policy model and reference model are ini",
976
+ "bbox": [
977
+ 89,
978
+ 810,
979
+ 483,
980
+ 901
981
+ ],
982
+ "page_idx": 5
983
+ },
984
+ {
985
+ "type": "text",
986
+ "text": "tialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is $1\\mathrm{e}^{-6}$ , and we set the batch size to 4. We set the coefficient of match score $\\alpha$ to 0.1 to balance its effect. Following [39], the KL divergence coefficient $\\beta$ in Eq. 5 is set to 0.04 by default. All experiments are conducted on 4 H100-80GB GPUs.",
987
+ "bbox": [
988
+ 511,
989
+ 566,
990
+ 906,
991
+ 672
992
+ ],
993
+ "page_idx": 5
994
+ },
995
+ {
996
+ "type": "text",
997
+ "text": "4.3. Main Experimental Results",
998
+ "text_level": 1,
999
+ "bbox": [
1000
+ 511,
1001
+ 681,
1002
+ 761,
1003
+ 698
1004
+ ],
1005
+ "page_idx": 5
1006
+ },
1007
+ {
1008
+ "type": "text",
1009
+ "text": "We conduct a comprehensive evaluation of R1-VL across eight widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1.",
1010
+ "bbox": [
1011
+ 511,
1012
+ 704,
1013
+ 906,
1014
+ 748
1015
+ ],
1016
+ "page_idx": 5
1017
+ },
1018
+ {
1019
+ "type": "text",
1020
+ "text": "We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, leading to very few reasoning paths receiving rewards, which negatively impacts the reasoning capability. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving $4.6\\%$ improvement over Qwen2-VL-2B and $3.8\\%$ over Qwen2-VL-7B. This improvement is largely attributed to that StepGRPO introduces step-wise reasoning accuracy and validity rewards,",
1021
+ "bbox": [
1022
+ 509,
1023
+ 750,
1024
+ 908,
1025
+ 902
1026
+ ],
1027
+ "page_idx": 5
1028
+ },
1029
+ {
1030
+ "type": "table",
1031
+ "img_path": "images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg",
1032
+ "table_caption": [],
1033
+ "table_footnote": [],
1034
+ "table_body": "<table><tr><td></td><td colspan=\"5\">Number of generations M per question</td></tr><tr><td>Method</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td>R1-VL-7B</td><td>62.5</td><td>62.8</td><td>63.5</td><td>63.2</td><td>63.7</td></tr></table>",
1035
+ "bbox": [
1036
+ 94,
1037
+ 88,
1038
+ 480,
1039
+ 154
1040
+ ],
1041
+ "page_idx": 6
1042
+ },
1043
+ {
1044
+ "type": "text",
1045
+ "text": "which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs.",
1046
+ "bbox": [
1047
+ 89,
1048
+ 219,
1049
+ 482,
1050
+ 263
1051
+ ],
1052
+ "page_idx": 6
1053
+ },
1054
+ {
1055
+ "type": "text",
1056
+ "text": "In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly in mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by $0.6\\%$ and $9.3\\%$ respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by $13.1\\%$ and $9.3\\%$ on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths.",
1057
+ "bbox": [
1058
+ 88,
1059
+ 265,
1060
+ 482,
1061
+ 476
1062
+ ],
1063
+ "page_idx": 6
1064
+ },
1065
+ {
1066
+ "type": "text",
1067
+ "text": "Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrain-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.7 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs.",
1068
+ "bbox": [
1069
+ 88,
1070
+ 477,
1071
+ 482,
1072
+ 628
1073
+ ],
1074
+ "page_idx": 6
1075
+ },
1076
+ {
1077
+ "type": "text",
1078
+ "text": "4.4. Ablation Study",
1079
+ "text_level": 1,
1080
+ "bbox": [
1081
+ 89,
1082
+ 637,
1083
+ 243,
1084
+ 652
1085
+ ],
1086
+ "page_idx": 6
1087
+ },
1088
+ {
1089
+ "type": "text",
1090
+ "text": "We conduct ablation studies for StepGRPO on Qwen2-VL-7B over MathVista benchmark for examining the effect of step-wise reasoning rewards including step-wise reasoning accuracy reward (StepRAR) and step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, involving a warm-up stage improves baseline model to $61.2\\%$ , allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR into the online reinforcement learning process outperforms the model with warm-up by large margins, demonstrating that both two types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., $63.7\\%$ ) is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex",
1091
+ "bbox": [
1092
+ 88,
1093
+ 657,
1094
+ 482,
1095
+ 901
1096
+ ],
1097
+ "page_idx": 6
1098
+ },
1099
+ {
1100
+ "type": "table",
1101
+ "img_path": "images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg",
1102
+ "table_caption": [
1103
+ "Table 3. Parameter analysis of $M$ . The experiments are conducted on Qwen2-VL-7B over MathVista."
1104
+ ],
1105
+ "table_footnote": [],
1106
+ "table_body": "<table><tr><td>Method</td><td>MathVista</td></tr><tr><td>Warm-up</td><td>61.7</td></tr><tr><td>Warm-up + Outcome-level reward</td><td>62.3</td></tr><tr><td>Warm-up + Step-wise reward (Ours)</td><td>63.5</td></tr></table>",
1107
+ "bbox": [
1108
+ 517,
1109
+ 88,
1110
+ 903,
1111
+ 162
1112
+ ],
1113
+ "page_idx": 6
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "text": "Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista.",
1118
+ "bbox": [
1119
+ 511,
1120
+ 174,
1121
+ 906,
1122
+ 202
1123
+ ],
1124
+ "page_idx": 6
1125
+ },
1126
+ {
1127
+ "type": "image",
1128
+ "img_path": "images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg",
1129
+ "image_caption": [
1130
+ "Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista."
1131
+ ],
1132
+ "image_footnote": [],
1133
+ "bbox": [
1134
+ 537,
1135
+ 219,
1136
+ 857,
1137
+ 392
1138
+ ],
1139
+ "page_idx": 6
1140
+ },
1141
+ {
1142
+ "type": "text",
1143
+ "text": "reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process.",
1144
+ "bbox": [
1145
+ 511,
1146
+ 460,
1147
+ 905,
1148
+ 506
1149
+ ],
1150
+ "page_idx": 6
1151
+ },
1152
+ {
1153
+ "type": "text",
1154
+ "text": "4.5. Discussion",
1155
+ "text_level": 1,
1156
+ "bbox": [
1157
+ 511,
1158
+ 516,
1159
+ 632,
1160
+ 531
1161
+ ],
1162
+ "page_idx": 6
1163
+ },
1164
+ {
1165
+ "type": "text",
1166
+ "text": "Parameter analysis. We conduct the parameter analysis on the number of generations $M$ in a group with Qwen2-VL7B over benchmark MathVista, analyzing its impact on reasoning performance. As described in Section 3, $M$ controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger $M$ generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. A larger $M$ results in a more stable and accurate baseline estimation, whereas a small $M$ may lead to high variance in baseline estimation, making the optimization process less reliable. However, increasing $M$ also introduces higher computational costs. Therefore, we set $M = 4$ as the default to balance performance and computational efficiency.",
1167
+ "bbox": [
1168
+ 511,
1169
+ 537,
1170
+ 906,
1171
+ 763
1172
+ ],
1173
+ "page_idx": 6
1174
+ },
1175
+ {
1176
+ "type": "text",
1177
+ "text": "Effectiveness of the step-wise reward. Our proposed stepwise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with outcome-level reward; and (3) Warm-up + Step-wise Re",
1178
+ "bbox": [
1179
+ 511,
1180
+ 765,
1181
+ 908,
1182
+ 902
1183
+ ],
1184
+ "page_idx": 6
1185
+ },
1186
+ {
1187
+ "type": "image",
1188
+ "img_path": "images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg",
1189
+ "image_caption": [
1190
+ "Question: Subtract all red things. \nSubtract all tiny matte balls. How many objects are left?",
1191
+ "Correct answer: 5",
1192
+ "Figure 4. Qualitative comparison."
1193
+ ],
1194
+ "image_footnote": [],
1195
+ "bbox": [
1196
+ 96,
1197
+ 155,
1198
+ 259,
1199
+ 244
1200
+ ],
1201
+ "page_idx": 7
1202
+ },
1203
+ {
1204
+ "type": "text",
1205
+ "text": "Qwen2-VL-7B:",
1206
+ "bbox": [
1207
+ 279,
1208
+ 93,
1209
+ 349,
1210
+ 103
1211
+ ],
1212
+ "page_idx": 7
1213
+ },
1214
+ {
1215
+ "type": "text",
1216
+ "text": "First, identify the red objects in the image. There is 1 red object, which is a small red sphere. Incorrect reasoning",
1217
+ "bbox": [
1218
+ 279,
1219
+ 106,
1220
+ 566,
1221
+ 127
1222
+ ],
1223
+ "page_idx": 7
1224
+ },
1225
+ {
1226
+ "type": "text",
1227
+ "text": "Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects.",
1228
+ "bbox": [
1229
+ 279,
1230
+ 127,
1231
+ 571,
1232
+ 176
1233
+ ],
1234
+ "page_idx": 7
1235
+ },
1236
+ {
1237
+ "type": "text",
1238
+ "text": "After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4",
1239
+ "bbox": [
1240
+ 279,
1241
+ 176,
1242
+ 550,
1243
+ 196
1244
+ ],
1245
+ "page_idx": 7
1246
+ },
1247
+ {
1248
+ "type": "text",
1249
+ "text": "Mulberry-7B:",
1250
+ "text_level": 1,
1251
+ "bbox": [
1252
+ 279,
1253
+ 205,
1254
+ 341,
1255
+ 215
1256
+ ],
1257
+ "page_idx": 7
1258
+ },
1259
+ {
1260
+ "type": "text",
1261
+ "text": "Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere.",
1262
+ "bbox": [
1263
+ 277,
1264
+ 219,
1265
+ 581,
1266
+ 253
1267
+ ],
1268
+ "page_idx": 7
1269
+ },
1270
+ {
1271
+ "type": "text",
1272
+ "text": "Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones.",
1273
+ "bbox": [
1274
+ 277,
1275
+ 255,
1276
+ 576,
1277
+ 281
1278
+ ],
1279
+ "page_idx": 7
1280
+ },
1281
+ {
1282
+ "type": "text",
1283
+ "text": "Let's think step by step.",
1284
+ "bbox": [
1285
+ 279,
1286
+ 282,
1287
+ 401,
1288
+ 292
1289
+ ],
1290
+ "page_idx": 7
1291
+ },
1292
+ {
1293
+ "type": "text",
1294
+ "text": "Step 1: Identify and count all red objects. There is one red sphere. Incorrect reasoning Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted.",
1295
+ "bbox": [
1296
+ 279,
1297
+ 294,
1298
+ 578,
1299
+ 333
1300
+ ],
1301
+ "page_idx": 7
1302
+ },
1303
+ {
1304
+ "type": "text",
1305
+ "text": "Step 3: Subtract the red objects and tiny matte balls from the total number of objects: $7 - 1 = 6$",
1306
+ "bbox": [
1307
+ 279,
1308
+ 333,
1309
+ 571,
1310
+ 354
1311
+ ],
1312
+ "page_idx": 7
1313
+ },
1314
+ {
1315
+ "type": "text",
1316
+ "text": "The final answer is: 6",
1317
+ "bbox": [
1318
+ 279,
1319
+ 356,
1320
+ 393,
1321
+ 364
1322
+ ],
1323
+ "page_idx": 7
1324
+ },
1325
+ {
1326
+ "type": "text",
1327
+ "text": "R1-VL-7B (Ours):",
1328
+ "text_level": 1,
1329
+ "bbox": [
1330
+ 602,
1331
+ 95,
1332
+ 679,
1333
+ 106
1334
+ ],
1335
+ "page_idx": 7
1336
+ },
1337
+ {
1338
+ "type": "text",
1339
+ "text": "Image Description:",
1340
+ "bbox": [
1341
+ 602,
1342
+ 112,
1343
+ 705,
1344
+ 121
1345
+ ],
1346
+ "page_idx": 7
1347
+ },
1348
+ {
1349
+ "type": "text",
1350
+ "text": "The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere.",
1351
+ "bbox": [
1352
+ 602,
1353
+ 121,
1354
+ 890,
1355
+ 152
1356
+ ],
1357
+ "page_idx": 7
1358
+ },
1359
+ {
1360
+ "type": "text",
1361
+ "text": "Rationales:",
1362
+ "bbox": [
1363
+ 602,
1364
+ 160,
1365
+ 671,
1366
+ 169
1367
+ ],
1368
+ "page_idx": 7
1369
+ },
1370
+ {
1371
+ "type": "text",
1372
+ "text": "The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size.",
1373
+ "bbox": [
1374
+ 602,
1375
+ 169,
1376
+ 888,
1377
+ 200
1378
+ ],
1379
+ "page_idx": 7
1380
+ },
1381
+ {
1382
+ "type": "text",
1383
+ "text": "Let's think step by step.",
1384
+ "bbox": [
1385
+ 602,
1386
+ 208,
1387
+ 725,
1388
+ 218
1389
+ ],
1390
+ "page_idx": 7
1391
+ },
1392
+ {
1393
+ "type": "text",
1394
+ "text": "Step 1:",
1395
+ "bbox": [
1396
+ 602,
1397
+ 224,
1398
+ 653,
1399
+ 232
1400
+ ],
1401
+ "page_idx": 7
1402
+ },
1403
+ {
1404
+ "type": "text",
1405
+ "text": "Identify and count all objects in the image. There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere.",
1406
+ "bbox": [
1407
+ 602,
1408
+ 232,
1409
+ 885,
1410
+ 258
1411
+ ],
1412
+ "page_idx": 7
1413
+ },
1414
+ {
1415
+ "type": "text",
1416
+ "text": "Step 2:",
1417
+ "bbox": [
1418
+ 602,
1419
+ 263,
1420
+ 653,
1421
+ 272
1422
+ ],
1423
+ "page_idx": 7
1424
+ },
1425
+ {
1426
+ "type": "text",
1427
+ "text": "Subtract all red things. There is one red sphere.",
1428
+ "bbox": [
1429
+ 602,
1430
+ 272,
1431
+ 808,
1432
+ 281
1433
+ ],
1434
+ "page_idx": 7
1435
+ },
1436
+ {
1437
+ "type": "image",
1438
+ "img_path": "images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg",
1439
+ "image_caption": [],
1440
+ "image_footnote": [],
1441
+ "bbox": [
1442
+ 816,
1443
+ 270,
1444
+ 831,
1445
+ 281
1446
+ ],
1447
+ "page_idx": 7
1448
+ },
1449
+ {
1450
+ "type": "text",
1451
+ "text": "Step 3:",
1452
+ "bbox": [
1453
+ 602,
1454
+ 287,
1455
+ 653,
1456
+ 296
1457
+ ],
1458
+ "page_idx": 7
1459
+ },
1460
+ {
1461
+ "type": "text",
1462
+ "text": "Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small.",
1463
+ "bbox": [
1464
+ 602,
1465
+ 296,
1466
+ 885,
1467
+ 314
1468
+ ],
1469
+ "page_idx": 7
1470
+ },
1471
+ {
1472
+ "type": "text",
1473
+ "text": "Step 4:",
1474
+ "bbox": [
1475
+ 602,
1476
+ 319,
1477
+ 653,
1478
+ 328
1479
+ ],
1480
+ "page_idx": 7
1481
+ },
1482
+ {
1483
+ "type": "text",
1484
+ "text": "Count the remaining objects after removing the red sphere and the cyan sphere.",
1485
+ "bbox": [
1486
+ 602,
1487
+ 328,
1488
+ 880,
1489
+ 345
1490
+ ],
1491
+ "page_idx": 7
1492
+ },
1493
+ {
1494
+ "type": "text",
1495
+ "text": "The final answer is: 5",
1496
+ "bbox": [
1497
+ 602,
1498
+ 351,
1499
+ 735,
1500
+ 361
1501
+ ],
1502
+ "page_idx": 7
1503
+ },
1504
+ {
1505
+ "type": "text",
1506
+ "text": "ward, where the model is optimized with our proposed stepwise reasoning reward. As shown in Table 4, both outcome-level reward and our step-wise reward improve the warm-up model's performance, while our step-wise reward achieves better performance. This further demonstrates that stepwise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue.",
1507
+ "bbox": [
1508
+ 88,
1509
+ 424,
1510
+ 482,
1511
+ 545
1512
+ ],
1513
+ "page_idx": 7
1514
+ },
1515
+ {
1516
+ "type": "text",
1517
+ "text": "Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages MLLM to self-improve the reasoning ability with step-wise reward signals rather than merely imitating the successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides more rich and informative supervision, leading to better reasoning processes compared to SFT.",
1518
+ "bbox": [
1519
+ 89,
1520
+ 547,
1521
+ 482,
1522
+ 805
1523
+ ],
1524
+ "page_idx": 7
1525
+ },
1526
+ {
1527
+ "type": "text",
1528
+ "text": "Qualitative comparison. We provide qualitative comparison of Qwen2-VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final an",
1529
+ "bbox": [
1530
+ 89,
1531
+ 810,
1532
+ 483,
1533
+ 901
1534
+ ],
1535
+ "page_idx": 7
1536
+ },
1537
+ {
1538
+ "type": "text",
1539
+ "text": "swers. In contrast, R1-VL-7B enables more accurate step-by-step reasoning process.",
1540
+ "bbox": [
1541
+ 511,
1542
+ 424,
1543
+ 903,
1544
+ 454
1545
+ ],
1546
+ "page_idx": 7
1547
+ },
1548
+ {
1549
+ "type": "text",
1550
+ "text": "We provide more discussions, experimental results and qualitative analysis in the appendix.",
1551
+ "bbox": [
1552
+ 511,
1553
+ 454,
1554
+ 903,
1555
+ 484
1556
+ ],
1557
+ "page_idx": 7
1558
+ },
1559
+ {
1560
+ "type": "text",
1561
+ "text": "5. Conclusion",
1562
+ "text_level": 1,
1563
+ "bbox": [
1564
+ 511,
1565
+ 497,
1566
+ 633,
1567
+ 513
1568
+ ],
1569
+ "page_idx": 7
1570
+ },
1571
+ {
1572
+ "type": "text",
1573
+ "text": "This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise reward mechanism. Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms, i.e., Step-wise Reasoning Accuracy Reward that rewards the intermediate reasoning steps based on a soft key-step matching technique and Step-wise Reasoning Validity Reward that rewards the reasoning path's reasoning structure and logical consistency through a reasoning completeness and logic evaluation method. In this way, StepGRPO enables to effectively mitigate the sparse reward issue for MLLMs without the need of process reward models and encourages more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over eight benchmarks demonstrate the superiority of the proposed StepGRPO compared with the state-of-the-art MLLMs.",
1574
+ "bbox": [
1575
+ 511,
1576
+ 523,
1577
+ 906,
1578
+ 808
1579
+ ],
1580
+ "page_idx": 7
1581
+ },
1582
+ {
1583
+ "type": "text",
1584
+ "text": "Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL).",
1585
+ "bbox": [
1586
+ 511,
1587
+ 810,
1588
+ 905,
1589
+ 900
1590
+ ],
1591
+ "page_idx": 7
1592
+ },
1593
+ {
1594
+ "type": "text",
1595
+ "text": "References",
1596
+ "text_level": 1,
1597
+ "bbox": [
1598
+ 91,
1599
+ 90,
1600
+ 187,
1601
+ 104
1602
+ ],
1603
+ "page_idx": 8
1604
+ },
1605
+ {
1606
+ "type": "list",
1607
+ "sub_type": "ref_text",
1608
+ "list_items": [
1609
+ "[1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6",
1610
+ "[2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6",
1611
+ "[3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3",
1612
+ "[4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3",
1613
+ "[5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3",
1614
+ "[6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.5",
1615
+ "[7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3",
1616
+ "[8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6",
1617
+ "[9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 2",
1618
+ "[10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6",
1619
+ "[11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 5",
1620
+ "[12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5",
1621
+ "[13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint"
1622
+ ],
1623
+ "bbox": [
1624
+ 93,
1625
+ 114,
1626
+ 482,
1627
+ 888
1628
+ ],
1629
+ "page_idx": 8
1630
+ },
1631
+ {
1632
+ "type": "list",
1633
+ "sub_type": "ref_text",
1634
+ "list_items": [
1635
+ "arXiv:2501.12948, 2025. 1, 3",
1636
+ "[14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6",
1637
+ "[15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6",
1638
+ "[16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3",
1639
+ "[17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2",
1640
+ "[18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6",
1641
+ "[19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2",
1642
+ "[20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024. 2",
1643
+ "[21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2",
1644
+ "[22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 2",
1645
+ "[23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5",
1646
+ "[24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3",
1647
+ "[25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2",
1648
+ "[26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5",
1649
+ "[27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforce"
1650
+ ],
1651
+ "bbox": [
1652
+ 516,
1653
+ 92,
1654
+ 903,
1655
+ 891
1656
+ ],
1657
+ "page_idx": 8
1658
+ },
1659
+ {
1660
+ "type": "list",
1661
+ "sub_type": "ref_text",
1662
+ "list_items": [
1663
+ "ment learning. arXiv preprint arXiv:2503.07365, 2025. 3",
1664
+ "[28] OpenAI. Gpt-4 technical report, 2023. 3",
1665
+ "[29] OpenAI. Introducing openai o1, 2024. 2",
1666
+ "[30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6",
1667
+ "[31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3",
1668
+ "[32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3",
1669
+ "[33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3",
1670
+ "[34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 1, 3, 5",
1671
+ "[35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 2",
1672
+ "[36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3",
1673
+ "[37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6",
1674
+ "[38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6",
1675
+ "[39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.6",
1676
+ "[40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5",
1677
+ "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 6",
1678
+ "[42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng"
1679
+ ],
1680
+ "bbox": [
1681
+ 91,
1682
+ 90,
1683
+ 482,
1684
+ 890
1685
+ ],
1686
+ "page_idx": 9
1687
+ },
1688
+ {
1689
+ "type": "list",
1690
+ "sub_type": "ref_text",
1691
+ "list_items": [
1692
+ "Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. 2",
1693
+ "[43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 1, 2, 6",
1694
+ "[44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6",
1695
+ "[45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5",
1696
+ "[46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6",
1697
+ "[47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharev1: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 3, 6",
1698
+ "[48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6",
1699
+ "[49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2",
1700
+ "[50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3",
1701
+ "[51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6",
1702
+ "[52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2",
1703
+ "[53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 2",
1704
+ "[54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186."
1705
+ ],
1706
+ "bbox": [
1707
+ 516,
1708
+ 92,
1709
+ 905,
1710
+ 893
1711
+ ],
1712
+ "page_idx": 9
1713
+ },
1714
+ {
1715
+ "type": "ref_text",
1716
+ "text": "Springer, 2024. 5",
1717
+ "bbox": [
1718
+ 125,
1719
+ 90,
1720
+ 230,
1721
+ 104
1722
+ ],
1723
+ "page_idx": 10
1724
+ },
1725
+ {
1726
+ "type": "list",
1727
+ "sub_type": "ref_text",
1728
+ "list_items": [
1729
+ "[55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6",
1730
+ "[56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2",
1731
+ "[57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 5"
1732
+ ],
1733
+ "bbox": [
1734
+ 93,
1735
+ 106,
1736
+ 480,
1737
+ 294
1738
+ ],
1739
+ "page_idx": 10
1740
+ }
1741
+ ]
data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_12xxx/2503.12937/939affdd-0491-441c-956b-3cebb8540abd_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cac011a66aa3b0fd0b99a20490ee23528ed74524bd9ad8c98f9adb95d1907bbb
3
+ size 832366
data/2025/2503_12xxx/2503.12937/full.md ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # R1-VL: Learning to Reason with Multimodal Large Language Models via Step-wise Group Relative Policy Optimization
2
+
3
+ Jingyi Zhang Jiaxing Huang Huanjin Yao Shunyu Liu Xikun Zhang Shijian Lu Dacheng Tao Nanyang Technological University, Singapore
4
+
5
+ # Abstract
6
+
7
+ Recent studies generally enhance MLLMs' reasoning capabilities via supervised fine-tuning on high-quality chain-of-thought reasoning data, which often leads models to merely imitate successful reasoning paths without understanding what the wrong reasoning paths are. In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. To this end, we design Step-wise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding. Specifically, StepGRPO introduces two novel rule-based reasoning rewards: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards the reasoning paths that contain necessary intermediate reasoning steps via a soft key-step matching technique, while StepRVR rewards reasoning paths that follow a well-structured and logically consistent reasoning process through a reasoning completeness and logic evaluation strategy. With the proposed StepGRPO, we introduce R1-VL, a series of MLLMs with outstanding capabilities in step-by-step reasoning. Extensive experiments over 8 benchmarks demonstrate the superiority of our methods. Code is available at link.
8
+
9
+ # 1. Introduction
10
+
11
+ Multimodal large language models (MLLMs) have achieved significant progress in vision-language understanding [1, 8, 15, 18, 21, 38, 43, 51]. Recent efforts generally enhance MLLMs' reasoning capabilities by employing supervised fine-tuning (SFT) on high-quality chain-of-thought (CoT) reasoning data generated by powerful models (e.g., GPT4) [37, 44, 46, 55]. For example, Mulberry [46] introduces CoMCTS, which utilizes multiple
12
+
13
+ ![](images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg)
14
+ Figure 1. For MLLMs, online reinforcement learning with outcome-level reward, like in Deepseek-R1's GRPO [34], often suffers from sparse reward issues, where only a few reasoning paths can receive positive/high rewards during training, ultimately leading to poor exploration efficiency and unstable learning process. To tackle this, we propose a novel online reinforcement learning framework that incorporates step-wise reasoning rewards in addition to outcome-level rewards, encouraging MLLMs to iteratively refine their reasoning with dense rewards and resulting in a more stable training process and improved reasoning capability. The experiments are conducted on Qwen2-VL-7b over MathVista.
15
+
16
+ models to collectively search and identify effective reasoning paths, followed by SFT on the collected reasoning data. However, SFT approaches focus solely on positive reasoning paths (i.e., those leading to correct answers), while the negative reasoning paths are largely neglected. This limitation may cause the model to merely imitate successful reasoning paths without understanding what the flawed and wrong reasoning paths are.
17
+
18
+ In this work, we aim to enhance the MLLMs' reasoning ability beyond passively imitating positive reasoning paths. Recent advancements in NLP, such as Deepseek-R1 [13] and Kimi-K1.5 [36], have shown great potential in incentivizing the reasoning capability of LLMs via actively selfexploring. The core design of these advances (e.g., GRPO in Deepseek-R1) lies in online reinforcement learning without the need for reward models, which encourages an LLM
19
+
20
+ to generate a group of reasoning paths and iteratively refine its reasoning process by rewarding the generated reasoning paths based on a rule-based reward function. Typically, an outcome-level reward strategy is used: reasoning paths leading to correct answers receive higher rewards, while those leading to incorrect answers receive lower ones.
21
+
22
+ An intuitive idea is to directly apply these simple and effective LLM online reinforcement learning methods for MLLMs. However, relying solely on outcome-level rewards, like in Deepseek-R1's GRPO, often suffers from sparse reward issues on MLLM reasoning learning, resulting in suboptimal performance. Specifically, most MLLMs, especially smaller ones, exhibit very limited capability in long-chain reasoning accuracy and validity, whereas only a few MLLM-generated reasoning paths can receive positive/high rewards. This lack of positive reward signals reduces exploration efficiency and leads to an unstable learning process, as illustrated in Fig. 1.
23
+
24
+ We propose to tackle this sparse reward issue by introducing dense step-wise reasoning rewards in addition to sparse outcome-level rewards. To this end, we design Stepwise Group Relative Policy Optimization (StepGRPO), a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise rewarding while using no additional process reward models. Specifically, StepGRPO introduces two novel rule-based reasoning reward mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR).
25
+
26
+ StepRAR rewards the reasoning path using a soft key-step matching technique that evaluates whether the reasoning path contains key intermediate reasoning steps (i.e., the necessary steps to reach the correct final solution). StepRVR rewards the reasoning path based on a reasoning completeness and logic evaluation method, which assesses whether the reasoning process is well-structured and logically consistent. In this way, StepRAR and StepRVR help mitigate the sparse reward issue by providing informative rewards, even when the reasoning path does not produce the correct final answer – as long as it includes key intermediate reasoning steps or follows a structured and logical reasoning process. With StepRAR and StepRVR, StepGRPO takes the average step-wise reasoning rewards of a group of sampled reasoning paths as a baseline to estimate the advantage for policy optimization. Using the proposed StepGRPO, we develop R1-VL, a series of MLLMs with R1-like step-by-step reasoning capabilities.
27
+
28
+ The proposed StepGRPO offers two key advantages. 1) Effectiveness. StepGRPO introduces two step-wise reasoning reward mechanisms with group relative optimization, which provide rich and fine-grained step-wise reasoning rewards along the whole reasoning trajectory beyond the final answer. This mitigates the sparse reward issue and encour
29
+
30
+ ages more structured, logically consistent reasoning trajectories. 2) Efficiency. StepGRPO achieves step-wise reasoning rewarding in a rule-based manner, which provides step-wise reasoning rewards while eliminating the need of process reward models. This significantly reduces computational overhead while maintaining fine-grained step-wise supervisions.
31
+
32
+ The main contributions of this work are threefold. First, we propose StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via a simple, effective and dense step-wise rewarding. Second, we design two novel rule-based reasoning reward mechanisms, i.e., step-wise reasoning accuracy reward and step-wise reasoning validity reward, which effectively mitigate the sparse reward issue for MLLMs without the need of process reward models. Third, with the proposed StepGRPO, we develop R1-VL, a series MLLMs that have superior reasoning capabilities. Forth, extensive experiments over multiple benchmarks show that R1-VL achieves superior performance compared with state-of-the-art MLLMs.
33
+
34
+ # 2. Related Work
35
+
36
+ # 2.1. Multimodal Large Language Model
37
+
38
+ Multimodal Large Language Models (MLLMs) [1, 8, 15, 18, 21, 38, 43, 51, 52] have shown remarkable advancements across a wide range of vision-language understanding tasks, demonstrating their capabilities in comprehending and analyzing visual contents across various application domains. Early research on MLLMs primarily focuses on text generation based on text prompts and input multiple modalities such as images [20, 21, 53], videos [9, 35]. Recent advancements further enhance the capabilities of MLLMs from various aspects. For example, recent models [25, 42] incorporate multimodal inputs and outputs such as video, audio, and point cloud inputs beyond text and images. In addition, some efforts attempt to adapt MLLMs for domain-specific tasks, such as medical image understanding [17, 19, 56] and document analysis [22, 49]. In this work, we focus on enhancing the reasoning ability of MLLMs in tackling complex reasoning tasks and introduce R1-VL, a series of MLLMs that have superior reasoning capability.
39
+
40
+ # 2.2. MLLM Reasoning
41
+
42
+ Inspired by the advances in NLP that show great potential in learning to reason and tackling complex language tasks [29], recent studies attempt to enhance the reasoning capability of MLLM. Generally, current MLLM reasoning methods improve the reasoning capability of MLLM by generating high-quality chain-of-thoughts (CoT) data using powerful model (e.g., GPT-4) and performing supervised
43
+
44
+ fine-tuning with the collected data [10, 37, 44, 46, 55]. For example, Mulberry [46] introduces Collective Monte Carlo Tree Search (MCTS) into MLLM and proposes CoMCTS which leverages complementary knowledge from multiple models to collaboratively search and identify effective reasoning paths. In addition, recent works [14, 27, 30, 47] attempt to explore online reinforcement learning to improve the MLLMs' reasoning ability. Different from these works, we design StepGRPO that enables MLLM to self-improve the reasoning ability with step-wise reward signals.
45
+
46
+ # 2.3. Reinforcement Learning
47
+
48
+ Reinforcement Learning (RL) [16] is a fundamental approach in machine learning, where an agent learns to interact with an environment by taking actions, receiving rewards, and updating its policy to maximize the long-term return. With the rise of large language models (LLMs) [4, 28, 31], Reinforcement Learning with Human Feedback (RLHF) [3] has emerged as a key technique for fine-tuning models using human preference data. RLHF leverages algorithms like Proximal Policy Optimization (PPO) [33] and Direct Preference Optimization (DPO) [32] to guide model behavior for improving the alignment, coherence and helpfulness in response generation.
49
+
50
+ Recently, RL is increasingly adopted to enhance LLMs' reasoning capabilities [5, 7, 13, 24, 36, 50], especially for mathematical problem solving. The core is to adopt an appropriate reward function or model that evaluates and reinforces high-quality reasoning paths while penalizing low-quality ones, guiding the model's optimization towards more structured and coherent reasoning trajectories using the RL algorithm. For example, ReST-MCTS* [50] trains a process reward model (PRM) for determining the correctness of each reasoning step within reasoning paths. Recent methods have found that using a simple outcome-level rule-based reward function (i.e., the reasoning trajectories leading to correct answer are rewarded with higher score) can already provide an effective and reliable reward signal during the RL process [13, 24, 36]. For example, DeepSeek-R1 [13] demonstrates that group relative policy optimization (GRPO) [34] with outcome-level reward effectively enhances the reasoning capability of LLMs. In this work, we aim for improving the reasoning capability of MLLMs through reinforcement learning and propose StepGRPO, which effectively tackles the sparse reward issue in MLLMs, leading to stable training process and better reasoning capability.
51
+
52
+ # 3. Method
53
+
54
+ This section first presents the task formulation, and then introduces the proposed Step-wise Group Relative Policy Optimization (StepGRPO). More details to be elaborated in the ensuing subsections.
55
+
56
+ # 3.1. Task Formulation
57
+
58
+ In this paper, we consider a pre-trained MLLM and denote it as a policy model $\pi_{\theta}$ . Given a multimodal question $Q$ consisting of an image and a textual task instruction, i.e., $Q = \{\text{text}, \text{image}\}$ , the policy model $\pi$ generates response $\mathbf{c}$ with a step-by-step reasoning trajectory. Generally, this process can be formulated as a sequence of next token prediction actions, i.e., $\mathbf{c} = (a_1, a_2, \dots, a_t, \dots, a_T)$ , where each action $a_t$ is sampled from the policy model $\pi_{\theta}$ and $T$ represents the maximum sequence length. After each action, the new state $s_{t+1}$ is determined by updating the current state $s_t$ with the newly generated action $a_t$ , i.e., $s_{t+1} = (s_t, a_t)$ , $1 \leq t \leq T$ .
59
+
60
+ Considering this formulation, the objective of our task is to optimize the policy model $\pi_{\theta}$ such that it can select better actions based on the previous states, thereby improving reasoning quality. In the context of reinforcement learning (RL), the policy model is generally optimized by maximizing the cumulative reward, where the reward for taking action $a_{t}$ at state $s_t$ is denoted as $r(s_t,a_t,s_{t + 1})$ . Following prior studies [46], we define an action in this paper as generating a reasoning step, which consists of one or more sentences containing multiple word tokens.
61
+
62
+ # 3.2. Step-wise Group Relative Policy Optimization
63
+
64
+ We propose Step-wise Group Relative Policy Optimization (StepGRPO), a novel online reinforcement fine-tuning framework that mitigates the sparse reward issue for MLLMs and encourages self-improvement in reasoning ability through simple, effective and dense step-wise reward mechanisms. As illustrated in Fig. 2, StepGRPO consists of two phases: (1) a policy warm-up phase and (2) a step-wise online policy optimization phase. The overall algorithm is shown in Algorithm 1.
65
+
66
+ # 3.2.1. Policy Warm-up
67
+
68
+ This phase equips the policy model with fundamental reasoning capabilities, ensuring it can generate proper stepwise reasoning paths before reinforcement learning. During the warm-up phase, the policy model is fine-tuned using a multimodal dataset $D_{s}$ with Chain-of-Thought (CoT) reasoning path, where each data consists of a multimodal question $Q$ and a step-by-step reasoning path $\tau$ , i.e., $D_{s} = \{Q^{n}, \tau^{n}\}_{n=1}^{N}$ :
69
+
70
+ $$
71
+ \mathcal{L}_{\text{warm-up}} = - \mathbb{E}_{\tau \sim D_{s}} \left[ \sum_{t=1}^{T} \log \pi_{\theta}(a_{t} \mid s_{t}) \right]. \tag{1}
72
+ $$
73
+
74
+ # 3.2.2. Step-wise Online Policy Optimization
75
+
76
+ This phase enables MLLMs to self-improve their reasoning ability via online reinforcement learning, mitigating the sparse reward issue through step-wise reasoning rewards. As illustrated in Fig. 2, for each question $Q \in D_{s}$ ,
77
+
78
+ ![](images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg)
79
+ Question: In the given diagram, triangle ABC has AD as its median and point E is the midpoint of AD. If the area of triangle ABC is 12, what is the area of triangle ABE?
80
+
81
+ ![](images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg)
82
+ Answer: Step 1: Since AD is a median, it divides triangle ABC into two equal areas: ABD and ACD. Step 2: Segment AE is half of AD, splitting triangle ABD into two triangles of equal area: ABE and BED. Step 3: The area of triangle ABD is half of triangle ABC, which is $\frac{12}{2} = 6$ . Step 4: Since E is the midpoint of AD, triangle ABE is half of triangle ABD. Therefore, the area of triangle ABE is $\frac{6}{2} = 3$ . The final answer is 3.
83
+ (a) Step-wise Reasoning Accuracy Reward
84
+ Figure 2. Overview of the proposed StepGRPO. StepGRPO consists of two phases: a policy warm-up phase and a step-wise online policy optimization phase. After the warm-up, the policy model $\pi_{\theta}$ generates a group of reasoning paths $\{\mathbf{c}^i\}_{i=1}^M$ and assigns step-wise rewards using two proposed mechanisms: Step-wise Reasoning Accuracy Reward (StepRAR) and Step-wise Reasoning Validity Reward (StepRVR). StepRAR rewards reasoning paths that contain key intermediate steps, identified using a soft key-step matching technique. StepRVR rewards reasoning paths based on completeness and logical consistency, ensuring they are well-structured. StepGRPO then estimates the advantage $\hat{A}$ for policy optimization by using the average step-wise reasoning reward of a group of sampled reasoning paths as a baseline. Examples for StepRAR and StepRVR are illustrated in (a) and (b), respectively.
85
+
86
+ # Pre-extracted key steps with Augmentations:
87
+
88
+ 1. AD is a median; median is $AD$
89
+ 2. equal area; ...
90
+ 3. AE is half of AD; $AE = 1 / 2AD$
91
+ 4. $\frac{12}{2} = 6$; $12 / 2 = 6$, $\dots$
92
+ 5. E is the midpoint; ..
93
+ 6. $\frac{6}{2} = 3$; $6/2 = 3$, ...
94
+
95
+ # Soft key-step matching :
96
+
97
+ Description: The image shows ...; #Rationale: The question asks for the area...; #Step1: ... we find AD is a median of ...; #Step2: ... AE splits triangle ABD ...; #Step3: ... The area of triangle ABD is $12/2 = 6$ , ..., and the area of triangle ABE is $\frac{6}{2} = 3$ . #The final answer is: 3. Step-wise Matching score: 3/6
98
+
99
+ # (b) Step-wise Reasoning Validity Reward
100
+
101
+ Description $\rightarrow$ #Rationale $\rightarrow$ # Step1 $\rightarrow$ ... $\rightarrow$ #Step $N\rightarrow$ #Answer.
102
+
103
+ i. Reasoning completeness
104
+
105
+ Description $\rightarrow$ #Rationale $\rightarrow$ #Answer. Missing reasoning steps
106
+
107
+ Description $\rightarrow$ # Step1 $\rightarrow$ ... $\rightarrow$ #Step $N\rightarrow$ #Answer. Missing rationale
108
+
109
+ ii. Reasoning logic
110
+
111
+ Description $\rightarrow$ #Rationale $\rightarrow$ #Answer $\rightarrow$ #Step1... $\rightarrow$ #StepN. X
112
+ #Description $\rightarrow$ #Step3 $\rightarrow$ #Rationale $\rightarrow$ ... $\rightarrow$ #Step I $\rightarrow$ #Answer X
113
+
114
+ the policy model $\pi_{\theta}$ first generates a group of $M$ reasoning trajectories via multiple rollouts, i.e., $\{\mathbf{c}^i\}_{i=1}^M$ , where $\mathbf{c}^i = (a_1^i, a_2^i, \ldots, a_t^i, \ldots, a_T^i)$ . After obtaining a group of $M$ reasoning trajectories, we employ our proposed step-wise reasoning rewards to evaluate and reward each generated reasoning trajectory. Specifically, we introduce two types of rule-based step-wise rewards, i.e., step-wise reasoning accuracy (StepRAR) reward and step-wise reasoning validity reward (StepRVR).
115
+
116
+ Step-wise reasoning accuracy reward (StepRAR) reduces the effect of learning from sparse reward by additionally rewarding reasoning paths that contain correct intermediate reasoning steps contributing to the final solution. Specifically, for each question $Q$ , we pre-extract a set of key reasoning steps $\mathbf{v} = \{v_{1}, v_{2}, \ldots\}$ from the corresponding reasoning path $\tau$ in dataset $D_{s}$ . We define key steps as the essential variables and equations that directly contribute to the final solution, and prompt GPT-4 to extract several key steps from the reasoning path for each question. To ensure efficient reward assignment, we refine the extracted steps by removing redundant content and retaining only the core few words necessary for reasoning. Furthermore, we
117
+
118
+ augment each extracted key step into multiple equivalent formats to allow more flexible and accurate matching, preventing missed matches due to math-related formatting differences. For example, a mathematical expression such as " $\frac{6}{3} = 2$ " is augmented to "6/3 = 2" or "6 divided by 3 equals 2".
119
+
120
+ With the extracted key reasoning steps $\mathbf{v} = \{v_{1}, v_{2}, \ldots\}$ and the soft matching mechanism described above, we calculate a match score for each generated reasoning path based on the ratio of matched key steps, i.e., $k^{i} = |\mathbf{v}_{\text{match}}| / |\mathbf{v}|$ . Then, StepRAR for $1 \leq t \leq T$ is defined as:
121
+
122
+ $$
123
+ r_{auc}^{i}\left(s_{t}, a_{t}, s_{t+1}\right) = \begin{cases} 1 + \alpha k^{i}, & \text{ans}\left(s_{t+1}\right) = y, \\ \alpha k^{i}, & \text{ans}\left(s_{t+1}\right) \neq \text{null}, \ \text{ans}\left(s_{t+1}\right) \neq y, \\ 0, & \text{ans}\left(s_{t+1}\right) = \text{null}, \end{cases} \tag{2}
124
+ $$
125
+
126
+ where $y$ is the ground-truth answer extracted from CoT reasoning path.
127
+
128
+ By leveraging pre-extracted key reasoning steps, StepRAR efficiently provides additional supervision with a simple soft matching mechanism, ensuring the model learns meaningful reasoning processes instead of guessing
129
+
130
+ answers randomly.
131
+
132
+ Step-wise reasoning validity reward (StepRVR) aims for ensuring the generated paths adhere to a logically structured and coherent progression beyond the reasoning accuracy. Prior studies [44, 46] have demonstrated structural reasoning, such as problem decomposition and progressive reasoning, facilitates more accurate and interpretable reasoning processes, as they encourage models to break down complex problems into multiple intermediate steps rather than direct answer generation.
133
+
134
+ Inspired by these findings, we incorporate step-wise reasoning validity to reinforce well-organized reasoning paths that follow an expected logical flow. Specifically, we define StepRVR using two key criteria: reasoning completeness $\delta^c$ and reasoning logic $\delta^l$ . Reasoning completeness requires the response to include three essential components, i.e., a background analysis involving image description and rationale analysis to establish context, a step-by-step reasoning process and a final answer. In addition to the reasoning completeness, reasoning logic ensures the reasoning path to follow a logical progression, where the background analysis must come before solution steps and the final answer should only appear after reasoning steps are complete.
135
+
136
+ With these two criteria, we define StepRVR as
137
+
138
+ $$
139
+ r_{val}^{i}\left(s_{t}, a_{t}, s_{t+1}\right) = \begin{cases} 1, & \mathbb{I}\left(\delta^{c}\left(s_{t+1}\right)\right) \cdot \mathbb{I}\left(\delta^{l}\left(s_{t+1}\right)\right) = 1, \\ 0, & \text{otherwise}, \end{cases} \tag{3}
140
+ $$
141
+
142
+ where the reasoning trajectory is rewarded only if it satisfies both completeness and logical coherence. By enforcing this, StepRVR helps the model produce structured, interpretable and logically sound reasoning trajectories, enhancing both the quality and reliability of generated responses.
143
+
144
+ Optimization with the step-wise rewards. After obtaining two types of step-wise rewards, we compute the overall reward for each reasoning path as $r^i = r_{auc}^i + r_{val}^i$ , and repeatedly compute the rewards for all generated reasoning paths, i.e., $\{r^1, r^2, \dots, r^M\}$ .
145
+
146
+ To estimate the advantage of each reasoning trajectory, we normalize its reward relative to the group as follows:
147
+
148
+ $$
149
+ \hat{A}^{i} = \frac{r^{i} - \operatorname{mean}\left(\left\{r^{1}, r^{2}, \dots, r^{M}\right\}\right)}{\operatorname{std}\left(\left\{r^{1}, r^{2}, \dots, r^{M}\right\}\right)}, \tag{4}
150
+ $$
151
+
152
+ where the mean group reward serves as the baseline, and $\hat{A}^{i}$ measures how much better or worse $r^{i}$ is compared to other reasoning trajectories within the group. Following this, we optimize the policy model with the loss defined as:
153
+
154
+ $$
155
+ \mathcal{L}_{\text{StepRL}} = - \underset{Q \in D_{s}}{\mathbb{E}} \left[ \frac{1}{M} \sum_{i=1}^{M} \left( \frac{\pi_{\theta}\left(\mathbf{c}^{i} \mid Q\right)}{\left[ \pi_{\theta}\left(\mathbf{c}^{i} \mid Q\right) \right]_{\text{nograd}}} \hat{A}^{i} \right) - \beta D_{KL}\left(\pi_{\theta} \| \pi_{ref}\right) \right], \tag{5}
156
+ $$
157
+
158
+ Algorithm 1 Step-wise Group Relative Policy Optimization
159
+ Input: Policy model $\pi_{\theta}$ initialized by a pre-trained
160
+ MLLM; a multimodal dataset $D_{s} = \{Q^{n},\tau^{n}\}_{n = 1}^{N}$
161
+ Output: Trained policy model $\pi_{\theta}$
162
+ Policy warm-up:
163
+ for iter $= 1$ to $N$ do Sample $\{Q,\tau \} \in D_s$ Optimize policy model $\pi_{\theta}$ by Eq. 1
164
+ end for
165
+ Step-wise online policy optimization:
166
+ for iter $= 1$ to $N$ do Sample $\{Q,\tau \} \in D_s$ Generate a group of reasoning paths $\{\mathbf{c}^i\}_{i = 1}^M\sim \pi_\theta$ Obtain step-wise rewards $\{r^i\}_{i = 1}^M$ by Eqs. 2-3 Obtain relative advantages $\{\hat{A}^i\}_{i = 1}^M$ by Eq. 4 Optimize policy model $\pi_{\theta}$ by Eqs. 5-6
167
+ end for
168
+ return policy model $\pi_{\theta}$
169
+
170
+ where KL divergence is adopted to regularize the policy model, preventing excessive deviation from the reference model. The reference model is typically initialized as the same model as the policy model but remains frozen during RL training. The KL divergence between the policy model and the reference model is estimated as in [34]:
171
+
172
+ $$
173
+ D_{KL}\left(\pi_{\theta} \| \pi_{ref}\right) = \frac{\pi_{ref}\left(\mathbf{c}^{i} \mid Q\right)}{\pi_{\theta}\left(\mathbf{c}^{i} \mid Q\right)} - \log \frac{\pi_{ref}\left(\mathbf{c}^{i} \mid Q\right)}{\pi_{\theta}\left(\mathbf{c}^{i} \mid Q\right)} - 1. \tag{6}
174
+ $$
175
+
176
+ # 4. Experiment
177
+
178
+ This section presents the experiments, covering the datasets and implementation details, main experimental results, ablation studies, and discussion. More details are described in the ensuing subsections.
179
+
180
+ # 4.1. Datasets
181
+
182
+ For policy warm-up, we adopt Mulberry-260k [46] for supervised fine-tuning. For step-wise online policy optimization, we randomly sample 10K data from Mulberry-260k as our training data. For evaluation, we adopt nine widely-used multimodal benchmarks for comprehensively evaluating our proposed StepGRPO, including MathVista [23], MMStar [6], Math-Vision [40], ChartQA [26], DynaMath [57], HallusionBench [12], MathVerse [54], MME [11] and MM-Reason [45]. These multimodal benchmarks cover a wide range of tasks from mathematical reasoning, chart understanding, visual hallucination and general visual understanding.
183
+
184
+ # 4.2. Implementation Details
185
+
186
+ Our proposed StepGRPO is generally applicable to different MLLMs. In our experiments, we adopt two state-of-the-art
187
+
188
+ <table><tr><td>Method</td><td>MathVista</td><td>MMStar</td><td>Math-V</td><td>ChartQA</td><td>DynaMath</td><td>HallBench</td><td>MathVerse</td><td>MMEsum</td><td>MMReason</td><td>AVG</td></tr><tr><td colspan="11">Closed-Source Model</td></tr><tr><td>GPT-4o [15]</td><td>63.8</td><td>63.9</td><td>30.3</td><td>85.7</td><td>63.7</td><td>55.0</td><td>39.4</td><td>2329</td><td>21.1</td><td>56.2</td></tr><tr><td>Claude-3.5 Sonnet [1]</td><td>67.7</td><td>62.2</td><td>-</td><td>90.8</td><td>64.8</td><td>55.0</td><td>-</td><td>1920</td><td>-</td><td>-</td></tr><tr><td colspan="11">Open-Source Model</td></tr><tr><td>Cambrain-1-8B [38]</td><td>49.0</td><td>-</td><td>-</td><td>73.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>MM-1.5-7B [51]</td><td>47.6</td><td>-</td><td>-</td><td>78.6</td><td>-</td><td>-</td><td>-</td><td>1861</td><td>-</td><td>-</td></tr><tr><td>Idefics3-LLaMA3-8B [18]</td><td>58.4</td><td>55.9</td><td>-</td><td>74.8</td><td>-</td><td>-</td><td>-</td><td>1937</td><td>-</td><td>-</td></tr><tr><td>InternVL2-8B [8]</td><td>58.3</td><td>61.5</td><td>-</td><td>83.3</td><td>39.7</td><td>-</td><td>-</td><td>2210</td><td>-</td><td>-</td></tr><tr><td>MiniCPM-V-2.6-8B [48]</td><td>60.6</td><td>57.5</td><td>-</td><td>-</td><td>-</td><td>48.1</td><td>-</td><td>2348</td><td>-</td><td>-</td></tr><tr><td>DeepSeek-VL2-MOE-4.5B [43]</td><td>62.8</td><td>61.3</td><td>-</td><td>86.0</td><td>-</td><td>-</td><td>-</td><td>2253</td><td>11.5</td><td>-</td></tr><tr><td colspan="11">Reasoning Model</td></tr><tr><td>LLaVA-CoT-11B [44]</td><td>54.8</td><td>57.6</td><td>-</td><td>-</td><td>-</td><td>47.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LLaVA-Reasoner-8B [55]</td><td>50.6</td><td>54.0</td><td>-</td><td>83.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Insight-V-8B 
[10]</td><td>49.8</td><td>57.4</td><td>-</td><td>77.4</td><td>-</td><td>-</td><td>-</td><td>2069</td><td>-</td><td>-</td></tr><tr><td>Mulberry-7B [46]</td><td>63.1</td><td>61.3</td><td>-</td><td>83.9</td><td>45.1</td><td>54.1</td><td>-</td><td>2396</td><td>11.8</td><td>-</td></tr><tr><td>LlamaV-o1-11B [37]</td><td>54.4</td><td>59.4</td><td>-</td><td>-</td><td>-</td><td>63.5</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Vision-R1-7B [14]</td><td>73.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LMM-R1 [30]</td><td>63.2</td><td>58.0</td><td>26.3</td><td>-</td><td>-</td><td>-</td><td>41.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>R1-ShareVL-7B [47]</td><td>75.4</td><td>67.0</td><td>29.5</td><td>-</td><td>-</td><td>-</td><td>52.8</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL-2B [41]</td><td>43.0</td><td>48.0</td><td>12.4</td><td>73.5</td><td>24.9</td><td>41.7</td><td>19.7</td><td>1872</td><td>7.7</td><td>37.5</td></tr><tr><td>R1-VL-2B (Ours)</td><td>52.1</td><td>49.8</td><td>17.1</td><td>75.2</td><td>29.4</td><td>44.0</td><td>26.2</td><td>2048</td><td>8.3</td><td>41.6</td></tr><tr><td>Qwen2-VL-7B [41]</td><td>58.2</td><td>60.7</td><td>16.3</td><td>83.0</td><td>42.1</td><td>50.6</td><td>32.5</td><td>2327</td><td>11.9</td><td>48.7</td></tr><tr><td>R1-VL-7B (Ours)</td><td>63.5</td><td>60.0</td><td>24.7</td><td>83.9</td><td>45.2</td><td>54.7</td><td>40.0</td><td>2376</td><td>12.5</td><td>52.1</td></tr><tr><td>Qwen2.5-VL-7B [2]</td><td>68.2</td><td>63.9</td><td>25.1</td><td>87.3</td><td>53.2</td><td>52.1</td><td>49.2</td><td>2347</td><td>17.3</td><td>55.5</td></tr><tr><td>R1-VL-7B* (Ours)</td><td>74.3</td><td>66.2</td><td>28.2</td><td>87.7</td><td>56.5</td><td>57.2</td><td>52.2</td><td>2395</td><td>17.9</td><td>58.4</td></tr></table>
189
+
190
+ Table 1. Main experimental results. To comprehensively examine the proposed StepGRPO, we conduct extensive experiments with two baseline models on eight benchmarks, and compare StepGRPO with various state-of-the-art MLLMs.* indicates that the model is trained using Qwen2.5-VL-7B as the base model with the data from [47].
191
+
192
+ <table><tr><td rowspan="2">Warm-up</td><td colspan="2">Step-wise reasoning rewards</td><td rowspan="2">MathVista</td></tr><tr><td>StepRAR</td><td>StepRVR</td></tr><tr><td rowspan="2">✓</td><td></td><td></td><td>58.2</td></tr><tr><td></td><td></td><td>61.2</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.4</td></tr><tr><td>✓</td><td></td><td>✓</td><td>61.9</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>63.5</td></tr></table>
193
+
194
+ Table 2. Ablation study of StepGRPO over Qwen2-VL-7B.
195
+
196
+ open-source MLLMs, i.e., Qwen2-VL-2B and Qwen2-VL-7B [41]. For the policy warm-up phase, we set the training batch size to 128. Following prior work [46], we use a learning rate of $1\mathrm{e}^{-5}$ for Qwen2-VL-2B and $5\mathrm{e}^{-6}$ for Qwen2-VL-7B, respectively.
197
+
198
+ For the step-wise online policy optimization phase, we perform 4 rollouts per question $(M = 4)$ and set the sampling temperature to 1.2 to encourage diverse reasoning paths. The maximum sequence length is set to $L = 1024$ , ensuring that the model can generate complete reasoning paths. Both the policy model and reference model are ini
199
+
200
+ tialized from the model after the warm-up, with the reference model frozen during RL training. The policy model's learning rate is $1\mathrm{e}^{-6}$ , and we set the batch size to 4. We set the coefficient of match score $\alpha$ to 0.1 to balance its effect. Following [39], the KL divergence coefficient $\beta$ in Eq. 5 is set to 0.04 by default. All experiments are conducted on 4 H100-80GB GPUs.
201
+
202
+ # 4.3. Main Experimental Results
203
+
204
+ We conduct a comprehensive evaluation of R1-VL across eight widely used benchmarks, comparing it with various state-of-the-art MLLMs, as shown in Table 1.
205
+
206
+ We first compare R1-VL with its baseline models, Qwen2-VL-2B and Qwen2-VL-7B. The baseline models exhibit limited reasoning capability, leading to very few reasoning paths receiving rewards, which negatively impacts the reasoning capability. In contrast, R1-VL with our proposed StepGRPO consistently improves the baseline models by significant margins, achieving $4.6\%$ improvement over Qwen2-VL-2B and $3.8\%$ over Qwen2-VL-7B. This improvement is largely attributed to that StepGRPO introduces step-wise reasoning accuracy and validity rewards,
207
+
208
+ <table><tr><td></td><td colspan="5">Number of generations M per question</td></tr><tr><td>Method</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td>R1-VL-7B</td><td>62.5</td><td>62.8</td><td>63.5</td><td>63.2</td><td>63.7</td></tr></table>
209
+
210
+ which provide rich and informative supervision at each reasoning step, effectively mitigating the sparse reward issue for MLLMs.
211
+
212
+ In addition, we compare R1-VL with existing state-of-the-art reasoning MLLMs. As shown in Table 1, R1-VL achieves better performance on most benchmarks, particularly in mathematical reasoning tasks. For example, R1-VL-7B surpasses Mulberry-7B and LlamaV-o1-11B by $0.6\%$ and $9.3\%$ respectively on the reasoning-intensive benchmark MathVista. Notably, R1-VL-2B even outperforms larger MLLMs. For instance, R1-VL-2B largely outperforms LLaVA-Reasoner-8B and LLaVA-CoT-11B by $13.1\%$ and $9.3\%$ on MathVista, respectively. This superior performance demonstrates that StepGRPO effectively enhances MLLMs' reasoning abilities by encouraging self-improvement via step-wise online reinforcement learning, rather than merely imitating positive reasoning paths.
213
+
214
+ Additionally, we benchmark R1-VL against general MLLMs, including closed-source models such as GPT-4o and Claude-3.5 Sonnet, as well as open-source models like Cambrain-1-8B and DeepSeek-VL2-MOE-4.5B. We observe that R1-VL outperforms most open-source MLLMs and achieves competitive results against closed-source models. For example, R1-VL-7B achieves 63.5 accuracy on MathVista, closely matching GPT-4o's accuracy of 63.8. These results further validate StepGRPO's effectiveness in enhancing the reasoning capabilities of MLLMs.
215
+
216
+ # 4.4. Ablation Study
217
+
218
+ We conduct ablation studies for StepGRPO on Qwen2-VL-7B over MathVista benchmark for examining the effect of step-wise reasoning rewards including step-wise reasoning accuracy reward (StepRAR) and step-wise reasoning validity reward (StepRVR), as well as the role of the warm-up phase. As shown in Table 2, involving a warm-up stage improves baseline model to $61.2\%$ , allowing the model to learn basic reasoning knowledge before reinforcement learning. In addition, including either StepRAR or StepRVR into the online reinforcement learning process outperforms the model with warm-up by large margins, demonstrating that both two types of step-wise rewards contribute to enhancing step-by-step reasoning capabilities. The best performance (i.e., $63.5\%$ ) is achieved when both StepRAR and StepRVR are applied together. This shows that StepGRPO effectively improves complex
219
+
220
+ Table 3. Parameter analysis of $M$ . The experiments are conducted on Qwen2-VL-7B over MathVista.
221
+
222
+ <table><tr><td>Method</td><td>MathVista</td></tr><tr><td>Warm-up</td><td>61.7</td></tr><tr><td>Warm-up + Outcome-level reward</td><td>62.3</td></tr><tr><td>Warm-up + Step-wise reward (Ours)</td><td>63.5</td></tr></table>
223
+
224
+ Table 4. Effectiveness of the step-wise reasoning rewards. The experiments are conducted on Qwen2-VL-7B over MathVista.
225
+
226
+ ![](images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg)
227
+ Figure 3. Comparison between StepGRPO and SFT. The experiments are conducted on Qwen2-VL-7B over MathVista.
228
+
229
+ reasoning tasks by reinforcing both the correctness of intermediate steps and the overall logical structure of the reasoning process.
230
+
231
+ # 4.5. Discussion
232
+
233
+ Parameter analysis. We conduct the parameter analysis on the number of generations $M$ in a group with Qwen2-VL-7B over benchmark MathVista, analyzing its impact on reasoning performance. As described in Section 3, $M$ controls the number of generated reasoning trajectories per question during the RL phase. Table 3 shows that a larger $M$ generally leads to better performance. This is because, in group relative optimization, the baseline reward is estimated as the average reward of all generated reasoning paths. A larger $M$ results in a more stable and accurate baseline estimation, whereas a small $M$ may lead to high variance in baseline estimation, making the optimization process less reliable. However, increasing $M$ also introduces higher computational costs. Therefore, we set $M = 4$ as the default to balance performance and computational efficiency.
234
+
235
+ Effectiveness of the step-wise reward. Our proposed stepwise reward mechanism plays a crucial role in mitigating the sparse reward issue by providing fine-grained supervision at each reasoning step. To further validate its effectiveness, we conduct an experiment comparing outcome-level reward against our step-wise reward. Specifically, we evaluate three settings: (1) Warm-up only; (2) Warm-up + Outcome-level Reward, where the model is optimized with outcome-level reward; and (3) Warm-up + Step-wise Re
236
+
237
+ ![](images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg)
238
+ Question: Subtract all red things.
239
+ Subtract all tiny matte balls. How many objects are left?
240
+ Correct answer: 5
241
+ Figure 4. Qualitative comparison.
242
+
243
+ Qwen2-VL-7B:
244
+
245
+ First, identify the red objects in the image. There is 1 red object, which is a small red sphere. Incorrect reasoning
246
+
247
+ Then, subtract the red object from the total number of objects. There are 7 objects in total and subtracting the 1 red object leaves 6 objects. Identify the tiny matte balls in the image. There are 2 tiny matte balls, which are the small green cylinder and the cyan sphere. Subtract the tiny matte balls from the remaining objects.
248
+
249
+ After subtracting the red object, there are 6 objects left. Subtracting the 2 tiny matte balls leaves 4 objects. Answer: 4
250
+
251
+ # Mulberry-7B:
252
+
253
+ Image Description: The image shows several 3D objects on a flat surface. There are two red objects: a small red sphere and a larger green cylinder. Other objects include a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a teal sphere.
254
+
255
+ Rationales: The question asks to subtract all red things and all tiny matte balls. We need to identify these objects in the image and count the remaining ones.
256
+
257
+ Let's think step by step.
258
+
259
+ Step 1: Identify and count all red objects. There is one red sphere. Incorrect reasoning Step 2: Identify and count all tiny matte balls. There is one teal sphere, which is not red, so it is not subtracted.
260
+
261
+ Step 3: Subtract the red objects and tiny matte balls from the total number of objects: $7 - 1 = 6$
262
+
263
+ The final answer is: 6
264
+
265
+ # R1-VL-7B (Ours):
266
+
267
+ Image Description:
268
+
269
+ The image shows a collection of 3D geometric objects on a flat surface. There are various colored shapes, including a red sphere, a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, and a cyan sphere.
270
+
271
+ Rationales:
272
+
273
+ The question asks to subtract all red things and all tiny matte balls, then determine how many objects are left. This involves identifying and counting specific objects based on their color and size.
274
+
275
+ Let's think step by step.
276
+
277
+ Step 1:
278
+
279
+ Identify and count all objects in the image. There are seven objects: a yellow cube, a green cylinder, a blue cylinder, a purple cylinder, a red sphere, and a cyan sphere.
280
+
281
+ Step 2:
282
+
283
+ Subtract all red things. There is one red sphere.
284
+
285
+ ![](images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg)
286
+
287
+ Step 3:
288
+
289
+ Subtract all tiny matte balls from the remaining objects. There is one cyan sphere, which is matte and small.
290
+
291
+ Step 4:
292
+
293
+ Count the remaining objects after removing the red sphere and the cyan sphere.
294
+
295
+ The final answer is: 5
296
+
297
+ ward, where the model is optimized with our proposed stepwise reasoning reward. As shown in Table 4, both outcome-level reward and our step-wise reward improve the warm-up model's performance, while our step-wise reward achieves better performance. This further demonstrates that stepwise rewards are more effective in enhancing MLLMs' reasoning capabilities, as they provide more fine-grained supervision and largely mitigate the sparse reward issue.
298
+
299
+ Comparison to supervised fine-tuning (SFT). As discussed before, StepGRPO encourages MLLM to self-improve the reasoning ability with step-wise reward signals rather than merely imitating the successful reasoning paths. Here, we conduct experiments to further compare StepGRPO with SFT. Specifically, we start with the model after the warm-up and conduct the experiments with Qwen2-VL-7B over MathVista. As shown in Fig. 3, under the same number of training steps, StepGRPO consistently outperforms SFT, demonstrating the effectiveness of step-wise reinforcement learning. This is largely attributed to StepGRPO's ability to refine reasoning trajectories through self-exploration and reward-guided optimization, rather than solely relying on passive imitation of reasoning paths. By leveraging step-wise reasoning rewards, StepGRPO provides more rich and informative supervision, leading to better reasoning processes compared to SFT.
300
+
301
+ Qualitative comparison. We provide qualitative comparison of Qwen2VL-7B, Mulberry-7B and our R1-VL-7B. As shown in Fig. 4, Qwen2-VL-7B generates relatively short responses, lacking a thorough reasoning process. While Mulberry-7B generates detailed reasoning paths, its intermediate steps contain errors, leading to incorrect final an
302
+
303
+ swers. In contrast, R1-VL-7B enables a more accurate step-by-step reasoning process.
304
+
305
+ We provide more discussions, experimental results and qualitative analysis in the appendix.
306
+
307
+ # 5. Conclusion
308
+
309
+ This paper presents StepGRPO, a new online reinforcement learning framework that enables MLLMs to self-improve reasoning ability via simple, effective and dense step-wise reward mechanisms. Specifically, StepGRPO introduces two rule-based reasoning reward mechanisms, i.e., Step-wise Reasoning Accuracy Reward that rewards the intermediate reasoning steps based on a soft key-step matching technique and Step-wise Reasoning Validity Reward that rewards the reasoning path's reasoning structure and logical consistency through a reasoning completeness and logic evaluation method. In this way, StepGRPO effectively mitigates the sparse reward issue for MLLMs without the need of process reward models and encourages a more structured and logically consistent reasoning process. With the proposed StepGRPO, we develop R1-VL, a series of MLLMs with superior reasoning capability. Extensive experiments over eight benchmarks demonstrate the superiority of the proposed StepGRPO compared with the state-of-the-art MLLMs.
310
+
311
+ Acknowledgement. This research is supported by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\*STAR, as well as supported by Alibaba Group and NTU Singapore through Alibaba-NTU Global e-Sustainability CorpLab (ANGEL).
312
+
313
+ # References
314
+
315
+ [1] Anthropic. Claude 3.5 sonnet, 2024. 1, 2, 6
316
+ [2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6
317
+ [3] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. 3
318
+ [4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3
319
+ [5] Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Step-level value preference optimization for mathematical reasoning. arXiv preprint arXiv:2406.10858, 2024. 3
320
+ [6] Lin Chen, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Jiaqi Wang, Yu Qiao, Dahua Lin, et al. Are we on the right way for evaluating large vision-language models? arXiv preprint arXiv:2403.20330, 2024.5
321
+ [7] Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play fine-tuning converts weak language models to strong language models. arXiv preprint arXiv:2401.01335, 2024. 3
322
+ [8] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 1, 2, 6
323
+ [9] Zesen Cheng, Sicong Leng, Hang Zhang, Yifei Xin, Xin Li, Guanzheng Chen, Yongxin Zhu, Wenqi Zhang, Ziyang Luo, Deli Zhao, et al. Videollama 2: Advancing spatial-temporal modeling and audio understanding in video-llms. arXiv preprint arXiv:2406.07476, 2024. 2
324
+ [10] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. 3, 6
325
+ [11] Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Jinrui Yang, Xiawu Zheng, Ke Li, Xing Sun, et al. Mme: A comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394, 2023. 5
326
+ [12] Tianrui Guan, Fuxiao Liu, Xiyang Wu, Ruiqi Xian, Zongxia Li, Xiaoyu Liu, Xijun Wang, Lichang Chen, Furong Huang, Yaser Yacoob, et al. Hallusionbench: An advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. arXiv preprint arXiv:2310.14566, 2023. 5
327
+ [13] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint
328
+
329
+ arXiv:2501.12948, 2025. 1, 3
330
+ [14] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Zhe Xu, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 3, 6
331
+ [15] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. 1, 2, 6
332
+ [16] Leslie Pack Kaelbling, Michael L Littman, and Andrew W Moore. Reinforcement learning: A survey. Journal of artificial intelligence research, 4:237-285, 1996. 3
333
+ [17] Xiang Lan, Feng Wu, Kai He, Qinghao Zhao, Shenda Hong, and Mengling Feng. Gem: Empowering mllm for grounded ecg understanding with time series and images. arXiv preprint arXiv:2503.06073, 2025. 2
334
+ [18] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. In Workshop on Responsibly Building the Next Generation of Multimodal Foundational Models, 2024. 1, 2, 6
335
+ [19] Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv preprint arXiv:2306.00890, 2023. 2
336
+ [20] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024. 2
337
+ [21] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 1, 2
338
+ [22] Yuliang Liu, Biao Yang, Qiang Liu, Zhang Li, Zhiyin Ma, Shuo Zhang, and Xiang Bai. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024. 2
339
+ [23] Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255, 2023. 5
340
+ [24] Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. Reft: Reasoning with reinforced fine-tuning. arXiv preprint arXiv:2401.08967, 2024. 3
341
+ [25] Chenyang Lyu, Minghao Wu, Longyue Wang, Xinting Huang, Bingshuai Liu, Zefeng Du, Shuming Shi, and Zhaopeng Tu. Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration. arXiv preprint arXiv:2306.09093, 2023. 2
342
+ [26] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022. 5
343
+ [27] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Tiancheng Han, Botian Shi, Wenhai Wang, Junjun He, et al. Mm-eureka: Exploring the frontiers of multimodal reasoning with rule-based reinforce
344
+
345
+ ment learning. arXiv preprint arXiv:2503.07365, 2025. 3
346
+ [28] OpenAI. Gpt-4 technical report, 2023. 3
347
+ [29] OpenAI. Introducing openai o1, 2024. 2
348
+ [30] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 3, 6
349
+ [31] Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever, et al. Improving language understanding by generative pre-training. 2018. 3
350
+ [32] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. 3
351
+ [33] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. 3
352
+ [34] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 1, 3, 5
353
+ [35] Guangzhi Sun, Wenyi Yu, Changli Tang, Xianzhao Chen, Tian Tan, Wei Li, Lu Lu, Zejun Ma, Yuxuan Wang, and Chao Zhang. video-salmonn: Speech-enhanced audio-visual large language models. arXiv preprint arXiv:2406.15704, 2024. 2
354
+ [36] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 1, 3
355
+ [37] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186, 2025. 1, 3, 6
356
+ [38] Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860, 2024. 1, 2, 6
357
+ [39] Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.6
358
+ [40] Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Houxing Ren, Aojun Zhou, Mingjie Zhan, and Hongsheng Li. Measuring multimodal mathematical reasoning with math-vision dataset. Advances in Neural Information Processing Systems, 37:95095-95169, 2025. 5
359
+ [41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 6
360
+ [42] Shengqiong Wu, Hao Fei, Leigang Qu, Wei Ji, and Tat-Seng
361
+
362
+ Chua. Next-gpt: Any-to-any multimodal llm. arXiv preprint arXiv:2309.05519, 2023. 2
363
+ [43] Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024. 1, 2, 6
364
+ [44] Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440, 2024. 1, 3, 5, 6
365
+ [45] Huanjin Yao, Jiaxing Huang, Yawen Qiu, Michael K Chen, Wenzheng Liu, Wei Zhang, Wenjie Zeng, Xikun Zhang, Jingyi Zhang, Yuxin Song, et al. MMreason: An open-ended multi-modal multi-step reasoning benchmark for mllms toward agi. arXiv preprint arXiv:2506.23563, 2025. 5
366
+ [46] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. 1, 3, 5, 6
367
+ [47] Huanjin Yao, Qixiang Yin, Jingyi Zhang, Min Yang, Yibo Wang, Wenhao Wu, Fei Su, Li Shen, Minghui Qiu, Dacheng Tao, et al. R1-sharevl: Incentivizing reasoning capability of multimodal large language models via share-grpo. arXiv preprint arXiv:2505.16673, 2025. 3, 6
368
+ [48] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 6
369
+ [49] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Yuhao Dan, Chenlin Zhao, Guohai Xu, Chenliang Li, Junfeng Tian, et al. mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499, 2023. 2
370
+ [50] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. 3
371
+ [51] Haotian Zhang, Mingfei Gao, Zhe Gan, Philipp Dufter, Nina Wenzel, Forrest Huang, Dhruti Shah, Xianzhi Du, Bowen Zhang, Yanghao Li, et al. Mm1. 5: Methods, analysis & insights from multimodal llm fine-tuning. arXiv preprint arXiv:2409.20566, 2024. 1, 2, 6
372
+ [52] Jingyi Zhang, Jiaxing Huang, Sheng Jin, and Shijian Lu. Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 2
373
+ [53] Jingyi Zhang, Jiaxing Huang, Xiaoqin Zhang, Ling Shao, and Shijian Lu. Historical test-time prompt tuning for vision foundation models. Advances in Neural Information Processing Systems, 37:12872-12896, 2024. 2
374
+ [54] Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Yu Qiao, et al. Mathverse: Does your multi-modal llm truly see the diagrams in visual math problems? In European Conference on Computer Vision, pages 169–186.
375
+
376
+ Springer, 2024. 5
377
+
378
+ [55] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. 1, 3, 6
379
+ [56] Xiaoman Zhang, Chaoyi Wu, Ziheng Zhao, Weixiong Lin, Ya Zhang, Yanfeng Wang, and Weidi Xie. Pmc-vqa: Visual instruction tuning for medical visual question answering. arXiv preprint arXiv:2305.10415, 2023. 2
380
+ [57] Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836, 2024. 5
data/2025/2503_12xxx/2503.12937/images/08a395e3dfac9af5c8b9b4bcea8c772a6dd987bb1bf0054d2e9907a99f461acd.jpg ADDED

Git LFS Details

  • SHA256: f6cf1f73826e1c61a196547c412e23a297e44fce3e37a08cf7a5a57995dfb18d
  • Pointer size: 131 Bytes
  • Size of remote file: 171 kB
data/2025/2503_12xxx/2503.12937/images/1558e8c3b9e8da8ef6634dfd8880bbdd56fd4f2cc1fe7590b5502e41bf971d92.jpg ADDED

Git LFS Details

  • SHA256: 0d6a81816280f4c15910d1b0ba23d34acf8f130a9b255cba865cad22b2955b18
  • Pointer size: 130 Bytes
  • Size of remote file: 11.8 kB
data/2025/2503_12xxx/2503.12937/images/30454a921be72743b7012a32c0b65488666b790c3263f8df6ab07a0349fea014.jpg ADDED

Git LFS Details

  • SHA256: e0736df9d7cc0840355b36a409407bce639811a499f3fa6af3310b7e6b25d1a8
  • Pointer size: 130 Bytes
  • Size of remote file: 17.1 kB
data/2025/2503_12xxx/2503.12937/images/31ea3d8e55752229dfade1dd6643121292be3f7cec00e13905b27dd70e12926c.jpg ADDED

Git LFS Details

  • SHA256: 7486c7df363c737bea8ce1b8a36715fbaa8bd867cf99798e33892cb5b2d9bc73
  • Pointer size: 129 Bytes
  • Size of remote file: 6.31 kB
data/2025/2503_12xxx/2503.12937/images/342463935f3c59dbe09ff74e62294e3541ac31f9b337d0a50fa17549e79c2968.jpg ADDED

Git LFS Details

  • SHA256: c99b7e567d83368d434777cdd5f593bfe1478264e5571496913ce7aecc13d32a
  • Pointer size: 130 Bytes
  • Size of remote file: 19.8 kB
data/2025/2503_12xxx/2503.12937/images/505609ce5b30e24850e3d0b33b9faa0f2d7fbcfed05b7deb464216876e31c18e.jpg ADDED

Git LFS Details

  • SHA256: 54b3b67a9ca1e4a69c0ac4f69edca094321a5ada7072a6a3b045c7eadc497c87
  • Pointer size: 129 Bytes
  • Size of remote file: 4.87 kB
data/2025/2503_12xxx/2503.12937/images/5606d2223621250bdaa6c74f2d34a58e7f44e544758016ac5a16d0b60f1acd4f.jpg ADDED

Git LFS Details

  • SHA256: 44101c0585fdffdb779a8b7ce35e7c99a973f887249df82f0e62e5e658a1fb40
  • Pointer size: 130 Bytes
  • Size of remote file: 19.9 kB
data/2025/2503_12xxx/2503.12937/images/6b3c67a4c50ce11940655a5fb86d1d6562af7aedeea159567fe508f24e38ba79.jpg ADDED

Git LFS Details

  • SHA256: 69eccbbbd8757dda81ca425a7f74c00e3d5809911d05bcb82fc69c321328d1d2
  • Pointer size: 129 Bytes
  • Size of remote file: 9.08 kB
data/2025/2503_12xxx/2503.12937/images/7ad59bbf786298ad029c17f7fc43fbbfc0ac2a40931846c3527455d40fe2fdb1.jpg ADDED

Git LFS Details

  • SHA256: 3d93e04642c2c57e4f0e0fb60cf05d418c5bf1b5e820735dd9f5d8969558d3a8
  • Pointer size: 130 Bytes
  • Size of remote file: 61.5 kB
data/2025/2503_12xxx/2503.12937/images/89c21106dadd9e892de897a3997bdb6531f3aa0bde3862bde14835d4ccdfd1d5.jpg ADDED

Git LFS Details

  • SHA256: d0c9754e0de33a4c76a80c4274837dbf489fc2c94d5d2443a4b0cbb322f3750e
  • Pointer size: 129 Bytes
  • Size of remote file: 7.54 kB
data/2025/2503_12xxx/2503.12937/images/8f872592d2440c83707b4c948838641a3c2d1471896f5cd20dd8fca83cbb0a62.jpg ADDED

Git LFS Details

  • SHA256: 5a22790f9b4a22d4160ca9a880e4f9b5b2544517b602b177e1c4ae20623d2698
  • Pointer size: 130 Bytes
  • Size of remote file: 26.3 kB
data/2025/2503_12xxx/2503.12937/images/c4dcc9464dd8f5bae7a084695aadece73c6c64879dbae0c0fa3ed1632dd6f628.jpg ADDED

Git LFS Details

  • SHA256: 8842629cc3276791a4e891f38c502d1dcee54d4d1d95283bca8fac66562fdddb
  • Pointer size: 130 Bytes
  • Size of remote file: 19.1 kB
data/2025/2503_12xxx/2503.12937/images/cc8691112c299eff8cc7beb85c16c3122f1c04b330a8141f6be8b51d0884c159.jpg ADDED

Git LFS Details

  • SHA256: 6025cc2ce17b9f718cc5329854a857dcc2e0ba9549fd03dd7fae8fe6d46d176a
  • Pointer size: 129 Bytes
  • Size of remote file: 9.94 kB
data/2025/2503_12xxx/2503.12937/images/dc48c9c847fef2992a2cdf8778ddf43114550130a75e34b67ae5bbaad9c55bea.jpg ADDED

Git LFS Details

  • SHA256: 16743e03246f448bdcf629028f5661e4c1f97319b1d97137cc5982fc7f2c04c2
  • Pointer size: 128 Bytes
  • Size of remote file: 937 Bytes
data/2025/2503_12xxx/2503.12937/images/e5f37c322318e22f5c792d8f69d382aba0a87edb79ed67e1acbc2363b6ff942e.jpg ADDED

Git LFS Details

  • SHA256: a7784ea92d40f6d73e350fa5357f57f0cbb3bfb63d73d3f8ccd15dc595734c4c
  • Pointer size: 129 Bytes
  • Size of remote file: 5.36 kB
data/2025/2503_12xxx/2503.12937/images/f8c2153c05b6d636fd93e2d9701b86feb20c9c13112ae8afc8e353930bce0932.jpg ADDED

Git LFS Details

  • SHA256: ffe87d42baba2f4715201d5d9bf2b548441f22838b74731ef275f71f82233dc8
  • Pointer size: 130 Bytes
  • Size of remote file: 11.7 kB
data/2025/2503_12xxx/2503.12937/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_content_list.json ADDED
@@ -0,0 +1,877 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 88,
8
+ 63,
9
+ 911,
10
+ 131
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Elif Dicle Demir",
17
+ "bbox": [
18
+ 171,
19
+ 152,
20
+ 302,
21
+ 167
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Electrical and Electronics Eng. Dept.",
28
+ "bbox": [
29
+ 109,
30
+ 167,
31
+ 362,
32
+ 183
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Koç University",
39
+ "bbox": [
40
+ 184,
41
+ 185,
42
+ 287,
43
+ 198
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Istanbul, Türkiye",
50
+ "bbox": [
51
+ 174,
52
+ 200,
53
+ 292,
54
+ 214
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "elifdemir21@ku.edu.tr",
61
+ "bbox": [
62
+ 156,
63
+ 215,
64
+ 310,
65
+ 228
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Buse Bilgin",
72
+ "bbox": [
73
+ 452,
74
+ 152,
75
+ 544,
76
+ 167
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "6GEN Lab., Next-Gen R&D",
83
+ "bbox": [
84
+ 403,
85
+ 169,
86
+ 594,
87
+ 181
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "Network Technologies, Turkcell",
94
+ "bbox": [
95
+ 392,
96
+ 184,
97
+ 604,
98
+ 198
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "Istanbul, Türkiye",
105
+ "bbox": [
106
+ 437,
107
+ 200,
108
+ 553,
109
+ 214
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "buse.bilgin@turkcell.com.tr",
116
+ "bbox": [
117
+ 401,
118
+ 215,
119
+ 589,
120
+ 229
121
+ ],
122
+ "page_idx": 0
123
+ },
124
+ {
125
+ "type": "text",
126
+ "text": "Mehmet Cengiz Onbaşlı",
127
+ "bbox": [
128
+ 669,
129
+ 152,
130
+ 852,
131
+ 167
132
+ ],
133
+ "page_idx": 0
134
+ },
135
+ {
136
+ "type": "text",
137
+ "text": "Electrical and Electronics Eng. Dept.",
138
+ "bbox": [
139
+ 633,
140
+ 167,
141
+ 885,
142
+ 183
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "text",
148
+ "text": "Koç University",
149
+ "bbox": [
150
+ 709,
151
+ 185,
152
+ 812,
153
+ 199
154
+ ],
155
+ "page_idx": 0
156
+ },
157
+ {
158
+ "type": "text",
159
+ "text": "Istanbul, Türkiye",
160
+ "bbox": [
161
+ 700,
162
+ 200,
163
+ 815,
164
+ 214
165
+ ],
166
+ "page_idx": 0
167
+ },
168
+ {
169
+ "type": "text",
170
+ "text": "monbasli@ku.edu.tr",
171
+ "bbox": [
172
+ 689,
173
+ 215,
174
+ 826,
175
+ 228
176
+ ],
177
+ "page_idx": 0
178
+ },
179
+ {
180
+ "type": "text",
181
+ "text": "Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure.",
182
+ "bbox": [
183
+ 73,
184
+ 273,
185
+ 491,
186
+ 613
187
+ ],
188
+ "page_idx": 0
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks.",
193
+ "bbox": [
194
+ 73,
195
+ 614,
196
+ 491,
197
+ 665
198
+ ],
199
+ "page_idx": 0
200
+ },
201
+ {
202
+ "type": "text",
203
+ "text": "I. INTRODUCTION",
204
+ "text_level": 1,
205
+ "bbox": [
206
+ 215,
207
+ 676,
208
+ 349,
209
+ 691
210
+ ],
211
+ "page_idx": 0
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards",
216
+ "bbox": [
217
+ 73,
218
+ 696,
219
+ 491,
220
+ 907
221
+ ],
222
+ "page_idx": 0
223
+ },
224
+ {
225
+ "type": "text",
226
+ "text": "quantum-resistant cryptographic solutions has become a pressing concern.",
227
+ "bbox": [
228
+ 503,
229
+ 273,
230
+ 921,
231
+ 303
232
+ ],
233
+ "page_idx": 0
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]",
238
+ "bbox": [
239
+ 501,
240
+ 303,
241
+ 921,
242
+ 589
243
+ ],
244
+ "page_idx": 0
245
+ },
246
+ {
247
+ "type": "text",
248
+ "text": "This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments.",
249
+ "bbox": [
250
+ 501,
251
+ 590,
252
+ 921,
253
+ 772
254
+ ],
255
+ "page_idx": 0
256
+ },
257
+ {
258
+ "type": "text",
259
+ "text": "II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS",
260
+ "text_level": 1,
261
+ "bbox": [
262
+ 509,
263
+ 781,
264
+ 915,
265
+ 811
266
+ ],
267
+ "page_idx": 0
268
+ },
269
+ {
270
+ "type": "text",
271
+ "text": "To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained",
272
+ "bbox": [
273
+ 501,
274
+ 816,
275
+ 921,
276
+ 907
277
+ ],
278
+ "page_idx": 0
279
+ },
280
+ {
281
+ "type": "aside_text",
282
+ "text": "arXiv:2503.12952v2 [cs.CR] 31 Mar 2025",
283
+ "bbox": [
284
+ 22,
285
+ 234,
286
+ 57,
287
+ 681
288
+ ],
289
+ "page_idx": 0
290
+ },
291
+ {
292
+ "type": "text",
293
+ "text": "environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography.",
294
+ "bbox": [
295
+ 73,
296
+ 61,
297
+ 491,
298
+ 243
299
+ ],
300
+ "page_idx": 1
301
+ },
302
+ {
303
+ "type": "text",
304
+ "text": "Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed $3.3\\mathrm{GHz}$ clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries.",
305
+ "bbox": [
306
+ 73,
307
+ 243,
308
+ 491,
309
+ 393
310
+ ],
311
+ "page_idx": 1
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM",
316
+ "text_level": 1,
317
+ "bbox": [
318
+ 107,
319
+ 404,
320
+ 459,
321
+ 433
322
+ ],
323
+ "page_idx": 1
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors.",
328
+ "bbox": [
329
+ 73,
330
+ 439,
331
+ 493,
332
+ 786
333
+ ],
334
+ "page_idx": 1
335
+ },
336
+ {
337
+ "type": "text",
338
+ "text": "As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in $0.127\\mathrm{ms}$ , whereas Kyber-1024 requires $0.294\\mathrm{ms}$ , demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768.",
339
+ "bbox": [
340
+ 73,
341
+ 786,
342
+ 491,
343
+ 907
344
+ ],
345
+ "page_idx": 1
346
+ },
347
+ {
348
+ "type": "text",
349
+ "text": "The AVX2 optimization significantly reduces execution time, yielding an average speedup of $5.98 \\times$ across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to $6.65 \\times$ due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations.",
350
+ "bbox": [
351
+ 501,
352
+ 61,
353
+ 921,
354
+ 167
355
+ ],
356
+ "page_idx": 1
357
+ },
358
+ {
359
+ "type": "text",
360
+ "text": "Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over $60\\%$ of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification.",
361
+ "bbox": [
362
+ 503,
363
+ 169,
364
+ 921,
365
+ 305
366
+ ],
367
+ "page_idx": 1
368
+ },
369
+ {
370
+ "type": "text",
371
+ "text": "The AVX2 speedup for Dilithium is lower than for Kyber $(4.8\\times$ on average), but still significant, particularly in the signing operation, which achieves up to a $5.83\\times$ reduction in execution time. The verification step sees the smallest speedup $(3.76\\times)$ , reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications.",
372
+ "bbox": [
373
+ 501,
374
+ 306,
375
+ 921,
376
+ 444
377
+ ],
378
+ "page_idx": 1
379
+ },
380
+ {
381
+ "type": "text",
382
+ "text": "Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security.",
383
+ "bbox": [
384
+ 501,
385
+ 446,
386
+ 923,
387
+ 566
388
+ ],
389
+ "page_idx": 1
390
+ },
391
+ {
392
+ "type": "table",
393
+ "img_path": "images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg",
394
+ "table_caption": [
395
+ "TABLE I KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF KYBER."
396
+ ],
397
+ "table_footnote": [],
398
+ "table_body": "<table><tr><td colspan=\"4\">KYBER 512</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 1632</td><td>gen: 0.035</td><td>gen: 0.007</td><td>5.00</td></tr><tr><td>pk: 800</td><td>enc: 0.040</td><td>enc: 0.007</td><td>5.71</td></tr><tr><td>ct: 768</td><td>dec: 0.052</td><td>dec: 0.008</td><td>6.50</td></tr><tr><td>Total</td><td>0.127</td><td>0.022</td><td>5.77</td></tr><tr><td colspan=\"4\">KYBER 768</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 2400</td><td>gen: 0.058</td><td>gen: 0.011</td><td>5.27</td></tr><tr><td>pk: 1184</td><td>enc: 0.063</td><td>enc: 0.011</td><td>5.73</td></tr><tr><td>ct: 1088</td><td>dec: 0.080</td><td>dec: 0.012</td><td>6.67</td></tr><tr><td>Total</td><td>0.201</td><td>0.034</td><td>5.91</td></tr><tr><td colspan=\"4\">KYBER 1024</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 3168</td><td>gen: 0.089</td><td>gen: 0.015</td><td>5.93</td></tr><tr><td>pk: 1568</td><td>enc: 0.092</td><td>enc: 0.015</td><td>6.13</td></tr><tr><td>ct: 1568</td><td>dec: 0.113</td><td>dec: 0.017</td><td>6.65</td></tr><tr><td>Total</td><td>0.294</td><td>0.047</td><td>6.26</td></tr></table>",
399
+ "bbox": [
400
+ 506,
401
+ 633,
402
+ 923,
403
+ 887
404
+ ],
405
+ "page_idx": 1
406
+ },
407
+ {
408
+ "type": "table",
409
+ "img_path": "images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg",
410
+ "table_caption": [
411
+ "TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF DILITHIUM."
412
+ ],
413
+ "table_footnote": [],
414
+ "table_body": "<table><tr><td colspan=\"4\">DILITHIUM 2</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1312</td><td>gen: 0.094</td><td>gen: 0.026</td><td>3.62</td></tr><tr><td>sig: 2420</td><td>sign: 0.445</td><td>sign: 0.077</td><td>5.78</td></tr><tr><td></td><td>verify: 0.104</td><td>verify: 0.028</td><td>3.71</td></tr><tr><td>Total</td><td>0.643</td><td>0.131</td><td>4.91</td></tr><tr><td colspan=\"4\">DILITHIUM 3</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1952</td><td>gen: 0.167</td><td>gen: 0.045</td><td>3.71</td></tr><tr><td>sig: 3293</td><td>sign: 0.665</td><td>sign: 0.120</td><td>5.54</td></tr><tr><td></td><td>verify: 0.160</td><td>verify: 0.045</td><td>3.56</td></tr><tr><td>Total</td><td>0.992</td><td>0.210</td><td>4.73</td></tr><tr><td colspan=\"4\">DILITHIUM 5</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 2592</td><td>gen: 0.253</td><td>gen: 0.070</td><td>3.61</td></tr><tr><td>sig: 4595</td><td>sign: 0.840</td><td>sign: 0.144</td><td>5.83</td></tr><tr><td></td><td>verify: 0.267</td><td>verify: 0.071</td><td>3.76</td></tr><tr><td>Total</td><td>1.360</td><td>0.285</td><td>4.77</td></tr></table>",
415
+ "bbox": [
416
+ 76,
417
+ 109,
418
+ 498,
419
+ 363
420
+ ],
421
+ "page_idx": 2
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY",
426
+ "text_level": 1,
427
+ "bbox": [
428
+ 101,
429
+ 393,
430
+ 464,
431
+ 422
432
+ ],
433
+ "page_idx": 2
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility.",
438
+ "bbox": [
439
+ 73,
440
+ 436,
441
+ 491,
442
+ 633
443
+ ],
444
+ "page_idx": 2
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries.",
449
+ "bbox": [
450
+ 73,
451
+ 635,
452
+ 491,
453
+ 907
454
+ ],
455
+ "page_idx": 2
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately $20\\%$ faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over $60\\%$ of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations.",
460
+ "bbox": [
461
+ 501,
462
+ 61,
463
+ 921,
464
+ 318
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency.",
471
+ "bbox": [
472
+ 501,
473
+ 319,
474
+ 921,
475
+ 559
476
+ ],
477
+ "page_idx": 2
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance.",
482
+ "bbox": [
483
+ 503,
484
+ 560,
485
+ 921,
486
+ 787
487
+ ],
488
+ "page_idx": 2
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK",
493
+ "text_level": 1,
494
+ "bbox": [
495
+ 555,
496
+ 797,
497
+ 870,
498
+ 840
499
+ ],
500
+ "page_idx": 2
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service",
505
+ "bbox": [
506
+ 503,
507
+ 845,
508
+ 921,
509
+ 907
510
+ ],
511
+ "page_idx": 2
512
+ },
513
+ {
514
+ "type": "table",
515
+ "img_path": "images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg",
516
+ "table_caption": [
517
+ "TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS."
518
+ ],
519
+ "table_footnote": [],
520
+ "table_body": "<table><tr><td>Algorithm</td><td>Security Level</td><td>Total Time (ms)</td></tr><tr><td>Kyber-512</td><td>128-bit</td><td>0.127</td></tr><tr><td>Kyber-768</td><td>192-bit</td><td>0.201</td></tr><tr><td>Kyber-1024</td><td>256-bit</td><td>0.294</td></tr><tr><td>Dilithium-2</td><td>128-bit</td><td>0.643</td></tr><tr><td>Dilithium-3</td><td>192-bit</td><td>0.992</td></tr><tr><td>Dilithium-5</td><td>256-bit</td><td>1.360</td></tr><tr><td>ECDSA(P-256)</td><td>128-bit</td><td>0.801</td></tr><tr><td>ECDSA(P-384)</td><td>192-bit</td><td>1.702</td></tr><tr><td>ECDSA(P-512)</td><td>256-bit</td><td>2.398</td></tr><tr><td>RSA-2048</td><td>112-bit</td><td>0.324</td></tr><tr><td>RSA-3072</td><td>128-bit</td><td>0.884</td></tr><tr><td>ECDH(P-256)</td><td>128-bit</td><td>0.102</td></tr><tr><td>ECDH(P-384)</td><td>192-bit</td><td>0.299</td></tr><tr><td>ECDH(P-521)</td><td>256-bit</td><td>0.903</td></tr></table>",
521
+ "bbox": [
522
+ 125,
523
+ 107,
524
+ 441,
525
+ 287
526
+ ],
527
+ "page_idx": 3
528
+ },
529
+ {
530
+ "type": "text",
531
+ "text": "continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues.",
532
+ "bbox": [
533
+ 76,
534
+ 316,
535
+ 488,
536
+ 375
537
+ ],
538
+ "page_idx": 3
539
+ },
540
+ {
541
+ "type": "text",
542
+ "text": "A. Challenges",
543
+ "text_level": 1,
544
+ "bbox": [
545
+ 76,
546
+ 388,
547
+ 174,
548
+ 402
549
+ ],
550
+ "page_idx": 3
551
+ },
552
+ {
553
+ "type": "text",
554
+ "text": "1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by $1.5\\%$ [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge.",
555
+ "bbox": [
556
+ 76,
557
+ 409,
558
+ 488,
559
+ 679
560
+ ],
561
+ "page_idx": 3
562
+ },
563
+ {
564
+ "type": "text",
565
+ "text": "2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a \"fail secure\" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. To prevent network partitioning, telecom operators must ensure",
566
+ "bbox": [
567
+ 76,
568
+ 681,
569
+ 488,
570
+ 906
571
+ ],
572
+ "page_idx": 3
573
+ },
574
+ {
575
+ "type": "text",
576
+ "text": "PQC upgrades happen in sync across critical systems or remain backward-compatible.",
577
+ "bbox": [
578
+ 508,
579
+ 64,
580
+ 918,
581
+ 90
582
+ ],
583
+ "page_idx": 3
584
+ },
585
+ {
586
+ "type": "text",
587
+ "text": "3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to \"start planning\" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively.",
588
+ "bbox": [
589
+ 506,
590
+ 92,
591
+ 918,
592
+ 393
593
+ ],
594
+ "page_idx": 3
595
+ },
596
+ {
597
+ "type": "text",
598
+ "text": "4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial.",
599
+ "bbox": [
600
+ 506,
601
+ 393,
602
+ 918,
603
+ 650
604
+ ],
605
+ "page_idx": 3
606
+ },
607
+ {
608
+ "type": "text",
609
+ "text": "5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct",
610
+ "bbox": [
611
+ 506,
612
+ 651,
613
+ 918,
614
+ 906
615
+ ],
616
+ "page_idx": 3
617
+ },
618
+ {
619
+ "type": "text",
620
+ "text": "extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed.",
621
+ "bbox": [
622
+ 76,
623
+ 63,
624
+ 488,
625
+ 92
626
+ ],
627
+ "page_idx": 4
628
+ },
629
+ {
630
+ "type": "text",
631
+ "text": "6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning.",
632
+ "bbox": [
633
+ 76,
634
+ 92,
635
+ 488,
636
+ 349
637
+ ],
638
+ "page_idx": 4
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "B. Successful Implementations and Initiatives of PQC",
643
+ "text_level": 1,
644
+ "bbox": [
645
+ 78,
646
+ 359,
647
+ 442,
648
+ 375
649
+ ],
650
+ "page_idx": 4
651
+ },
652
+ {
653
+ "type": "text",
654
+ "text": "Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:",
655
+ "bbox": [
656
+ 76,
657
+ 378,
658
+ 488,
659
+ 497
660
+ ],
661
+ "page_idx": 4
662
+ },
663
+ {
664
+ "type": "text",
665
+ "text": "1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure.",
666
+ "bbox": [
667
+ 76,
668
+ 500,
669
+ 488,
670
+ 830
671
+ ],
672
+ "page_idx": 4
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "2) SK Telecom (South Korea) - PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of post-quantum cryptography in a real 5G standalone network environ-",
677
+ "bbox": [
678
+ 76,
679
+ 832,
680
+ 488,
681
+ 907
682
+ ],
683
+ "page_idx": 4
684
+ },
685
+ {
686
+ "type": "text",
687
+ "text": "ment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development.",
688
+ "bbox": [
689
+ 506,
690
+ 61,
691
+ 919,
692
+ 212
693
+ ],
694
+ "page_idx": 4
695
+ },
696
+ {
697
+ "type": "text",
698
+ "text": "3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be \"quantum ready\" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telecom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption.",
699
+ "bbox": [
700
+ 506,
701
+ 213,
702
+ 919,
703
+ 424
704
+ ],
705
+ "page_idx": 4
706
+ },
707
+ {
708
+ "type": "text",
709
+ "text": "Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts.",
710
+ "bbox": [
711
+ 506,
712
+ 426,
713
+ 919,
714
+ 666
715
+ ],
716
+ "page_idx": 4
717
+ },
718
+ {
719
+ "type": "text",
720
+ "text": "C. Future Outlook and Recommendations",
721
+ "text_level": 1,
722
+ "bbox": [
723
+ 508,
724
+ 676,
725
+ 790,
726
+ 690
727
+ ],
728
+ "page_idx": 4
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate \"harvest now, decrypt later\" risks. Awareness and education are also crucial for leadership and technical teams.",
733
+ "bbox": [
734
+ 506,
735
+ 696,
736
+ 919,
737
+ 861
738
+ ],
739
+ "page_idx": 4
740
+ },
741
+ {
742
+ "type": "text",
743
+ "text": "A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. Initial",
744
+ "bbox": [
745
+ 508,
746
+ 862,
747
+ 919,
748
+ 907
749
+ ],
750
+ "page_idx": 4
751
+ },
752
+ {
753
+ "type": "text",
754
+ "text": "transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s.",
755
+ "bbox": [
756
+ 73,
757
+ 61,
758
+ 491,
759
+ 196
760
+ ],
761
+ "page_idx": 5
762
+ },
763
+ {
764
+ "type": "text",
765
+ "text": "Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with \"quantum-safe\" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further.",
766
+ "bbox": [
767
+ 73,
768
+ 198,
769
+ 491,
770
+ 348
771
+ ],
772
+ "page_idx": 5
773
+ },
774
+ {
775
+ "type": "text",
776
+ "text": "By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance.",
777
+ "bbox": [
778
+ 73,
779
+ 348,
780
+ 490,
781
+ 470
782
+ ],
783
+ "page_idx": 5
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "VI. CONCLUSION",
788
+ "text_level": 1,
789
+ "bbox": [
790
+ 217,
791
+ 474,
792
+ 346,
793
+ 488
794
+ ],
795
+ "page_idx": 5
796
+ },
797
+ {
798
+ "type": "text",
799
+ "text": "The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization.",
800
+ "bbox": [
801
+ 73,
802
+ 492,
803
+ 490,
804
+ 642
805
+ ],
806
+ "page_idx": 5
807
+ },
808
+ {
809
+ "type": "text",
810
+ "text": "However large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation.",
811
+ "bbox": [
812
+ 73,
813
+ 643,
814
+ 491,
815
+ 792
816
+ ],
817
+ "page_idx": 5
818
+ },
819
+ {
820
+ "type": "text",
821
+ "text": "Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as",
822
+ "bbox": [
823
+ 73,
824
+ 794,
825
+ 491,
826
+ 824
827
+ ],
828
+ "page_idx": 5
829
+ },
830
+ {
831
+ "type": "text",
832
+ "text": "standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era.",
833
+ "bbox": [
834
+ 501,
835
+ 61,
836
+ 921,
837
+ 229
838
+ ],
839
+ "page_idx": 5
840
+ },
841
+ {
842
+ "type": "text",
843
+ "text": "REFERENCES",
844
+ "text_level": 1,
845
+ "bbox": [
846
+ 663,
847
+ 241,
848
+ 761,
849
+ 255
850
+ ],
851
+ "page_idx": 5
852
+ },
853
+ {
854
+ "type": "list",
855
+ "sub_type": "ref_text",
856
+ "list_items": [
857
+ "[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, \"Transitioning organizations to post-quantum cryptography,\" Nature, vol. 605, no. 7909, pp. 237–243, 2022.",
858
+ "[2] D. J. Bernstein and T. Lange, \"Post-quantum cryptography,\" Nature, vol. 549, no. 7671, pp. 188-194, 2017.",
859
+ "[3] G. Alagic, G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., \"Status report on the first round of the NIST post-quantum cryptography standardization process,\" 2019.",
860
+ "[4] National Institute of Standards and Technology, \"Post-Quantum Cryptography Standardization,\" 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm",
861
+ "[5] GSM Association, \"Post Quantum Cryptography - Guidelines for Telecom Use Cases,\" GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf.",
862
+ "[6] PKI Consortium, \"Key takeaways of the PQC conference in Austin,\" January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/.",
863
+ "[7] U.S. Government, \"Report on post-quantum cryptography,\" Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf",
864
+ "[8] J. Taaffe, \"Are telcos ready for a quantum leap?\" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-telcos-making-a-quantum-leap.",
865
+ "[9] SoftBank Corp. and SandboxAQ, \"SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers,\" March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement-next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers.",
866
+ "[10] SoftBank Corp., \"SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology,\" February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/.",
867
+ "[11] Thales Group and SK Telecom, \"Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks,\" 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case."
868
+ ],
869
+ "bbox": [
870
+ 508,
871
+ 263,
872
+ 944,
873
+ 823
874
+ ],
875
+ "page_idx": 5
876
+ }
877
+ ]
data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_model.json ADDED
@@ -0,0 +1,1004 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.236,
8
+ 0.058,
9
+ 0.683
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2503.12952v2 [cs.CR] 31 Mar 2025"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.089,
18
+ 0.064,
19
+ 0.912,
20
+ 0.132
21
+ ],
22
+ "angle": 0,
23
+ "content": "Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.173,
29
+ 0.153,
30
+ 0.303,
31
+ 0.168
32
+ ],
33
+ "angle": 0,
34
+ "content": "Elif Dicle Demir"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.11,
40
+ 0.169,
41
+ 0.364,
42
+ 0.184
43
+ ],
44
+ "angle": 0,
45
+ "content": "Electrical and Electronics Eng. Dept."
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.186,
51
+ 0.186,
52
+ 0.289,
53
+ 0.199
54
+ ],
55
+ "angle": 0,
56
+ "content": "Koç University"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.176,
62
+ 0.201,
63
+ 0.293,
64
+ 0.215
65
+ ],
66
+ "angle": 0,
67
+ "content": "Istanbul, Türkiye"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.158,
73
+ 0.217,
74
+ 0.312,
75
+ 0.229
76
+ ],
77
+ "angle": 0,
78
+ "content": "elifdemir21@ku.edu.tr"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.453,
84
+ 0.153,
85
+ 0.545,
86
+ 0.168
87
+ ],
88
+ "angle": 0,
89
+ "content": "Buse Bilgin"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.404,
95
+ 0.17,
96
+ 0.596,
97
+ 0.183
98
+ ],
99
+ "angle": 0,
100
+ "content": "6GEN Lab., Next-Gen R&D"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.393,
106
+ 0.185,
107
+ 0.605,
108
+ 0.199
109
+ ],
110
+ "angle": 0,
111
+ "content": "Network Technologies, Turkcell"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.438,
117
+ 0.201,
118
+ 0.555,
119
+ 0.215
120
+ ],
121
+ "angle": 0,
122
+ "content": "Istanbul, Türkiye"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.403,
128
+ 0.217,
129
+ 0.591,
130
+ 0.23
131
+ ],
132
+ "angle": 0,
133
+ "content": "buse.bilgin@turkcell.com.tr"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.67,
139
+ 0.153,
140
+ 0.853,
141
+ 0.168
142
+ ],
143
+ "angle": 0,
144
+ "content": "Mehmet Cengiz Onbaşi"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.635,
150
+ 0.169,
151
+ 0.887,
152
+ 0.184
153
+ ],
154
+ "angle": 0,
155
+ "content": "Electrical and Electronics Eng. Dept."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.71,
161
+ 0.186,
162
+ 0.813,
163
+ 0.2
164
+ ],
165
+ "angle": 0,
166
+ "content": "Koç University"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.701,
172
+ 0.201,
173
+ 0.816,
174
+ 0.215
175
+ ],
176
+ "angle": 0,
177
+ "content": "Istanbul, Türkiye"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.691,
183
+ 0.217,
184
+ 0.827,
185
+ 0.229
186
+ ],
187
+ "angle": 0,
188
+ "content": "monbasli@ku.edu.tr"
189
+ },
190
+ {
191
+ "type": "text",
192
+ "bbox": [
193
+ 0.075,
194
+ 0.275,
195
+ 0.493,
196
+ 0.614
197
+ ],
198
+ "angle": 0,
199
+ "content": "Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure."
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.075,
205
+ 0.615,
206
+ 0.492,
207
+ 0.666
208
+ ],
209
+ "angle": 0,
210
+ "content": "Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks."
211
+ },
212
+ {
213
+ "type": "title",
214
+ "bbox": [
215
+ 0.217,
216
+ 0.678,
217
+ 0.35,
218
+ 0.692
219
+ ],
220
+ "angle": 0,
221
+ "content": "I. INTRODUCTION"
222
+ },
223
+ {
224
+ "type": "text",
225
+ "bbox": [
226
+ 0.074,
227
+ 0.697,
228
+ 0.493,
229
+ 0.909
230
+ ],
231
+ "angle": 0,
232
+ "content": "Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards"
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.504,
238
+ 0.274,
239
+ 0.922,
240
+ 0.304
241
+ ],
242
+ "angle": 0,
243
+ "content": "quantum-resistant cryptographic solutions has become a pressing concern."
244
+ },
245
+ {
246
+ "type": "text",
247
+ "bbox": [
248
+ 0.503,
249
+ 0.304,
250
+ 0.923,
251
+ 0.59
252
+ ],
253
+ "angle": 0,
254
+ "content": "To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]"
255
+ },
256
+ {
257
+ "type": "text",
258
+ "bbox": [
259
+ 0.503,
260
+ 0.592,
261
+ 0.923,
262
+ 0.773
263
+ ],
264
+ "angle": 0,
265
+ "content": "This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments."
266
+ },
267
+ {
268
+ "type": "title",
269
+ "bbox": [
270
+ 0.51,
271
+ 0.782,
272
+ 0.916,
273
+ 0.812
274
+ ],
275
+ "angle": 0,
276
+ "content": "II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS"
277
+ },
278
+ {
279
+ "type": "text",
280
+ "bbox": [
281
+ 0.503,
282
+ 0.817,
283
+ 0.922,
284
+ 0.909
285
+ ],
286
+ "angle": 0,
287
+ "content": "To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained"
288
+ }
289
+ ],
290
+ [
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.074,
295
+ 0.063,
296
+ 0.493,
297
+ 0.244
298
+ ],
299
+ "angle": 0,
300
+ "content": "environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.074,
306
+ 0.244,
307
+ 0.493,
308
+ 0.395
309
+ ],
310
+ "angle": 0,
311
+ "content": "Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed \\(3.3\\mathrm{GHz}\\) clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries."
312
+ },
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.108,
317
+ 0.405,
318
+ 0.46,
319
+ 0.434
320
+ ],
321
+ "angle": 0,
322
+ "content": "III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.074,
328
+ 0.44,
329
+ 0.495,
330
+ 0.787
331
+ ],
332
+ "angle": 0,
333
+ "content": "Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.075,
339
+ 0.787,
340
+ 0.493,
341
+ 0.909
342
+ ],
343
+ "angle": 0,
344
+ "content": "As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in \\(0.127\\mathrm{ms}\\), whereas Kyber-1024 requires \\(0.294\\mathrm{ms}\\), demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768."
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.503,
350
+ 0.063,
351
+ 0.923,
352
+ 0.169
353
+ ],
354
+ "angle": 0,
355
+ "content": "The AVX2 optimization significantly reduces execution time, yielding an average speedup of \\(5.98 \\times\\) across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to \\(6.65 \\times\\) due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations."
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.504,
361
+ 0.17,
362
+ 0.923,
363
+ 0.306
364
+ ],
365
+ "angle": 0,
366
+ "content": "Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over \\(60\\%\\) of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.503,
372
+ 0.308,
373
+ 0.923,
374
+ 0.445
375
+ ],
376
+ "angle": 0,
377
+ "content": "The AVX2 speedup for Dilithium is lower than for Kyber \\((4.8\\times\\) on average), but still significant, particularly in the signing operation, which achieves up to a \\(5.83\\times\\) reduction in execution time. The verification step sees the smallest speedup \\((3.76\\times)\\), reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.503,
383
+ 0.447,
384
+ 0.924,
385
+ 0.568
386
+ ],
387
+ "angle": 0,
388
+ "content": "Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security."
389
+ },
390
+ {
391
+ "type": "table_caption",
392
+ "bbox": [
393
+ 0.508,
394
+ 0.591,
395
+ 0.921,
396
+ 0.626
397
+ ],
398
+ "angle": 0,
399
+ "content": "TABLEI KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF KYBER."
400
+ },
401
+ {
402
+ "type": "table",
403
+ "bbox": [
404
+ 0.507,
405
+ 0.635,
406
+ 0.924,
407
+ 0.888
408
+ ],
409
+ "angle": 0,
410
+ "content": "<table><tr><td colspan=\"4\">KYBER 512</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 1632</td><td>gen: 0.035</td><td>gen: 0.007</td><td>5.00</td></tr><tr><td>pk: 800</td><td>enc: 0.040</td><td>enc: 0.007</td><td>5.71</td></tr><tr><td>ct: 768</td><td>dec: 0.052</td><td>dec: 0.008</td><td>6.50</td></tr><tr><td>Total</td><td>0.127</td><td>0.022</td><td>5.77</td></tr><tr><td colspan=\"4\">KYBER 768</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 2400</td><td>gen: 0.058</td><td>gen: 0.011</td><td>5.27</td></tr><tr><td>pk: 1184</td><td>enc: 0.063</td><td>enc: 0.011</td><td>5.73</td></tr><tr><td>ct: 1088</td><td>dec: 0.080</td><td>dec: 0.012</td><td>6.67</td></tr><tr><td>Total</td><td>0.201</td><td>0.034</td><td>5.91</td></tr><tr><td colspan=\"4\">KYBER 1024</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 3168</td><td>gen: 0.089</td><td>gen: 0.015</td><td>5.93</td></tr><tr><td>pk: 1568</td><td>enc: 0.092</td><td>enc: 0.015</td><td>6.13</td></tr><tr><td>ct: 1568</td><td>dec: 0.113</td><td>dec: 0.017</td><td>6.65</td></tr><tr><td>Total</td><td>0.294</td><td>0.047</td><td>6.26</td></tr></table>"
411
+ }
412
+ ],
413
+ [
414
+ {
415
+ "type": "table_caption",
416
+ "bbox": [
417
+ 0.107,
418
+ 0.065,
419
+ 0.461,
420
+ 0.099
421
+ ],
422
+ "angle": 0,
423
+ "content": "TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLSECONDS) FOR ALL PARAMETER SETS OF DILITHIUM."
424
+ },
425
+ {
426
+ "type": "table",
427
+ "bbox": [
428
+ 0.078,
429
+ 0.11,
430
+ 0.499,
431
+ 0.364
432
+ ],
433
+ "angle": 0,
434
+ "content": "<table><tr><td colspan=\"4\">DILITHIUM 2</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1312</td><td>gen: 0.094</td><td>gen: 0.026</td><td>3.62</td></tr><tr><td>sig: 2420</td><td>sign: 0.445</td><td>sign: 0.077</td><td>5.78</td></tr><tr><td></td><td>verify: 0.104</td><td>verify: 0.028</td><td>3.71</td></tr><tr><td>Total</td><td>0.643</td><td>0.131</td><td>4.91</td></tr><tr><td colspan=\"4\">DILITHIUM 3</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1952</td><td>gen: 0.167</td><td>gen: 0.045</td><td>3.71</td></tr><tr><td>sig: 3293</td><td>sign: 0.665</td><td>sign: 0.120</td><td>5.54</td></tr><tr><td></td><td>verify: 0.160</td><td>verify: 0.045</td><td>3.56</td></tr><tr><td>Total</td><td>0.992</td><td>0.210</td><td>4.73</td></tr><tr><td colspan=\"4\">DILITHIUM 5</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 2592</td><td>gen: 0.253</td><td>gen: 0.070</td><td>3.61</td></tr><tr><td>sig: 4595</td><td>sign: 0.840</td><td>sign: 0.144</td><td>5.83</td></tr><tr><td></td><td>verify: 0.267</td><td>verify: 0.071</td><td>3.76</td></tr><tr><td>Total</td><td>1.360</td><td>0.285</td><td>4.77</td></tr></table>"
435
+ },
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.102,
440
+ 0.394,
441
+ 0.465,
442
+ 0.424
443
+ ],
444
+ "angle": 0,
445
+ "content": "IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.074,
451
+ 0.438,
452
+ 0.492,
453
+ 0.635
454
+ ],
455
+ "angle": 0,
456
+ "content": "Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility."
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.074,
462
+ 0.636,
463
+ 0.493,
464
+ 0.909
465
+ ],
466
+ "angle": 0,
467
+ "content": "Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries."
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.503,
473
+ 0.063,
474
+ 0.923,
475
+ 0.319
476
+ ],
477
+ "angle": 0,
478
+ "content": "Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately \\(20\\%\\) faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over \\(60\\%\\) of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations."
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.503,
484
+ 0.32,
485
+ 0.923,
486
+ 0.56
487
+ ],
488
+ "angle": 0,
489
+ "content": "The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency."
490
+ },
491
+ {
492
+ "type": "text",
493
+ "bbox": [
494
+ 0.504,
495
+ 0.561,
496
+ 0.923,
497
+ 0.789
498
+ ],
499
+ "angle": 0,
500
+ "content": "These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance."
501
+ },
502
+ {
503
+ "type": "title",
504
+ "bbox": [
505
+ 0.556,
506
+ 0.798,
507
+ 0.871,
508
+ 0.842
509
+ ],
510
+ "angle": 0,
511
+ "content": "V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK"
512
+ },
513
+ {
514
+ "type": "text",
515
+ "bbox": [
516
+ 0.504,
517
+ 0.847,
518
+ 0.922,
519
+ 0.908
520
+ ],
521
+ "angle": 0,
522
+ "content": "Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service"
523
+ }
524
+ ],
525
+ [
526
+ {
527
+ "type": "table_caption",
528
+ "bbox": [
529
+ 0.093,
530
+ 0.065,
531
+ 0.476,
532
+ 0.098
533
+ ],
534
+ "angle": 0,
535
+ "content": "TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS."
536
+ },
537
+ {
538
+ "type": "table",
539
+ "bbox": [
540
+ 0.126,
541
+ 0.108,
542
+ 0.442,
543
+ 0.288
544
+ ],
545
+ "angle": 0,
546
+ "content": "<table><tr><td>Algorithm</td><td>Security Level</td><td>Total Time (ms)</td></tr><tr><td>Kyber-512</td><td>128-bit</td><td>0.127</td></tr><tr><td>Kyber-768</td><td>192-bit</td><td>0.201</td></tr><tr><td>Kyber-1024</td><td>256-bit</td><td>0.294</td></tr><tr><td>Dilithium-2</td><td>128-bit</td><td>0.643</td></tr><tr><td>Dilithium-3</td><td>192-bit</td><td>0.992</td></tr><tr><td>Dilithium-5</td><td>256-bit</td><td>1.360</td></tr><tr><td>ECDSA(P-256)</td><td>128-bit</td><td>0.801</td></tr><tr><td>ECDSA(P-384)</td><td>192-bit</td><td>1.702</td></tr><tr><td>ECDSA(P-512)</td><td>256-bit</td><td>2.398</td></tr><tr><td>RSA-2048</td><td>112-bit</td><td>0.324</td></tr><tr><td>RSA-3072</td><td>128-bit</td><td>0.884</td></tr><tr><td>ECDH(P-256)</td><td>128-bit</td><td>0.102</td></tr><tr><td>ECDH(P-384)</td><td>192-bit</td><td>0.299</td></tr><tr><td>ECDH(P-521)</td><td>256-bit</td><td>0.903</td></tr></table>"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.078,
552
+ 0.317,
553
+ 0.49,
554
+ 0.375
555
+ ],
556
+ "angle": 0,
557
+ "content": "continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues."
558
+ },
559
+ {
560
+ "type": "title",
561
+ "bbox": [
562
+ 0.078,
563
+ 0.39,
564
+ 0.175,
565
+ 0.403
566
+ ],
567
+ "angle": 0,
568
+ "content": "A. Challenges"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.078,
574
+ 0.41,
575
+ 0.49,
576
+ 0.68
577
+ ],
578
+ "angle": 0,
579
+ "content": "1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by \\(1.5\\%\\) [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge."
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.078,
585
+ 0.682,
586
+ 0.49,
587
+ 0.907
588
+ ],
589
+ "angle": 0,
590
+ "content": "2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a \"fail secure\" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. To prevent network partitioning, telecom operators must ensure"
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.509,
596
+ 0.065,
597
+ 0.919,
598
+ 0.092
599
+ ],
600
+ "angle": 0,
601
+ "content": "PQC upgrades happen in sync across critical systems or remain backward-compatible."
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.508,
607
+ 0.093,
608
+ 0.919,
609
+ 0.394
610
+ ],
611
+ "angle": 0,
612
+ "content": "3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to \"start planning\" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively."
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.508,
618
+ 0.395,
619
+ 0.919,
620
+ 0.651
621
+ ],
622
+ "angle": 0,
623
+ "content": "4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial."
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.508,
629
+ 0.652,
630
+ 0.919,
631
+ 0.907
632
+ ],
633
+ "angle": 0,
634
+ "content": "5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct"
635
+ }
636
+ ],
637
+ [
638
+ {
639
+ "type": "text",
640
+ "bbox": [
641
+ 0.078,
642
+ 0.064,
643
+ 0.49,
644
+ 0.093
645
+ ],
646
+ "angle": 0,
647
+ "content": "extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed."
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.078,
653
+ 0.093,
654
+ 0.49,
655
+ 0.35
656
+ ],
657
+ "angle": 0,
658
+ "content": "6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning."
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.079,
664
+ 0.36,
665
+ 0.443,
666
+ 0.375
667
+ ],
668
+ "angle": 0,
669
+ "content": "B. Successful Implementations and Initiatives of PQC"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.078,
675
+ 0.379,
676
+ 0.49,
677
+ 0.498
678
+ ],
679
+ "angle": 0,
680
+ "content": "Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:"
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.078,
686
+ 0.5,
687
+ 0.49,
688
+ 0.831
689
+ ],
690
+ "angle": 0,
691
+ "content": "1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure."
692
+ },
693
+ {
694
+ "type": "text",
695
+ "bbox": [
696
+ 0.078,
697
+ 0.833,
698
+ 0.49,
699
+ 0.908
700
+ ],
701
+ "angle": 0,
702
+ "content": "2) SK Telecom (South Korea) - PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of postquantum cryptography in a real 5G standalone network environ-"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.508,
708
+ 0.063,
709
+ 0.92,
710
+ 0.213
711
+ ],
712
+ "angle": 0,
713
+ "content": "ronment [11]. In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development."
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.508,
719
+ 0.214,
720
+ 0.92,
721
+ 0.425
722
+ ],
723
+ "angle": 0,
724
+ "content": "3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be \"quantum ready\" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telekom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption."
725
+ },
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.508,
730
+ 0.427,
731
+ 0.92,
732
+ 0.667
733
+ ],
734
+ "angle": 0,
735
+ "content": "Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts."
736
+ },
737
+ {
738
+ "type": "title",
739
+ "bbox": [
740
+ 0.509,
741
+ 0.677,
742
+ 0.791,
743
+ 0.691
744
+ ],
745
+ "angle": 0,
746
+ "content": "C. Future Outlook and Recommendations"
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.508,
752
+ 0.697,
753
+ 0.92,
754
+ 0.862
755
+ ],
756
+ "angle": 0,
757
+ "content": "The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate \"harvest now, decrypt later\" risks. Awareness and education are also crucial for leadership and technical teams."
758
+ },
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.509,
763
+ 0.863,
764
+ 0.92,
765
+ 0.908
766
+ ],
767
+ "angle": 0,
768
+ "content": "A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. Initial"
769
+ }
770
+ ],
771
+ [
772
+ {
773
+ "type": "text",
774
+ "bbox": [
775
+ 0.074,
776
+ 0.063,
777
+ 0.492,
778
+ 0.198
779
+ ],
780
+ "angle": 0,
781
+ "content": "transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s."
782
+ },
783
+ {
784
+ "type": "text",
785
+ "bbox": [
786
+ 0.074,
787
+ 0.199,
788
+ 0.492,
789
+ 0.349
790
+ ],
791
+ "angle": 0,
792
+ "content": "Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with \"quantum-safe\" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further."
793
+ },
794
+ {
795
+ "type": "text",
796
+ "bbox": [
797
+ 0.074,
798
+ 0.349,
799
+ 0.491,
800
+ 0.471
801
+ ],
802
+ "angle": 0,
803
+ "content": "By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance."
804
+ },
805
+ {
806
+ "type": "title",
807
+ "bbox": [
808
+ 0.218,
809
+ 0.476,
810
+ 0.348,
811
+ 0.489
812
+ ],
813
+ "angle": 0,
814
+ "content": "VI. CONCLUSION"
815
+ },
816
+ {
817
+ "type": "text",
818
+ "bbox": [
819
+ 0.074,
820
+ 0.493,
821
+ 0.491,
822
+ 0.643
823
+ ],
824
+ "angle": 0,
825
+ "content": "The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization."
826
+ },
827
+ {
828
+ "type": "text",
829
+ "bbox": [
830
+ 0.074,
831
+ 0.645,
832
+ 0.492,
833
+ 0.794
834
+ ],
835
+ "angle": 0,
836
+ "content": "However large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation."
837
+ },
838
+ {
839
+ "type": "text",
840
+ "bbox": [
841
+ 0.074,
842
+ 0.795,
843
+ 0.492,
844
+ 0.825
845
+ ],
846
+ "angle": 0,
847
+ "content": "Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as"
848
+ },
849
+ {
850
+ "type": "text",
851
+ "bbox": [
852
+ 0.503,
853
+ 0.063,
854
+ 0.923,
855
+ 0.23
856
+ ],
857
+ "angle": 0,
858
+ "content": "standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era."
859
+ },
860
+ {
861
+ "type": "title",
862
+ "bbox": [
863
+ 0.665,
864
+ 0.242,
865
+ 0.762,
866
+ 0.256
867
+ ],
868
+ "angle": 0,
869
+ "content": "REFERENCES"
870
+ },
871
+ {
872
+ "type": "ref_text",
873
+ "bbox": [
874
+ 0.515,
875
+ 0.265,
876
+ 0.921,
877
+ 0.311
878
+ ],
879
+ "angle": 0,
880
+ "content": "[1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, \"Transitioning organizations to post-quantum cryptography,\" Nature, vol. 605, no. 7909, pp. 237–243, 2022."
881
+ },
882
+ {
883
+ "type": "ref_text",
884
+ "bbox": [
885
+ 0.515,
886
+ 0.312,
887
+ 0.921,
888
+ 0.334
889
+ ],
890
+ "angle": 0,
891
+ "content": "[2] D. J. Bernstein and T. Lange, \"Post-quantum cryptography,\" Nature, vol. 549, no. 7671, pp. 188-194, 2017."
892
+ },
893
+ {
894
+ "type": "ref_text",
895
+ "bbox": [
896
+ 0.516,
897
+ 0.335,
898
+ 0.921,
899
+ 0.379
900
+ ],
901
+ "angle": 0,
902
+ "content": "[3] G. Alagic, G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., \"Status report on the first round of the NIST post-quantum cryptography standardization process,\" 2019."
903
+ },
904
+ {
905
+ "type": "ref_text",
906
+ "bbox": [
907
+ 0.516,
908
+ 0.38,
909
+ 0.921,
910
+ 0.426
911
+ ],
912
+ "angle": 0,
913
+ "content": "[4] National Institute of Standards and Technology, \"Post-Quantum Cryptography Standardization,\" 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm"
914
+ },
915
+ {
916
+ "type": "ref_text",
917
+ "bbox": [
918
+ 0.516,
919
+ 0.426,
920
+ 0.921,
921
+ 0.482
922
+ ],
923
+ "angle": 0,
924
+ "content": "[5] GSM Association, \"Post Quantum Cryptography - Guidelines for Telecom Use Cases,\" GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf."
925
+ },
926
+ {
927
+ "type": "ref_text",
928
+ "bbox": [
929
+ 0.516,
930
+ 0.483,
931
+ 0.945,
932
+ 0.516
933
+ ],
934
+ "angle": 0,
935
+ "content": "[6] PKI Consortium, \"Key takeaways of the PQC conference in Austin,\" January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/."
936
+ },
937
+ {
938
+ "type": "ref_text",
939
+ "bbox": [
940
+ 0.516,
941
+ 0.517,
942
+ 0.921,
943
+ 0.596
944
+ ],
945
+ "angle": 0,
946
+ "content": "[7] U.S. Government, \"Report on post-quantum cryptography,\" Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf"
947
+ },
948
+ {
949
+ "type": "ref_text",
950
+ "bbox": [
951
+ 0.516,
952
+ 0.597,
953
+ 0.921,
954
+ 0.642
955
+ ],
956
+ "angle": 0,
957
+ "content": "[8] J. Taaffe, \"Are telcos ready for a quantum leap?\" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap."
958
+ },
959
+ {
960
+ "type": "ref_text",
961
+ "bbox": [
962
+ 0.516,
963
+ 0.643,
964
+ 0.921,
965
+ 0.72
966
+ ],
967
+ "angle": 0,
968
+ "content": "[9] SoftBank Corp. and SandboxAQ, \"SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers,\" March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement-next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers."
969
+ },
970
+ {
971
+ "type": "ref_text",
972
+ "bbox": [
973
+ 0.509,
974
+ 0.721,
975
+ 0.921,
976
+ 0.767
977
+ ],
978
+ "angle": 0,
979
+ "content": "[10] SoftBank Corp., \"SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology,\" February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/."
980
+ },
981
+ {
982
+ "type": "ref_text",
983
+ "bbox": [
984
+ 0.509,
985
+ 0.767,
986
+ 0.921,
987
+ 0.824
988
+ ],
989
+ "angle": 0,
990
+ "content": "[11] Thales Group and SK Telecom, \"Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks,\" 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case."
991
+ },
992
+ {
993
+ "type": "list",
994
+ "bbox": [
995
+ 0.509,
996
+ 0.265,
997
+ 0.945,
998
+ 0.824
999
+ ],
1000
+ "angle": 0,
1001
+ "content": null
1002
+ }
1003
+ ]
1004
+ ]
data/2025/2503_12xxx/2503.12952/bbaeb81a-2f2e-42c2-84ea-7ec1e03fb00a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df3f69ac08c6f23f86d8e84d7001a87077ea37f272f0609414bfdb0bf470d8c1
3
+ size 105041
data/2025/2503_12xxx/2503.12952/full.md ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Performance Analysis and Industry Deployment of Post-Quantum Cryptography Algorithms
2
+
3
+ Elif Dicle Demir
4
+
5
+ Electrical and Electronics Eng. Dept.
6
+
7
+ Koç University
8
+
9
+ Istanbul, Türkiye
10
+
11
+ elifdemir21@ku.edu.tr
12
+
13
+ Buse Bilgin
14
+
15
+ 6GEN Lab., Next-Gen R&D
16
+
17
+ Network Technologies, Turkcell
18
+
19
+ Istanbul, Türkiye
20
+
21
+ buse.bilgin@turkcell.com.tr
22
+
23
+ Mehmet Cengiz Onbaşlı
24
+
25
+ Electrical and Electronics Eng. Dept.
26
+
27
+ Koç University
28
+
29
+ Istanbul, Türkiye
30
+
31
+ monbasli@ku.edu.tr
32
+
33
+ Abstract—As quantum computing advances, modern cryptographic standards face an existential threat, necessitating a transition to post-quantum cryptography (PQC). The National Institute of Standards and Technology (NIST) has selected CRYSTALS-Kyber and CRYSTALS-Dilithium as standardized PQC algorithms for secure key exchange and digital signatures, respectively. This study conducts a comprehensive performance analysis of these algorithms by benchmarking execution times across cryptographic operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, the impact of AVX2 optimizations is evaluated to assess hardware acceleration benefits. Our findings demonstrate that Kyber and Dilithium achieve efficient execution times, outperforming classical cryptographic schemes such as RSA and ECDSA at equivalent security levels. Beyond technical performance, the real-world deployment of PQC introduces challenges in telecommunications networks, where large-scale infrastructure upgrades, interoperability with legacy systems, and regulatory constraints must be addressed. This paper examines the feasibility of PQC adoption in telecom environments, highlighting key transition challenges, security risks, and implementation strategies. Through industry case studies, we illustrate how telecom operators are integrating PQC into 5G authentication, subscriber identity protection, and secure communications. Our analysis provides insights into the computational trade-offs, deployment considerations, and standardization efforts shaping the future of quantum-safe cryptographic infrastructure.
34
+
35
+ Index Terms—Post-Quantum Cryptography, CRYSTALS-Kyber, CRYSTALS-Dilithium, NIST Standardization, Telecommunications Security, Cryptographic Deployment, Quantum-Safe Networks.
36
+
37
+ # I. INTRODUCTION
38
+
39
+ Modern cryptographic systems rely on the computational intractability of certain mathematical problems, such as integer factorization and discrete logarithms, to ensure the security of digital communication and data protection [1]. The advent of quantum computing poses a fundamental threat to modern cryptographic systems, as algorithms such as Shor's and Grover's exploit quantum parallelism to break widely used cryptographic primitives. Shor's algorithm efficiently factors large integers and solves the discrete logarithm problem, undermining the security of RSA and Elliptic Curve Cryptography(ECC), while Grover's algorithm accelerates brute-force attacks, significantly reducing the effective security of symmetric encryption schemes [2]. As research continues to refine quantum hardware, the urgency to transition towards
40
+
41
+ quantum-resistant cryptographic solutions has become a pressing concern.
42
+
43
+ To address these emerging threats, the National Institute of Standards and Technology (NIST) initiated the Post-Quantum Cryptography (PQC) Standardization process to develop cryptographic algorithms resilient to quantum threats. The evaluation criteria for candidate algorithms include security against both classical and quantum attacks, cost and performance efficiency, and implementation characteristics such as flexibility and resistance to side-channel attacks [3]. As a result of the NIST standardization process, CRYSTALS-Kyber and HQC were selected as key encapsulation mechanisms (KEMs), while CRYSTALS-Dilithium, Falcon, and SPHINCS+ were chosen as digital signature schemes due to their strong security foundations, computational efficiency, and real-world applicability. Kyber is a lattice-based KEM, while HQC is a code-based KEM, both ensuring secure key exchange over insecure communication channels. Similarly, Dilithium and Falcon are lattice-based digital signature schemes designed for message authenticity and integrity, whereas SPHINCS+ is a hash-based scheme. [4]
44
+
45
+ This study focuses on the performance evaluation of postquantum cryptographic algorithms, specifically Kyber and Dilithium, by benchmarking their execution times across key cryptographic operations. Given the critical role of computational efficiency in the real-world adoption of PQC, our analysis provides insights into their feasibility for practical deployment. Additionally, as the transition to quantum-safe cryptography involves not only technical performance but also industry-wide adoption challenges, we extend our study to include an industry perspective, assessing the implications of PQC deployment in telecommunications and broader enterprise environments.
46
+
47
+ # II. TESTING METHODOLOGY AND ENVIRONMENT SETUP FOR PERFORMANCE ANALYSIS
48
+
49
+ To understand their computational feasibility, we conducted a detailed performance analysis of Kyber and Dilithium under controlled benchmarking conditions. The performance of cryptographic algorithms is a critical factor in their real-world adoption, particularly in PQC, where computational efficiency directly impacts practical deployment in constrained
50
+
51
+ environments. This section presents a benchmarking study of Kyber and Dilithium, evaluating their execution time across key operations such as key generation, encapsulation, decapsulation, signing, and verification. Additionally, optimizations leveraging AVX2 vector instructions are examined to assess the impact of hardware acceleration on performance. Furthermore, we compare these PQC algorithms with widely used classical cryptographic schemes—Elliptic Curve Diffie-Hellman (ECDH), Elliptic Curve Digital Signature Algorithm (ECDSA), and RSA—to analyze the trade-offs in execution time and efficiency when transitioning to quantum-resistant cryptography.
52
+
53
+ Each cryptographic operation was executed 1,000 times to ensure consistency, with median and average execution times recorded. The benchmarking methodology follows standard cryptographic evaluation practices, converting measured CPU cycles to execution time using a fixed $3.3\mathrm{GHz}$ clock. We evaluated both reference and AVX2-optimized implementations of Kyber and Dilithium to assess the performance gains from vectorized instructions. Additionally, to compare PQC with classical cryptography, we tested ECDH, ECDSA, and RSA under the same conditions using OpenSSL libraries.
54
+
55
+ # III. PERFORMANCE EVALUATION OF KYBER AND DILITHIUM
56
+
57
+ Table I presents performance metrics for Kyber, a key encapsulation mechanism (KEM). It includes the secret key (sk), public key (pk), and ciphertext (ct) sizes for different security levels, reflecting storage and transmission overhead. The listed cryptographic operations are key generation (gen), responsible for producing the key pair; encapsulation (enc), encrypting a shared secret using the recipient's public key; and decapsulation (dec), recovering the shared secret with the private key. Table II provides results for Dilithium, a digital signature scheme. It reports public key (pk) and signature (sig) sizes, which indicate storage costs for authentication. The benchmarked operations include key generation (gen), used to create the signing key pair; signing (sign), which generates digital signatures for message integrity; and verification (verify), ensuring the validity of signatures. The AVX2 speedup rate in Tables I and II represents the performance improvement of the AVX2-optimized implementation compared to the reference implementation. It is calculated as the ratio of execution times, indicating how many times faster the AVX2 implementation performs a given cryptographic operation. A higher speedup value signifies greater efficiency gains achieved through vectorized polynomial arithmetic in AVX2-enabled processors.
58
+
59
+ As indicated in Table I, the execution times of Kyber increase with higher security levels across all three operations: key generation, encapsulation, and decapsulation. Notably, Kyber-512 completes execution in $0.127\mathrm{ms}$ , whereas Kyber-1024 requires $0.294\mathrm{ms}$ , demonstrating the expected computational cost of increased cryptographic strength. However, the scaling is nonlinear, as the increase from Kyber-768 to Kyber-1024 is smaller than from Kyber-512 to Kyber-768.
60
+
61
+ The AVX2 optimization significantly reduces execution time, yielding an average speedup of $5.98 \times$ across different security levels. The most substantial gains occur in decapsulation, which is reduced by up to $6.65 \times$ due to the vectorized polynomial arithmetic enabled by AVX2 instructions. This demonstrates that Kyber benefits greatly from parallelization, making it well-suited for optimized hardware implementations.
62
+
63
+ Similarly, as shown in Table II, the execution time of Dilithium scales with security levels, with Dilithium-2 executing in 0.643 ms while Dilithium-5 requires 1.36 ms. Unlike Kyber, where operations are relatively balanced, Dilithium's signing step dominates execution time—accounting for over $60\%$ of the total runtime in all security levels. This is due to the structured lattice sampling required for signature generation, which is inherently more computationally expensive than verification.
64
+
65
+ The AVX2 speedup for Dilithium is lower than for Kyber $(4.8\times$ on average), but still significant, particularly in the signing operation, which achieves up to a $5.83\times$ reduction in execution time. The verification step sees the smallest speedup $(3.76\times)$ , reflecting its already efficient nature. The results emphasize that while Dilithium is computationally heavier than Kyber, its AVX2-optimized variant brings notable efficiency improvements, making it feasible for real-world applications.
66
+
67
+ Overall, the results in Tables I and II underscore the computational viability of Kyber and Dilithium, demonstrating that hardware optimizations (e.g., AVX2) significantly enhance performance. These findings highlight the practicality of post-quantum cryptography (PQC) deployment, as even without specialized hardware accelerators, Kyber and Dilithium achieve efficient execution times while maintaining high security.
68
+
69
+ TABLE I KEY AND CIPHERTEXT SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF KYBER.
70
+
71
+ <table><tr><td colspan="4">KYBER 512</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 1632</td><td>gen: 0.035</td><td>gen: 0.007</td><td>5.00</td></tr><tr><td>pk: 800</td><td>enc: 0.040</td><td>enc: 0.007</td><td>5.71</td></tr><tr><td>ct: 768</td><td>dec: 0.052</td><td>dec: 0.008</td><td>6.50</td></tr><tr><td>Total</td><td>0.127</td><td>0.022</td><td>5.77</td></tr><tr><td colspan="4">KYBER 768</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 2400</td><td>gen: 0.058</td><td>gen: 0.011</td><td>5.27</td></tr><tr><td>pk: 1184</td><td>enc: 0.063</td><td>enc: 0.011</td><td>5.73</td></tr><tr><td>ct: 1088</td><td>dec: 0.080</td><td>dec: 0.012</td><td>6.67</td></tr><tr><td>Total</td><td>0.201</td><td>0.034</td><td>5.91</td></tr><tr><td colspan="4">KYBER 1024</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>sk: 3168</td><td>gen: 0.089</td><td>gen: 0.015</td><td>5.93</td></tr><tr><td>pk: 1568</td><td>enc: 0.092</td><td>enc: 0.015</td><td>6.13</td></tr><tr><td>ct: 1568</td><td>dec: 0.113</td><td>dec: 0.017</td><td>6.65</td></tr><tr><td>Total</td><td>0.294</td><td>0.047</td><td>6.26</td></tr></table>
72
+
73
+ TABLE II PUBLIC KEY AND SIGNATURE SIZES AND EXECUTION TIMES (IN MILLISECONDS) FOR ALL PARAMETER SETS OF DILITHIUM.
74
+
75
+ <table><tr><td colspan="4">DILITHIUM 2</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1312</td><td>gen: 0.094</td><td>gen: 0.026</td><td>3.62</td></tr><tr><td>sig: 2420</td><td>sign: 0.445</td><td>sign: 0.077</td><td>5.78</td></tr><tr><td></td><td>verify: 0.104</td><td>verify: 0.028</td><td>3.71</td></tr><tr><td>Total</td><td>0.643</td><td>0.131</td><td>4.91</td></tr><tr><td colspan="4">DILITHIUM 3</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 1952</td><td>gen: 0.167</td><td>gen: 0.045</td><td>3.71</td></tr><tr><td>sig: 3293</td><td>sign: 0.665</td><td>sign: 0.120</td><td>5.54</td></tr><tr><td></td><td>verify: 0.160</td><td>verify: 0.045</td><td>3.56</td></tr><tr><td>Total</td><td>0.992</td><td>0.210</td><td>4.73</td></tr><tr><td colspan="4">DILITHIUM 5</td></tr><tr><td>Sizes (Bytes)</td><td>Reference (ms)</td><td>AVX2 (ms)</td><td>AVX2 Speedup Rate</td></tr><tr><td>pk: 2592</td><td>gen: 0.253</td><td>gen: 0.070</td><td>3.61</td></tr><tr><td>sig: 4595</td><td>sign: 0.840</td><td>sign: 0.144</td><td>5.83</td></tr><tr><td></td><td>verify: 0.267</td><td>verify: 0.071</td><td>3.76</td></tr><tr><td>Total</td><td>1.360</td><td>0.285</td><td>4.77</td></tr></table>
76
+
77
+ # IV. PERFORMANCE COMPARISON: POST-QUANTUM CRYPTOGRAPHY VS. CLASSICAL CRYPTOGRAPHY
78
+
79
+ Table III presents a comparative analysis of execution times for post-quantum and classical cryptographic algorithms, evaluated under controlled conditions. Both PQC and classical schemes were tested at different security levels, measured in bits, to assess performance variations. The table includes cryptographic algorithms across multiple security configurations, ensuring a direct comparison of execution times. The evaluation focuses on total execution time, measured in milliseconds, to quantify computational cost across different cryptographic operations. While this analysis highlights execution speed, real-world deployment must also consider additional factors such as memory footprint, communication overhead, and hardware compatibility.
80
+
81
+ Kyber exhibits notable performance advantages over both RSA and ECDH, which are commonly employed for key exchange. Kyber-512, offering 128-bit security, achieves execution times that are approximately three times faster than both RSA-2048 and ECDH(P-256), despite these classical schemes providing lower security guarantees. Even Kyber-1024, the most computationally expensive variant, maintains an execution time that is roughly three times faster than RSA-3072, which offers only 128-bit security. At equivalent security levels, Kyber consistently achieves faster execution times than ECDH(P-256, P-384, P-521) while also providing quantum resistance. These efficiency gains are attributed to Kyber's lattice-based cryptographic foundation, which relies on small polynomials and number-theoretic transforms (NTT) rather than large-number modular exponentiation. This mathematical structure enables faster key generation and encapsulation while maintaining strong security guarantees, particularly against quantum adversaries.
82
+
83
+ Dilithium demonstrates significant computational advantages over ECDSA, a widely used classical digital signature scheme. At the 128-bit security level, Dilithium-2 executes signature operations approximately $20\%$ faster than ECDSA(P-256), with the performance gap increasing at higher security levels. Dilithium-5, the highest-security variant, achieves nearly twice the execution speed of ECDSA(P-512) at the 256-bit security level. A distinct characteristic of Dilithium is that signature generation dominates execution time, accounting for over $60\%$ of the total runtime, whereas ECDSA exhibits a more balanced distribution between signing and verification. This difference arises from Dilithium's structured lattice sampling, which, while computationally intensive, remains more efficient than ECDSA's elliptic curve discrete logarithm operations. Additionally, Dilithium's deterministic signature generation eliminates nonce-related vulnerabilities, a known weakness in ECDSA implementations.
84
+
85
+ The results indicate that post-quantum cryptographic algorithms do not inherently introduce higher computational costs. On the contrary, Kyber and Dilithium frequently outperform classical cryptographic schemes at equivalent security levels. Kyber consistently demonstrates superior efficiency in key exchange operations compared to RSA and ECDH, even at its highest security configuration. Similarly, Dilithium provides a computationally efficient alternative to ECDSA, particularly as security levels increase. While Dilithium's signing operation remains computationally heavier than verification, it still surpasses ECDSA in signature generation across all tested configurations. These findings highlight the feasibility of transitioning to quantum-resistant cryptographic standards in practical applications, demonstrating that enhanced security can be achieved without compromising computational efficiency.
86
+
87
+ These performance findings highlight the computational feasibility of Kyber and Dilithium as post-quantum cryptographic solutions, demonstrating that quantum resistance does not necessarily come at the cost of execution efficiency. However, execution time is only one aspect of cryptographic feasibility. While our controlled benchmarking showed that Kyber and Dilithium outperform classical schemes in speed, these results were obtained under optimized and isolated conditions. Real-world deployment involves additional complexities, such as infrastructure constraints, interoperability with existing systems, and operational overhead, which can impact practical performance. Thus, while PQC shows strong computational efficiency, its large-scale adoption in telecom networks requires a broader evaluation, considering scalability, integration challenges, and regulatory compliance.
88
+
89
+ # V. POST-QUANTUM CRYPTOGRAPHY IN TELECOMMUNICATIONS: CHALLENGES, IMPLEMENTATIONS, AND FUTURE OUTLOOK
90
+
91
+ Implementing PQC in telecommunications networks presents significant challenges. Telecom operators must upgrade complex, large-scale infrastructures that currently rely on classical encryption, all while maintaining service
92
+
93
+ TABLE III EXECUTION TIME COMPARISON OF POST-QUANTUM AND CLASSICAL CRYPTOGRAPHIC ALGORITHMS.
94
+
95
+ <table><tr><td>Algorithm</td><td>Security Level</td><td>Total Time (ms)</td></tr><tr><td>Kyber-512</td><td>128-bit</td><td>0.127</td></tr><tr><td>Kyber-768</td><td>192-bit</td><td>0.201</td></tr><tr><td>Kyber-1024</td><td>256-bit</td><td>0.294</td></tr><tr><td>Dilithium-2</td><td>128-bit</td><td>0.643</td></tr><tr><td>Dilithium-3</td><td>192-bit</td><td>0.992</td></tr><tr><td>Dilithium-5</td><td>256-bit</td><td>1.360</td></tr><tr><td>ECDSA(P-256)</td><td>128-bit</td><td>0.801</td></tr><tr><td>ECDSA(P-384)</td><td>192-bit</td><td>1.702</td></tr><tr><td>ECDSA(P-512)</td><td>256-bit</td><td>2.398</td></tr><tr><td>RSA-2048</td><td>112-bit</td><td>0.324</td></tr><tr><td>RSA-3072</td><td>128-bit</td><td>0.884</td></tr><tr><td>ECDH(P-256)</td><td>128-bit</td><td>0.102</td></tr><tr><td>ECDH(P-384)</td><td>192-bit</td><td>0.299</td></tr><tr><td>ECDH(P-521)</td><td>256-bit</td><td>0.903</td></tr></table>
96
+
97
+ continuity. Key challenges include performance and latency impacts, compatibility with legacy systems, lack of finalized standards, resource and cost constraints, transitional security risks, and vendor readiness issues.
98
+
99
+ # A. Challenges
100
+
101
+ 1) Performance Impact on Existing Infrastructure: PQC algorithms require more computational resources and larger key sizes than classical cryptography. Many schemes are at least an order of magnitude slower or produce larger keys and ciphertexts than RSA or ECC, straining network devices [5]. The increased size of PQC keys, signatures, and ciphertexts taxes bandwidth and memory-constrained hardware; for instance, an additional 1 KB in a TLS handshake can increase response time by $1.5\%$ [6]. Latency-sensitive telecom applications, such as voice and video, may experience performance degradation due to longer cryptographic operations or larger handshake messages. Operators need to evaluate whether servers, routers, and HSMs can support the increased computational load of PQC, as many may require hardware upgrades specifically for PQC adoption. Especially in radio access networks (RANs) and customer devices with limited processing power, PQC's computational overhead and memory footprint pose a significant deployment challenge.
102
+
103
+ 2) Interoperability with Legacy Systems: During the transition, not all network elements and partner systems will upgrade to PQC at the same time, raising interoperability issues. If one system uses a PQC-based protocol but the communicating peer does not, secure connections cannot be established [7]. Many telecom protocols use a "fail secure" approach, meaning a PQC-enabled node could be cut off from legacy nodes that don't recognize the new algorithms. Due to the interconnected nature of telecom networks, a single non-upgraded component can block migration, creating deployment bottlenecks. A possible solution is hybrid cryptographic modes (combining classical and PQC algorithms), but this adds complexity and requires new protocol standards and careful validation, potentially slowing down the transition. To prevent network partitioning, telecom operators must ensure
104
+
105
+ PQC upgrades happen in sync across critical systems or remain backward-compatible.
106
+
107
+ 3) Standardization and Regulatory Concerns: The telecom industry is highly standardized and regulated, so PQC adoption hinges on mature standards and regulatory guidance. As of 2024, standards bodies like NIST are just publishing the first official PQC algorithm standards [8]. Until international standards (e.g., 3GPP, IETF, ETSI) incorporate PQC, telcos risk adopting interim solutions that might not be interoperable or compliant long-term. There is also regulatory pressure: governments and industry bodies are already setting timelines and mandates for quantum-safe transitions. For example, the U.S. National Security Agency's CNSA 2.0 mandates specific PQC algorithms, aiming all national security systems to be quantum-resistant by 2035. However, inconsistent national strategies pose challenges for global carriers, as many countries have only issued high-level guidance to "start planning" with few concrete standards yet. The absence of finalized telecom-specific PQC standards adds uncertainty, requiring operators to closely coordinate with standards organizations to ensure protocols like 5G authentication, IPsec, and TLS integrate PQC effectively.
108
+
109
+ 4) Cost and Resource Allocation: Upgrading a telecom operator's cryptographic infrastructure to PQC is costly and resource-intensive. Many legacy systems lack the processing power, memory, or bandwidth to support PQC, requiring replacement or retrofitting of equipment such as mobile devices, SIM cards, routers, and base stations. This represents a significant capital expense, with costs extending to PQC-capable HSMs, accelerator cards, software updates, staff training, testing, and parallel system operation during the transition. Smaller operators worry that only large carriers can afford early adoption, but as vendors integrate PQC into products, upgrade costs are expected to decrease. Nonetheless, operators need to allocate substantial resources for cryptographic inventory, upgrade planning, and continuous maintenance to ensure a smooth migration. The cost of inaction could be higher—a quantum-broken network may result in regulatory penalties and customer loss, making early investment crucial.
110
+
111
+ 5) Security Risks and Transition Challenges: Transitioning to PQC raises security concerns, as these new algorithms have not been tested in real-world deployments for decades like RSA/ECC. There are risks of undiscovered weaknesses or implementation flaws, and some PQC candidates have already been found vulnerable to cryptanalysis and side-channel attacks during standardization. Ensuring side-channel resistance is critical—cryptographic operations must not leak secrets through timing, power, or memory access patterns. Additionally, PQC introduces complex key management and new failure modes; for example, some digital signature schemes require tracking one-time keys, complicating network authentication. Early deployments have exposed issues, such as network middleware and firewalls failing due to large key exchange messages. Misconfigurations, like hybrid mode errors or certificate management lapses, could introduce vulnerabilities. To mitigate these risks, telecom operators must conduct
112
+
113
+ extensive testing, use proven implementations, and ensure crypto-agility, allowing algorithm updates when needed.
114
+
115
+ 6) Vendor Readiness and Supply Chain Considerations: Telecommunications relies on a vast network of vendors for hardware, software, and infrastructure, making PQC adoption a supply chain challenge. Many vendors await finalized standards before integrating PQC, and without support in critical components like SIM cards and routers, full migration is impossible. To address this, telecom operators are updating procurement policies, requiring vendors to support NIST-approved PQC algorithms and crypto-agility. Regulatory bodies may also mandate certification, potentially delaying availability. While some vendors are developing PQC-capable products, widespread readiness will take time. Effective supply chain management and early engagement with suppliers are essential to ensure smooth deployment, coordinated upgrades, and interoperability. Ultimately, achieving a quantum-safe telecom network requires industry-wide collaboration and careful planning.
116
+
117
+ # B. Successful Implementations and Initiatives of PQC
118
+
119
+ Despite the challenges, there have been several successful implementations and trials of post-quantum cryptography in telecom contexts. Forward-thinking carriers and technology partners around the world have started to integrate PQC into test networks, demonstrating feasibility and gleaning best practices. Below are a few notable examples and case studies highlighting how PQC deployment is being approached in telecommunications:
120
+
121
+ 1) SoftBank (Japan) – Hybrid PQC Network Trial: SoftBank Corp., a major mobile operator in Japan, partnered with SandboxAQ to test PQC algorithms in a live network environment. In 2023 they conducted a hybrid encryption trial, combining classical elliptic-curve cryptography with lattice-based post-quantum algorithms on live network traffic [9]. The results were encouraging: the hybrid quantum-safe approach was verified to work on existing 4G/5G infrastructure with minimal performance impact. SoftBank reported that lattice-based PQC algorithms (such as those later standardized by NIST) outperformed other quantum-safe alternatives in their tests, providing strong security with only marginal added latency [10]. By adopting a hybrid approach, SoftBank ensured interoperability with existing systems while enhancing security. Their phased deployment, from lab tests to real-world networks, demonstrated that careful algorithm selection and optimization can mitigate future quantum threats without major performance trade-offs. Collaboration with SandboxAQ helped streamline cryptographic inventory and regulatory compliance. SoftBank continues investing in PQC, positioning early adoption as a competitive advantage in secure telecom infrastructure.
122
+
123
+ 2) SK Telecom (South Korea) – PQC in 5G Standalone Network: Another pioneering effort was led by SK Telecom (SKT) in South Korea, in collaboration with Thales. SKT and Thales carried out a groundbreaking test of post-quantum cryptography in a real 5G standalone network environment [11].
124

+
125

+ In this pilot, SKT deployed quantum-resistant encryption to secure subscriber identities and network traffic. They tested 5G USIM cards implementing the CRYSTALS-Kyber key encapsulation algorithm, ensuring authentication remains secure against quantum threats. The trial demonstrated seamless interoperability between PQC-protected SIMs and the core network, with encrypted calls proving quantum-safe communication. This deployment, one of the first PQC integrations in 5G, underscores the role of carrier-vendor partnerships and informs ongoing standards development.
126
+
127
+ 3) North American Carriers and Initiatives: U.S. and Canadian telecom operators are preparing for PQC, driven by government directives. AT&T plans to be "quantum ready" by 2025, with internal pilots testing PQC in VPNs and TLS. While large-scale deployments are pending, mandates for critical infrastructure are pushing adoption. In Canada and Europe, providers like Verizon, Rogers, Deutsche Telekom, and BT are engaged in research and industry collaborations, focusing on PQC for routing, customer data protection, and inter-carrier security. These efforts emphasize crypto-agility—ensuring networks can transition flexibly as standards evolve. Industry groups, including the GSMA Post-Quantum Telco Network Taskforce and 5G Americas, are developing best practices to guide telecom operators through PQC adoption.
128
+
129
+ Across successful implementations, key best practices have emerged. Conducting a cryptographic inventory helps identify necessary upgrades, while pilot deployments in less constrained environments allow for manageable PQC integration. A hybrid approach, running PQC alongside classical encryption, has been widely adopted to maintain continuity. Vendor collaboration with SIM card providers, router manufacturers, and software vendors is crucial for early integration. These trials also confirm that early PQC deployment safeguards critical data from future quantum threats. While performance impacts are generally manageable with optimized algorithms, some operators have even improved network efficiency by modernizing legacy systems. Overall, these case studies demonstrate that with careful planning and phased execution, telcos can begin inserting quantum-resistant cryptography into their networks today, gaining experience and confidence for broader rollouts.
130
+
131
+ # C. Future Outlook and Recommendations
132
+
133
+ The adoption of post-quantum cryptography (PQC) in telecom networks is shifting from isolated pilots to broader deployments as standards solidify and the quantum threat looms. Telecom operators must act now, as waiting until large-scale quantum computers emerge will be too late. Security organizations stress the urgency of conducting cryptographic inventories to identify where public-key cryptography is used—such as SIM authentication, SSL/TLS links, and PKI certificates—prioritizing critical assets to mitigate "harvest now, decrypt later" risks. Awareness and education are also crucial for leadership and technical teams.
134
+
135
+ A structured PQC implementation roadmap involves phased deployments, starting with hybrid cryptographic modes alongside classical encryption to maintain compatibility. Initial
136
+
137
+ transitions should focus on non-customer-facing segments, expanding as standards mature and interoperability improves. Operators must align migration plans with regulatory requirements, ensuring compliance with evolving mandates. From 2024 onward, telecom providers are expected to integrate PQC into technology refresh cycles, with PQC becoming a standard in 5G-Advanced and 6G networks by the late 2020s. The goal is to achieve full quantum resistance in critical infrastructure by the early 2030s.
138
+
139
+ Within the next decade, PQC will likely be as integral to telecom security as TLS and IPsec are today. Once NIST and other bodies finalize standards by 2024-2025, adoption will accelerate, giving early adopters a competitive edge with "quantum-safe" services. Given the uncertainty of quantum computing advancements, proactive preparation is essential. Encouragingly, PQC adoption does not necessarily require hardware replacements—many transitions can be done via software updates, reducing costs. As vendors integrate PQC into products, expenses are expected to decrease further.
140
+
141
+ By 2030, much of global telecom traffic, particularly sensitive communications, will likely be encrypted using post-quantum or hybrid cryptographic schemes. Collaboration among telecom operators, governments, and the security community will be crucial for interoperability and resilience. With proactive planning and cooperative execution, the telecom industry can secure global communications against quantum threats while maintaining security, efficiency, and compliance.
142
+
143
+ # VI. CONCLUSION
144
+
145
+ The transition to post-quantum cryptography (PQC) is no longer a theoretical consideration but an imminent necessity for securing digital communications against future quantum threats. This study has demonstrated that CRYSTALS-Kyber and CRYSTALS-Dilithium, the NIST-standardized PQC algorithms, not only provide robust quantum resistance but also achieve competitive execution times compared to classical cryptographic schemes. Benchmarking results highlight their computational efficiency, particularly when optimized with AVX2 vectorization.
146
+
147
+ However, large-scale deployment in telecommunications networks introduces critical challenges, including infrastructure upgrades, interoperability concerns, regulatory compliance, and cost constraints. The successful implementation of PQC in telecom environments requires a structured, phased migration strategy, leveraging hybrid cryptographic approaches to maintain compatibility with legacy systems. Early industry trials demonstrate the viability of PQC adoption while emphasizing the importance of vendor collaboration, cryptographic agility, and thorough performance validation.
148
+
149
+ Looking ahead, PQC is expected to become a fundamental component of telecom security, with adoption accelerating as
150
+
151
+ standards solidify and regulatory mandates take effect. As quantum computing advances remain unpredictable, proactive preparation is essential to mitigate risks associated with delayed migration. Encouragingly, the ongoing integration of PQC into security protocols for 5G and 6G networks, along with continued industry cooperation, ensures that telecom infrastructure remains resilient against emerging cryptographic threats. With careful planning and strategic execution, the transition to quantum-safe cryptography can safeguard telecom networks, ensuring their security and adaptability in the quantum era.
152
+
153
+ # REFERENCES
154
+
155
+ [1] D. Joseph, R. Misoczki, M. Manzano, J. Tricot, F. D. Pinuaga, O. Lacombe, S. Leichenauer, J. Hiday, P. Venables, and R. Hansen, "Transitioning organizations to post-quantum cryptography," Nature, vol. 605, no. 7909, pp. 237–243, 2022.
156
+ [2] D. J. Bernstein and T. Lange, "Post-quantum cryptography," Nature, vol. 549, no. 7671, pp. 188-194, 2017.
157
+ [3] G. Alagic, J. Alperin-Sheriff, D. Apon, D. Cooper, Q. Dang, Y.-K. Liu, C. Miller, D. Moody, R. Peralta et al., "Status report on the first round of the NIST post-quantum cryptography standardization process," 2019.
158
+ [4] National Institute of Standards and Technology, "Post-Quantum Cryptography Standardization," 2024, accessed: 2024-03-17. [Online]. Available: https://csrc.nist.gov/projects/post-quantum-cryptography/selected-algorithm
159
+ [5] GSM Association, "Post Quantum Cryptography - Guidelines for Telecom Use Cases," GSM Association, Technical Report PQ.03, February 2024, accessed: 2024-03-17. [Online]. Available: https://www.gsma.com/newsroom/wp-content/uploads/PQ.03-Post-Quantum-Cryptography-Guidelines-for-Telecom-Use-v1.0.pdf.
160
+ [6] PKI Consortium, "Key takeaways of the PQC conference in Austin," January 30, 2025, accessed: 2025-03-17. [Online]. Available: https://pkic.org/2025/01/30/key-takeaways-of-the-pqc-conference-in-austin/.
161
+ [7] U. Government, "Report on post-quantum cryptography," Government Report, The White House, Washington, D.C., Tech. Rep. REF PQC-Report FINAL Send, July 2024, presented to the Senate Committee on Homeland Security and Governmental Affairs and the House Committee on Oversight and Accountability. [Online]. Available: https://bidenwhitehouse.archives.gov/wp-content/uploads/2024/07/REF_PQC-Report_FINAL_Send.pdf
162
+ [8] J. Taaffe, "Are telcos ready for a quantum leap?" June 2023, accessed: March 17, 2025. [Online]. Available: https://inform.tmforum.org/features-and-opinion/are-tercos-making-a-quantum-leap.
163
+ [9] SoftBank Corp. and SandboxAQ, "SoftBank Corp. and SandboxAQ to Jointly Implement Next-Generation Cryptosystem Resilient to Cyber Attacks from Quantum Computers," March 2022, press Release, accessed: March 17, 2025. [Online]. Available: https://www.sandboxaq.com/press/softbank-corp-and-sandbox-aq-to-jointly-implement-next-generation-cryptosystem-resilient-to-cyber-attacks-from-quantum-computers.
164
+ [10] SoftBank Corp., "SoftBank Corp. and SandboxAQ Jointly Verify Hybrid Mode Quantum-safe Technology," February 2023, blog Post, accessed: March 17, 2025. [Online]. Available: https://www.softbank.jp/en/corp/technology/research/story-event/008/.
165
+ [11] Thales Group and SK Telecom, "Thales and SK Telecom: Pioneering Quantum-Resistant Cryptography for 5G Networks," 2024, accessed: March 17, 2025. [Online]. Available: https://www.thalesgroup.com/en/markets/digital-identity-and-security/mobile/5G-skt-post-quantum-user-case.
data/2025/2503_12xxx/2503.12952/images/9c0eacd157e54f2d5f632c6c7f77df6b42ee84b9d3b991b5c9429c568a4961ff.jpg ADDED

Git LFS Details

  • SHA256: e3ba7c5d51e7cf29a56aaa4f2bfdf6090119fa132bea47776467264bd0dfb188
  • Pointer size: 130 Bytes
  • Size of remote file: 87.9 kB
data/2025/2503_12xxx/2503.12952/images/bd69976a57eb5e31707407c28643d178dd46223bf5a751e795ecb0e0d3d78495.jpg ADDED

Git LFS Details

  • SHA256: 78e1f05fc7daaf5836dc30367671a2afa569db24e9579d48b2d589417942ff54
  • Pointer size: 130 Bytes
  • Size of remote file: 59.5 kB
data/2025/2503_12xxx/2503.12952/images/cd5e919056af76267e409d8fb1057479a9f46d5fb8d12f6435f7d32ac2f38b8c.jpg ADDED

Git LFS Details

  • SHA256: 1e59ab1d1c63985c1c513de4b464b7b687ba73e68ad68988ac6d2d494f9c7cc4
  • Pointer size: 130 Bytes
  • Size of remote file: 83.2 kB