Chelsea707 commited on
Commit
936a380
·
verified ·
1 Parent(s): f1e8b5f

MinerU Batch bf050d1b-7cc0-4553-9038-095f9c6a4d04 (Part 7/8)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_content_list.json +0 -0
  3. data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_model.json +0 -0
  4. data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf +3 -0
  5. data/2025/2504_09xxx/2504.09634/full.md +535 -0
  6. data/2025/2504_09xxx/2504.09634/images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg +3 -0
  7. data/2025/2504_09xxx/2504.09634/images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg +3 -0
  8. data/2025/2504_09xxx/2504.09634/images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg +3 -0
  9. data/2025/2504_09xxx/2504.09634/images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg +3 -0
  10. data/2025/2504_09xxx/2504.09634/images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg +3 -0
  11. data/2025/2504_09xxx/2504.09634/images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg +3 -0
  12. data/2025/2504_09xxx/2504.09634/images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg +3 -0
  13. data/2025/2504_09xxx/2504.09634/images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg +3 -0
  14. data/2025/2504_09xxx/2504.09634/images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg +3 -0
  15. data/2025/2504_09xxx/2504.09634/images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg +3 -0
  16. data/2025/2504_09xxx/2504.09634/images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg +3 -0
  17. data/2025/2504_09xxx/2504.09634/images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg +3 -0
  18. data/2025/2504_09xxx/2504.09634/images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg +3 -0
  19. data/2025/2504_09xxx/2504.09634/images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg +3 -0
  20. data/2025/2504_09xxx/2504.09634/images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg +3 -0
  21. data/2025/2504_09xxx/2504.09634/images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg +3 -0
  22. data/2025/2504_09xxx/2504.09634/images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg +3 -0
  23. data/2025/2504_09xxx/2504.09634/images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg +3 -0
  24. data/2025/2504_09xxx/2504.09634/images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg +3 -0
  25. data/2025/2504_09xxx/2504.09634/images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg +3 -0
  26. data/2025/2504_09xxx/2504.09634/images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg +3 -0
  27. data/2025/2504_09xxx/2504.09634/layout.json +0 -0
  28. data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_content_list.json +1210 -0
  29. data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_model.json +1584 -0
  30. data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf +3 -0
  31. data/2025/2504_09xxx/2504.09641/full.md +225 -0
  32. data/2025/2504_09xxx/2504.09641/images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg +3 -0
  33. data/2025/2504_09xxx/2504.09641/images/05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg +3 -0
  34. data/2025/2504_09xxx/2504.09641/images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg +3 -0
  35. data/2025/2504_09xxx/2504.09641/images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg +3 -0
  36. data/2025/2504_09xxx/2504.09641/images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg +3 -0
  37. data/2025/2504_09xxx/2504.09641/images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg +3 -0
  38. data/2025/2504_09xxx/2504.09641/images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg +3 -0
  39. data/2025/2504_09xxx/2504.09641/images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg +3 -0
  40. data/2025/2504_09xxx/2504.09641/images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg +3 -0
  41. data/2025/2504_09xxx/2504.09641/images/b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg +3 -0
  42. data/2025/2504_09xxx/2504.09641/images/c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg +3 -0
  43. data/2025/2504_09xxx/2504.09641/images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg +3 -0
  44. data/2025/2504_09xxx/2504.09641/images/d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg +3 -0
  45. data/2025/2504_09xxx/2504.09641/images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg +3 -0
  46. data/2025/2504_09xxx/2504.09641/images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg +3 -0
  47. data/2025/2504_09xxx/2504.09641/layout.json +0 -0
  48. data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_content_list.json +0 -0
  49. data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_model.json +0 -0
  50. data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf +3 -0
.gitattributes CHANGED
@@ -1188,3 +1188,11 @@ data/2025/2504_09xxx/2504.09858/aee82fd0-741f-450f-b13e-3a2b835b4b13_origin.pdf
1188
  data/2025/2504_09xxx/2504.09946/bcacb31d-9da1-45e5-a5c9-17c70dbf8404_origin.pdf filter=lfs diff=lfs merge=lfs -text
1189
  data/2025/2504_10xxx/2504.10068/b12624ad-dc0b-4ee7-af9d-cbeeaaff7682_origin.pdf filter=lfs diff=lfs merge=lfs -text
1190
  data/2025/2504_10xxx/2504.10081/74c741cd-0934-4aaa-b8c4-a6d4a37b4d55_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1188
  data/2025/2504_09xxx/2504.09946/bcacb31d-9da1-45e5-a5c9-17c70dbf8404_origin.pdf filter=lfs diff=lfs merge=lfs -text
1189
  data/2025/2504_10xxx/2504.10068/b12624ad-dc0b-4ee7-af9d-cbeeaaff7682_origin.pdf filter=lfs diff=lfs merge=lfs -text
1190
  data/2025/2504_10xxx/2504.10081/74c741cd-0934-4aaa-b8c4-a6d4a37b4d55_origin.pdf filter=lfs diff=lfs merge=lfs -text
1191
+ data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf filter=lfs diff=lfs merge=lfs -text
1192
+ data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf filter=lfs diff=lfs merge=lfs -text
1193
+ data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf filter=lfs diff=lfs merge=lfs -text
1194
+ data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1195
+ data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_origin.pdf filter=lfs diff=lfs merge=lfs -text
1196
+ data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_origin.pdf filter=lfs diff=lfs merge=lfs -text
1197
+ data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_origin.pdf filter=lfs diff=lfs merge=lfs -text
1198
+ data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:275315c214ba75139ab36605daa99d69c761d094ab91f106c875da3e920d71e1
3
+ size 746094
data/2025/2504_09xxx/2504.09634/full.md ADDED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Evaluating Machine Learning-Driven Intrusion Detection Systems in IoT: Performance and Energy Consumption
2
+
3
+ Saeid Jamshidi, Kawser Wazed Nafi, Amin Nikanjam, Foutse Khomh
4
+
5
+ SWAT, Polytechnique, Montréal, H3T 1J4, Quebec, Canada
6
+
7
+ # ARTICLEINFO
8
+
9
+ Keywords:
10
+
11
+ Machine Learning, Intrusion Detection System, Energy Consumption, Software-Defined Networking, SDN-IoT
12
+
13
+ # ABSTRACT
14
+
15
+ In the landscape of network security, the integration of Machine Learning (ML)-based Intrusion Detection System (IDS) represents a significant leap forward, especially in the domain of the Internet of Things (IoT) and Software-Defined Networking (SDN). Such ML-based IDS are crucial for improving security infrastructures, and their importance is increasingly pronounced in IoT systems. However, despite the rapid advancement of ML-based IDS, there remains a gap in understanding their impact on critical performance metrics (e.g., CPU load, energy consumption, and CPU usage) in resource-constrained IoT devices. This becomes especially crucial in scenarios involving real-time cyber threats that challenge IoT devices in a public/private network.
16
+
17
+ To address this gap, this article presents an empirical study that evaluates the impact of state-of-the-art ML-based IDSs on performance metrics such as CPU usage, energy consumption, and CPU load in the absence and presence of real-time cyber threats, with a specific focus on their deployment at the edge of IoT infrastructures. We also incorporate SDN to evaluate the comparative performance of ML-based IDSs with and without SDN. To do so, we focus on the impact of both SDN's centralized control and dynamic resource management on the performance metrics of an IoT system. Finally, we analyze our findings using statistical analysis using the Analysis of Variance (ANOVA) analysis. Our findings demonstrate that traditional ML-based IDS, when implemented at the edge gateway with and without SDN architecture, significantly affects performance metrics against cyber threats compared to DL-based ones. Also, we observed substantial increases in energy consumption, CPU usage, and CPU load during real-time cyber threat scenarios at the edge, underscoring the resource-intensive nature of these systems. This research fills the existing knowledge void and delivers essential insights into the operational dynamics of ML-based IDS at edge gateway in IoT systems.
18
+
19
+ # 1. Introduction
20
+
21
+ The rapid expansion of the Internet of Things (IoT) has ushered in an era where data flows seamlessly across various sectors, driving profound changes in how devices interact [1][2]. This intricate IoT ecosystem, composed of countless devices, sensors, and intelligent nodes, has fundamentally reshaped how we think about device communication, significantly minimizing the need for human involvement [3]. The integration of Software-Defined Networking (SDN) within the IoT landscape represents a significant step forward, creating a unified IoT-SDN framework that offers centralized control, improved network management, and stronger security measures [4][5].
22
+
23
+ The rapid expansion of IoT, driven by the interconnection of millions of devices via Wireless Sensor Networks (WSNs), presents significant challenges [6]. These challenges stem mainly from these devices' limited memory, power, and battery life, highlighting the need for optimized computing and advanced data analysis techniques [7]. Deploying SDN within this framework aims to overcome these obstacles by offering a streamlined, secure network infrastructure that facilitates effective resource allocation and enhanced threat
24
+
25
+ management.
26
+
27
+ Given the widespread security vulnerabilities in IoT networks, such as service disruptions and unauthorized access, the importance of Machine Learning (ML)-based Intrusion Detection Systems (IDS) has grown [8]. ML-based IDS are crucial for protecting network integrity due to their ability to adapt dynamically and effectively identify threats [9][10] [11].
28
+
29
+ However, despite advancements in developing ML-based IDS for IoT, several critical gaps remain, as highlighted by Tekin et al. [12]. While previous research has examined ML-based IDS's performance in controlled, static testbed environments, there is a significant gap in understanding how these systems operate under the dynamic conditions of real-time cyber threats, especially when IoT is integrated with SDN. Moreover, while the potential of SDN to significantly enhance resource management in IoT systems is widely acknowledged [13][14][15], there is a lack of empirical evidence on how SDN interacts with ML-based IDS during cyber threats.
30
+
31
+ In this study, we set two primary objectives designed to deepen our understanding of network performance metrics in IoT. Firstly, we assess the impact of deploying ML-based IDS at edge gateway, mainly focusing on ML-based IDS performance metrics under real-time cyber threats. Secondly, we explore the impact of integrating SDN with our testbed, again at edge gateway, to evaluate its influence on performance metrics under similar cyber threats. The rationale behind incorporating SDN into our testbed is its
32
+
33
+ potential to improve resource management in IoT systems significantly [16][17]. We conduct a comparative analysis of the performance of seven state-of-the-art ML-based IDSs in two distinct setups: firstly, at the edge gateway, and secondly, in a similar setup augmented with SDN integration at the edge gateway, all under real-time cyber threats. This analysis is designed to elucidate the impact of SDN on performance metrics and resource management in IoT systems, especially highlighting how SDN integration can optimize the operational efficiency and resilience of IoT networks against the backdrop of evolving cyber threats. To summarize, this paper makes the following contributions:
34
+
35
+ - Assessing performance metrics of ML-based IDS in IoT systems under real-time cyber threats: Our investigation revealed the significant impact of seven ML-based IDS on the performance at the edge, specifically measuring CPU usage, CPU load, and energy consumption amidst cyber threats. Utilizing ANOVA, we clarify the operational consequences of deploying these sophisticated IDSs on the edge.
36
+ - Evaluating the impact of ML-based IDS at edge integrated with SDN: we evaluated the performance metrics of seven ML-based IDS at the edge gateway system integrated with SDN. Utilizing ANOVA, we clarify the impact of the integrated SDN with IoT on deploying these sophisticated IDS under real-time cyber threats.
37
+ - Proposing a plugin-based ML-based IDS test suite: This test suite comes with a group of available datasets and available ML-based IDSs and allows the users to define their own IoT and SDN applications and test their ML-based IDSs and models in terms of detection accuracy and performance metrics. Researchers can efficiently perform comparative analyses for their algorithms and models with other available algorithms and models. The test suite is publicly available (section 8) for researchers and practitioners to reuse.
38
+
39
+ The remainder of this paper is organized as follows: Section 2 discusses the review of our research literature. Section 3 discusses the necessary background knowledge. In Section 4, we describe the experimental design, the Research Questions (RQs), and the metrics of the experiments. Section 5 explains our results and findings. Section 8 discusses threats to the validity of our study. Finally, Section 9 concludes the paper and outlines future work.
40
+
41
+ # 2. Related Works
42
+
43
+ Understanding the performance trade-offs of ML-based IDS in IoT, especially in resource-constrained edge gateways, remains an open challenge. While numerous studies, as mentioned in the previous section, have focused on detection accuracy, limited research has analyzed their real-time computational impact. In particular, there is a significant gap in understanding how ML-based IDS operate under real-time
44
+
45
+ cyber threats, especially when integrated with SDN. This section reviews prior works on ML-based IDS in IoT and SDN, examining their strengths and limitations and focusing on ML models and energy consumption concerns.
46
+
47
+ # 2.1. IoT Intrusion Detection
48
+
49
+ Alsulami et al. [18] proposed a new ML model to identify and categorize network activity in IoT systems. Their research aimed to classify network traffic into distinct categories, including normal behavior and various types of attacks (e.g., Mirai, Denial-of-Service (DoS), Scan, and Man-in-the-Middle (MITM)). The study involved testing several supervised learning models on the customized IoTID20 dataset, including Spiking Neural Networks (SNNs), DT, Boosting Trees (BT), Support Vector Machines (SVM), and KNN. These models, enhanced through deep feature engineering, effectively identified and classified network anomalies.
50
+
51
+ Mukherjee et al. [19] conducted an in-depth investigation into the predictive capabilities of supervised learning models (e.g., Logistic Regression (LR), Naïve Bayes (NB), DT, RF, and Artificial Neural Network (ANN)) for anomaly detection. Their study utilized a dataset comprising 350,000 data points. The research compared these models against established state-of-the-art techniques, including BIRCH clustering and K-Means, and evaluated their performance in different scenarios. This included an analysis using the complete dataset and a separate evaluation after removing binary data points in the 'value' feature. The models demonstrated high precision in both scenarios, underscoring their efficacy in practical anomaly forecasting and enhancing security measures against potential risks.
52
+
53
+ Elnakib et al. [20] proposed the Enhanced Intrusion Detection Deep Learning Multi-class Classification Model (EIDM), a sophisticated Deep Learning (DL) model designed to enhance security in the IoT context. This model is adept at accurately categorizing 15 distinct traffic characteristics, encompassing a range of 14 discrete attack types. The performance of EIDM was evaluated against four other contemporary models, focusing on classification accuracy and efficiency. The increased precision of EIDM highlights its promise as a powerful solution for safeguarding IoT networks against a wide range of attacks.
54
+
55
+ Douiba et al. [21] proposed an innovative IDS to enhance IoT device security. Their approach utilized gradient boosting and DT in the Catboost framework. The model's performance was rigorously assessed on several datasets, including NSL-KDD, IoT-23, BoT-IoT, and Edge-IIoT, with optimization achieved through GPU acceleration. The IDS distinguished itself with its ability to detect anomalies in real-time and its computing efficiency, demonstrating high accuracy, recall, and precision metrics, around $99.9\%$ on a record detection and computation time.
56
+
57
+ Kasongo et al. [22] presented a research endeavor in which they proposed a Feed-Forward Deep Neural Network (FFDNN) IDS, enhanced by the inclusion of a Wrapper Feature Extraction Unit (WFEU) utilizing the Extra Trees
58
+
59
+ algorithm. The WFEU-FFDNN was evaluated for its performance on several datasets, including UNSW-NB15 and AWID, and compared with traditional ML methods. The system demonstrated high classification accuracies in binary and multiclass classifications across these datasets, significantly outperforming in scenarios involving the AWID dataset. The enhanced precision of the WFEU-FFDNN model emphasizes its efficacy in real-time anomaly detection and computing efficiency.
60
+
61
+ In addition to all of the works stated above, Verma et al. [23] examined ML algorithms in the context of augmenting security measures in the IoT. The researchers compared classifiers using benchmark datasets (e.g., CIDDS-001, UNSW-NB15, and NSL-KDD). This analysis was supported by statistical tests, namely the Friedman and Nemenyi tests. The researchers also evaluated the reaction times on the Raspberry Pi platform, showcasing the adaptability and efficiency of the classifiers in IoT scenarios, hence emphasizing their practical relevance.
62
+
63
+ Otoum et al. [24] presented a scholarly investigation in which they propose a DL-powered intrusion detection system (DL-based IDS) to effectively address challenges associated with feature learning and dataset management. The DL-based IDS developed by the researchers integrates the Spider Monkey Optimization(SMO) algorithm with the stacked-deep polynomial network (SDPN) to enhance threat identification. The system can detect various abnormalities, including DoS, User to Root attacks (U2R), probing, and Root-to-local attacks (R2L). The DL-based IDS was evaluated using the NSL-KDD dataset and exhibited outstanding performance metrics, showcasing its efficacy in various aspects of threat detection.
64
+
65
+ Gaber et al. [25] highlight the importance of securing IoT systems, especially in complex environments (e.g., smart cities). The authors introduced a feature selection methodology that combines constant removal and recursive feature elimination strategies. They utilized a DT classifier with a subset of 8 characteristics, assessed on the AWID dataset using various ML classifiers. In contrast to existing methods, their approach exhibited exceptional performance, achieving high accuracy, precision, and F1 score rates. These results underscore the potential of their methodology in the domain of IoT-IDS.
66
+
67
+ Sachdeva et al. [26] investigate the issue of fortifying cybersecurity in IoT networks to mitigate the impact of distributed denial-of-service (DDoS) attacks. The authors put out an innovative approach for data pre-processing, which involves the integration of ML and DL classifiers. The class imbalances in the BOT-IoT and TON-IoT datasets from UNSW Australia are mitigated using several Synthetic Minority Oversampling Technique (SMOTE) variants. The hybrid methodology employed in this study, which integrates many algorithms, demonstrates the promising prospects for efficient detection of DDoS attacks in IoT networks.
68
+
69
+ The related works discussed above show that the most ML-based IDS developed and re-used by researchers are DT, KNN, RF, LSTM, CNN, and a hybrid model of CNN and
70
+
71
+ LSTM. In addition, EIDM is the most recent work that has overcome the limitations of the previous ML models. That is why we proceed with all these six ML-based IDS to carry out our study in this paper.
72
+
73
+ # 2.2. Energy consumption in IDS
74
+
75
+ Only a tiny amount of research has been done so far to determine the energy consumption in IDS. Among them, Tekin et al. [12] investigated the topic of IDS in the context of the IoT, with a specific focus on the energy consumption aspect in devices with limitations. The authors assessed various ML paradigms in the context of cloud computing, edge computing, and IoT devices. They specifically emphasize the promising capabilities of TinyML for microcontroller units (MCUs). The DT algorithm demonstrates strong performance in terms of training, inference, and power efficiency. Although Naive Bayes (NB) has superior training speed, it exhibits a minor accuracy trade-off; the resource requirements of the KNN algorithm increase proportionally with the size of the dataset, hence diminishing its suitability for deployment in IoT systems. Both DT and RF exhibit low power consumption and high accuracy. However, it is essential to consider that RF's longer execution time represents a trade-off. The research findings also elucidate the advantages and constraints of cloud-based ML, underscoring the significance of algorithm choice in practical implementations.
76
+
77
+ Nimmy et al. [27] utilize the energy consumption patterns of IoT devices to identify irregularities in smart home environments. They developed a prototype of a smart camera based on Raspberry Pi to gather power traces during regular operations and simulated DDoS attacks. This approach emphasizes the importance of energy consumption as a crucial indicator of aberrant behaviors. The deep feedforward neural network used in their study demonstrates exceptional performance in identifying anomalies, as evidenced by rigorous evaluations of ML models. This indicates its potential to enhance the security of smart homes significantly.
78
+
79
+ # 2.3. IoT Intrusion Detection in SDN
80
+
81
+ Chaganti et al. [28] present a sophisticated IDS for IoT networks. This system leverages SDN and specifically emphasizes the utilization of DL techniques. The research is notable for its utilization of LSTM networks, a Recurrent Neural Network (RNN) type renowned for its efficacy in handling time series data, which is critical in detecting network threats. The authors' principal contribution is utilizing an LSTM model, which they employ to discern network attacks. To evaluate the efficacy of their approach, the authors conduct a comparative analysis with alternative architectures (e.g., SVM). The experimental findings present solid evidence that highlights the improved efficacy of the LSTM model in accurately categorizing various network attacks. The LSTM model demonstrated exceptional accuracy and efficiency in detecting attack patterns, surpassing conventional ML models in precision and recall metrics.
82
+
83
+ M. M. Isa et al. [29] present the DAERF model in their research, an innovative IDS for SDN. This model combines
84
+
85
+ a Deep Autoencoder (DAE) with an RF algorithm, creating a unique approach. The DAE excels in feature extraction and data dimensionality reduction. At the same time, the RF approach, known for using an ensemble of DTs, shows significant accuracy and robustness in classification tasks. The DAERF model was evaluated in a simulated SDN using commonly used datasets, demonstrating a high efficacy level. The integration of DL and ML in the DAERF model represents a novel approach that effectively identifies and categorizes network intrusions, enhancing the security of SDN systems and ensuring their capability to handle real-time applications with scalability and adaptability.
86
+
87
+ Phan The Duy et al. [30] presented 'FoolYE,' an innovative IDS designed specifically for SDN systems. The system combines cyber deception techniques with Moving Target Defense (MTD) methodologies. The core of this methodology lies in its ability to create a dynamic and misleading network environment, making it challenging for malicious actors to identify and exploit genuine resources. A key innovation is deep transfer learning-based IDS, which employs advanced DL models (e.g., ResNet50 and DenseNet161), originally designed for image recognition. These models have been adapted using deep transfer learning techniques to analyze network traffic for ML-based IDS, demonstrating the versatility and efficacy of DL in cybersecurity. The study involved experiments in simulated SDN systems, where the performance of the IDS was thoroughly examined, showing its high capability in accurately detecting a wide range of network intrusions.
88
+
89
+ Despite advancements in ML-based IDS for IoT, a significant gap remains in understanding their real-time computational impact, especially in energy consumption, CPU load, and CPU usage at the edge gateway. This gap is further compounded by the lack of empirical studies evaluating the effectiveness and efficiency of ML-based IDS in real-world, resource-constrained edge gateway, especially when integrated with SDN during cyber threats. To address these shortcomings, our study provides a comprehensive empirical analysis of ML-based IDS, focusing on their performance trade-offs in SDN-enabled and non-SDN edge gateways. Specifically, we assess how different ML-based IDS models impact system resources under real-time cyber threats, offering critical insights into their feasibility for deployment in IoT networks.
90
+
91
+ # 3. Background
92
+
93
+ This section dives into the underlying premise of the research's baselines.
94
+
95
+ Decision Tree (DT): In the field of IDS, DT is a key ML method for analyzing network data. They use tree-like models to break down network features into binary decisions, evaluating network attributes at each node to identify effective splits. This creates a rule-based hierarchy that excels at spotting differences between normal and suspicious network activities. DTs are valued for their clarity and ease of interpretation, playing a vital role in improving cybersecurity by identifying unusual or unauthorized actions
96
+
97
+ [31] [32].
98
+
99
+ Random Forest (RF): The algorithm is highly valued in IDS for its precision in classifying network data. Utilizing RF, an ML algorithm, it creates a group of DTs to assess various network attributes, effectively distinguishing between normal and malicious activities. RF excels in managing large datasets, balancing IDS data disparities, and minimizing overfitting, making it crucial for IoT and network security. It achieves accurate detection of unusual network behaviors [33] [34].
100
+
101
+ K-Nearest Neighbor (KNN): The KNN algorithm is a key IDS tool known for its effective similarity-based classification. It compares network traffic with existing labeled data using distance metrics to classify new instances, with 'k' indicating the number of neighbors considered. This method is crucial for identifying normal versus abnormal network activities, offering a simple yet versatile solution for real-time IDS. KNN excels in both binary and multiclass problems, providing quick, reliable categorizations crucial for responding to threats in dynamic networks [35] [36] [37]. Long short-term memory (LSTM): LSTM networks, a type of recurrent neural network, are highly effective in analyzing sequential data for IDS. Their unique memory cells excel at identifying complex patterns in network traffic, making them adept at spotting advanced threats that traditional methods may miss. LSTMs are especially valuable for maintaining context over data sequences, which is crucial for distinguishing between normal and malicious network activities. Their application in IDS significantly boosts cybersecurity, especially in dynamic and IoT environments, by adapting to new threats and efficiently handling varying data lengths, offering a robust solution to modern cybersecurity challenges [38] [39].
102
+
103
+ Convolutional Neural Network(CNN): CNNs provide a resilient DL methodology for IDS. CNNs are widely recognized for their ability to independently acquire hierarchical features from network traffic. This is achieved through convolutional, pooling, and fully connected layers, which enable the discernment of spatial patterns in the traffic data. This capacity facilitates the recognition of both well-established and new threats. CNN in IDS is considered crucial in enhancing cybersecurity defenses against a wide range of cyber threats due to their capacity to scale effectively and efficiently handle real-time data [40] [41].
104
+
105
+ Hybrid model of LSTM and CNN: The integration of LSTM and CNN models into IDS significantly boosts network security by combining the spatial analysis capabilities of CNNs with the temporal pattern recognition of LSTMs. This hybrid approach detects complex cyber threats by analyzing network traffic data in both spatial and temporal dimensions. CNNs effectively identify security breaches through local pattern recognition, while LSTMs track the sequence of network events over time, offering a detailed understanding of potential threats. This fusion results in more accurate and efficient detection of sophisticated, multistage attacks, reducing false positives and adapting to new threats, thereby enhancing overall anomaly detection and
106
+
107
+ maintaining network integrity without excessive alerts [42] [43].
108
+
109
+ EIDM: The EIDM is a cutting-edge IDS approach expertly handling a wide range of network events. Its design combines convolutional and dense layers to tackle the challenges of class diversity and data imbalance. The model begins with a 120-node dense layer, followed by an 80-neuron convolutional layer with a kernel size of 20 to better distinguish between similar network activities. It also features a Maxpooling layer for enhanced feature extraction and a dropout layer to avoid overfitting. EIDM can classify 15 network behaviors through six dense layers, using 'relu' activation and SGD and Adam optimizers for optimal accuracy and efficiency. According to [20], EIDM's unique structure and optimization techniques make it a standout solution for improving network IDS.
110
+
111
+ # 4. Study design
112
+
113
+ This section describes our methodology to evaluate the impact of specific ML-based IDSs using selected performance metrics. We first mention our Research Questions (RQs), followed by an explanation of the experimental design and the metrics used to evaluate the impact of the ML-based IDS.
114
+
115
+ # 4.1. Research questions (RQs)
116
+
117
+ Our research aims to address the following RQs:
118
+
119
+ - RQ1: How do ML-based IDSs impact CPU usage, CPU load, and energy consumption at the edge gateway without SDN during real-time cyber threats?
120
+
121
+ This RQ examines the impact of ML-based IDSs on crucial performance metrics, specifically CPU usage, CPU load, and energy consumption, at edge gateway not integrated with SDN. It focuses on analyzing the performance of seven state-of-the-art ML-based IDSs and their impacts on these key metrics in the face of diverse cyber threats.
122
+
123
+ - RQ2: What are the differences in CPU usage, CPU load, and energy consumption impacts of ML-based IDS at the edge gateway with SDN integration during real-time cyber threats?
124
+
125
+ This RQ explores how ML-based IDSs influence CPU usage, CPU load, and energy consumption at the edge gateway integrated with SDN. It involves analyzing the impacts of various ML-based IDSs on these essential performance metrics under various cyber threats.
126
+
127
+ # 4.2. DataSet
128
+
129
+ In our study, we used the CICIDS2017 data set [44], a highly regarded resource organized by the Canadian Institute for Cybersecurity. This dataset is recognized as one of the gold standards in cybersecurity research, capturing a broad spectrum of benign network activities and the latest cyberattacks [45]. CICIDS2017 is designed to simulate
130
+
131
+ Table 1 Distribution of labeled IoT-SDN attacks in the dataset
132
+
133
+ <table><tr><td>IoT Attack Labels</td><td>No of labeled entries</td></tr><tr><td>BENIGN</td><td>2271320</td></tr><tr><td>DoS Hulk</td><td>230124</td></tr><tr><td>Port Scan</td><td>158804</td></tr><tr><td>DDoS</td><td>128025</td></tr><tr><td>DoS GoldenEye</td><td>10293</td></tr><tr><td>FTP-Patator</td><td>7935</td></tr><tr><td>SSH-Patator</td><td>5897</td></tr><tr><td>DoS slowloris</td><td>5796</td></tr><tr><td>DoS Slowhttptest</td><td>5499</td></tr><tr><td>Bot</td><td>1956</td></tr><tr><td>Web Attack &amp; Brute Force</td><td>1507</td></tr><tr><td>Web Attack &amp; XSS</td><td>652</td></tr><tr><td>Infiltration</td><td>36</td></tr><tr><td>Web Attack &amp; SQL Injection</td><td>21</td></tr><tr><td>Heartbleed</td><td>11</td></tr></table>
134
+
135
+ real-world network environments, making it an essential resource for researchers to test and validate advanced IDS thoroughly. The breadth and diversity of the dataset highlight its importance, making it necessary for those aiming to strengthen network security paradigms.
136
+
137
+ # 4.3. The ML-based IDS
138
+
139
+ Numerous ML-based IDS have been developed by researchers [12] [22] [25] [46]. However, we had a significant challenge in reviewing these publications and selecting some for our study. Most did not make their solutions' applications or source code publicly available. This lack of transparency hinders the ability to experiment with these works in real IoT devices. This omission complicates, and may even prevent, the objective comparison of the proposed solutions. Consequently, to initiate our study, it became necessary to independently implement all ML-based IDS that have been previously utilized, except the ML-based IDS proposed by [20], which made their ML-based IDS code available to researchers. In this section, we explore the implementation process of the six ML-based IDSs that we have developed: DT, KNN, RF, LSTM, CNN, and a hybrid model of LSTM and CNN. Table 3 presents a comparative analysis of the performance metrics of ML-based IDS.
140
+
141
+ # 4.3.1. DT, KNN, RF
142
+
143
+ We have developed and deployed DT-based IDS, RF-based IDS, and KNN-based IDS [47], each specifically designed to improve security policy. The foundation of these models is a preprocessing technique applied to the selected CICIDS 2017 dataset. The dataset features various simulated cyber-attack scenarios alongside standard traffic data. It encompasses multiple numerical attributes, including but not limited to packet sizes, flow durations, and bytes per flow, which are critical for analyzing network behavior and detecting anomalies. We applied min-max normalization as our initial preprocessing step to ensure uniformity across these diverse numerical attributes and
144
+
145
+ Table 2 Comparison of structure and accuracy of different Neural Network models in IDS for IoT-SDN network
146
+
147
+ <table><tr><td>Dataset</td><td>CICIDS2017</td><td>CICIDS2017</td><td>CICIDS2017</td><td>CICIDS2017</td></tr><tr><td>Categories</td><td>15</td><td>15</td><td>15</td><td>15</td></tr><tr><td>Model</td><td>LSTM</td><td>LSTM+CNN</td><td>CNN</td><td>EIDM</td></tr><tr><td>Layers</td><td>10</td><td>11</td><td>8</td><td>12</td></tr><tr><td>Parameters</td><td>56386</td><td>12795</td><td>3497</td><td>48735</td></tr><tr><td rowspan="10">Structure details</td><td>Dense (64)</td><td>Dense (64)</td><td></td><td>Dense (120)</td></tr><tr><td>Dense (128)</td><td>Conv1D (64, 10)</td><td>Conv1D (16,30)</td><td>Conv1D(80,20)</td></tr><tr><td>LSTM (128)</td><td>Conv1D (64, 10)</td><td>Conv1D (16,30)</td><td>MaxPooling1D (2)</td></tr><tr><td>LSTM (256)</td><td>MaxPooling1D (2)</td><td>MaxPooling1D (2)</td><td>Dense (120)</td></tr><tr><td>Dense (128)</td><td>LSTM (128)</td><td>Flatten()</td><td>Dense (100)</td></tr><tr><td>Dense (48)</td><td>LSTM (64)</td><td>Dense (32)</td><td>Dense (80)</td></tr><tr><td>Dense (15)</td><td>Dense (64)</td><td>Dense (15)</td><td>Dense (60)</td></tr><tr><td></td><td>Dense (15)</td><td></td><td>Dense (60)</td></tr><tr><td></td><td></td><td></td><td>Dense (40)</td></tr><tr><td></td><td></td><td></td><td>Dense (15)</td></tr><tr><td>Training Accuracy (%)</td><td>97.72%</td><td>98.77%</td><td>97.92%</td><td>99.57%</td></tr><tr><td>Testing Accuracy (%)</td><td>93.86%</td><td>95.75%</td><td>94.74%</td><td>99.56%</td></tr></table>
148
+
149
+ mitigate scale discrepancies. Missing values were imputed to preserve the integrity of the data. The LabelEncoder[48] was utilized to convert labels into a format suitable for ML techniques. An essential aspect of our methodology is to divide the selected dataset into training and testing subsets. For the first RQ, we adopted $80\%$ training and $20\%$ testing, aligning with standard practices in ML model development. This adjustment was made to accommodate the different requirements of each research phase. As shown in Table 1, the dataset has five classes (Benign, DDoS, DoS, Brute force, and Port scan) with significantly more entries than the remaining ten classes, which contain fewer samples. SMOTE [49] with auto-sampling was employed to address the class imbalance issue in the dataset. This technique effectively augmented the representation of underrepresented classes, leading to a more balanced dataset for training purposes.
150
+
151
+ # 4.3.2. CNN
152
+
153
+ In our research, we deployed a CNN-based IDS tailored for our experimental testbed. The configuration details of the CNN model, including its layers, parameters, and architecture specifics, are outlined in Table 2.
154
+
155
+ # 4.3.3. LSTM
156
+
157
+ In our investigation, we implemented an LSTM-based IDS specifically for our testbeds. The detailed architecture and parameters of the LSTM model, crucial for its operation in our IDS, are thoroughly presented in Table 2.
158
+
159
+ # 4.3.4. Hybrid model of LSTM and CNN
160
+
161
+ In our exploration, we implemented a hybrid model combining LSTM and CNN architectures to create an advanced IDS tailored to our experimental setup. This architecture has already been tested in various scenarios [50][51][43]. The intricate configuration of this hybrid LSTM and CNN model, which leverages the strengths of both LSTM and CNN to enhance
162
+
163
+ detection capabilities, is detailed in Table 2.
164
+
165
+ The goal of using the hybridization of LSTM and CNN is twofold. First, CNN can drop the non-impactful features and select only the impactful ones (feature engineering). At the same time, it helps to learn the features in a spatial hierarchical manner [52]. Second, from our dataset, we got 77 features. As it is unknown which features are impactful from the given features, we applied two 1-dimensional CNN layers followed by a max-pooling layer to find the impactful features by learning the 10 nearby features together (kernel size 10). This helps us to create new feature representations where the impactful ones are retained. Later, we fed these newly derived features directly to two LSTM layers. This step helps to learn the spatial and temporal features from CNN, resulting in context-aware feature representations. Finally, we applied two Dense layers to regress the feature representations generated from previous CNN and LSTM layers into 15 classes. This process helps us learn the input features more deeply and increase the classification accuracy.
166
+
167
+ # 4.4. Experimental Design
168
+
169
+ To address RQ1, we designed a testbed incorporating two Raspberry Pi 4 Model B units as edge gateways. Each unit is equipped with 8GB of RAM and a 1.5GHz 64-bit quad-core CPU, providing a realistic environment for evaluating the computational impact of ML-based IDS at the edge gateway. Our study evaluates the performance of seven ML-based IDS models: DT, KNN, RF, LSTM, CNN, EIDM, and a hybrid of LSTM and CNN model, selected for their established effectiveness in cybersecurity. We conducted controlled experiments in IoT-edge networks to assess these IDS models, simulating a range of traffic scenarios (e.g., BENIGN, DDoS, DoS, brute force attacks, and port scans) using Kali Linux [53]. These experiments
170
+
171
+ Table 3 Performance Comparison of ML-based IDS
172
+
173
+ <table><tr><td></td><td>DT</td><td>KNN</td><td>RF</td><td>LSTM</td><td>LSTM+CNN</td><td>CNN</td></tr><tr><td>Accuracy</td><td>0.9985</td><td>0.9967</td><td>0.9981</td><td>0.9386</td><td>0.9575</td><td>0.9474</td></tr><tr><td>Precision</td><td>0.9985</td><td>0.9966</td><td>0.9980</td><td>0.9771</td><td>0.9877</td><td>0.9792</td></tr><tr><td>Recall</td><td>0.9985</td><td>0.9967</td><td>0.9981</td><td>0.9524</td><td>0.9645</td><td>0.9611</td></tr><tr><td>F1-Score</td><td>0.9985</td><td>0.9966</td><td>0.9980</td><td>0.9646</td><td>0.9760</td><td>0.9701</td></tr></table>
174
+
175
+ ![](images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg)
176
+ Figure 1: IoT-edge testbed topology, illustrating non-SDN and SDN-enabled setups.
177
+
178
+ enabled us to analyze the IDS models' impact on critical performance metrics, specifically CPU usage, CPU load, and energy consumption.
179
+
180
+ To address RQ2, we extended our testbed by integrating the edge gateway with the Ryu controller, establishing an SDN-based environment. Ryu, an open-source Python-based SDN controller [54], provides centralized traffic management, enhancing resource allocation and security analysis. We further utilized Mininet [55] to simulate a realistic SDN infrastructure consisting of eighteen hosts, six switches, and a Ryu controller, mirroring real-world network conditions.
181
+
182
+ # 4.5. Metrics
183
+
184
+ We evaluated CPU usage, CPU load, and energy consumption in our test beds in the context of ML-based IDS during cyber threat scenarios. We employed the ANOVA[56] to ensure an objective assessment of the performance of various ML-based IDS.
185
+
186
+ # 4.5.1. CPU Load and CPU Usage
187
+
188
+ CPU usage and CPU load are essential metrics for evaluating IDS, especially at the edge and in SDN environments. CPU usage measures the percentage of the CPU's current capacity, reflecting how much processing power is dedicated to task execution. High CPU usage in an IDS can signal extensive computational demands, potentially impacting the performance of other tasks and system responsiveness, a concern in resource-limited IoT settings. Efficient IDS, especially those utilizing ML techniques, must manage CPU
189
+
190
+ usage carefully to balance detection accuracy with minimal resource use. Excessive CPU usage can slow IDS's real-time network traffic processing, leading to delays or missed attack detection. On the other hand, CPU load indicates the number of processes waiting to be executed, providing an understanding of the CPU's workload. An increase in CPU load might suggest heavy network traffic or numerous attack attempts, highlighting the risk of system overload. Monitoring CPU load allows for early identification of potential bottlenecks, ensuring that IDS operations do not adversely impact system performance. In SDN-enabled IoT edge systems, adept CPU load management is vital to distribute tasks efficiently between IDS and other network functions, ensuring optimal resource allocation and system performance. Both CPU usage and load are pivotal metrics for assessing IDS efficacy in environments where resources are constrained, e.g., at the edge gateway[57][58][59].
191
+
192
+ # 4.5.2. CPU Performance Metrics
193
+
194
+ To assess the computational impact of ML-based IDS, we analyze both CPU load and CPU usage, as these metrics provide complementary insights into system performance. CPU usage is typically expressed as a percentage, indicating the proportion of processing power utilized at a given moment. In contrast, CPU load is presented as a numerical value, representing the average number of active processes waiting for CPU execution over a specific time interval. Moreover,
195
+
196
+ while CPU load can be converted into a percentage, it provides a more detailed view of system stress, especially in multi-core environments. For instance, a load value of 1.0 on a single-core system indicates full utilization. In contrast, on a quad-core system, a load of 1.0 suggests that only $25\%$ of the total available processing capacity is used. This distinction is crucial when interpreting our results, as high CPU load does not always imply that the system is at risk of overutilization—it depends on the number of available processing cores and the workload distribution.
197
+
198
+ # 4.5.3. Energy Consumption
199
+
200
+ Energy consumption, often measured in watt-hours or joules, quantifies the amount of energy a device or system expended during its operation. In IoT hardware, where many devices are battery-powered or operate in energy-constrained environments, efficient energy consumption is desirable and necessary. Devices (e.g., sensors, actuators) and even more complex IoT nodes must be designed to perform their tasks while consuming minimal energy, ensuring longevity, and reducing the need for frequent battery replacements or recharges. Moreover, IoT devices integrated with SDN bring a new dimension to the energy conversation; SDN centralizes network control, dynamically optimizing network resources based on real-time demands. Although this centralization offers enhanced flexibility and scalability, it also means that the network's core components must be energy efficient. In IoT systems, where potentially thousands or even millions of devices communicate and exchange data, even minor inefficiencies in energy consumption can accumulate, leading to significant energy drains. Integrating ML-based IDS into the edge gateway emphasizes the need to consider energy metrics critically. ML-based IDS are inherently data-intensive, requiring substantial computational resources to process large datasets for detecting and mitigating security threats. Although these systems offer invaluable security enhancements, their operation can be energy-intensive. Therefore, measuring and optimizing the energy consumption of ML-based IDS is crucial to ensure they deliver effective security measures without unduly burdening the system's energy resources. This balance is essential for maintaining the sustainability and efficiency of the edge gateway, where energy efficiency is often a key concern.
201
+
202
+ We employed PowerTop [60], a robust tool, to precisely gauge and examine the energy consumption in two separate testbed configurations: the edge gateway integrated with SDN and without SDN. PowerTop's sophisticated monitoring capabilities allowed us to gain insights into these testbeds' energy consumption patterns and processor activity.
203
+
204
+ # 4.5.4. Designed cyber threats
205
+
206
+ For our research, we focused on analyzing DDoS, DoS, brute force attacks, and the port scan. We chose these specific types of attacks since they were already categorized in the employed dataset. These cyber threats are prevalent and
207
+
208
+ pose substantial risks in the field of cybersecurity. Below, a concise summary of each is presented:
209
+
210
+ - A Denial-of-Service (DoS): At the edge, DoS attacks are critical cybersecurity threats that disrupt device and service operations by flooding systems with excessive requests and consuming vital resources (e.g., bandwidth, processing power, and memory). This overload prevents the system from serving legitimate users, blocking access to essential operations. The distributed, resource-constrained nature of the edge makes them especially susceptible to DoS attacks. The vulnerability of these devices, coupled with their interconnectedness, means that an attack on a single device can significantly compromise the entire network's functionality and security [61].
211
+ - A distributed denial-of-service (DDoS): A DDoS attack is a coordinated effort where multiple attackers from different locations flood a specific target, such as a server or network at the edge, with excessive traffic. The goal is to deplete the target's resources, causing severe service disruptions or a complete shutdown. Unlike traditional DoS attacks, which come from a single source, DDoS attacks are distributed across numerous sources, making them harder to defend against. This distributed nature makes DDoS attacks especially dangerous at the edge, where the interconnected and resource-constrained devices can exacerbate the attack's impact, potentially crippling the entire network [62].
212
+ - Brute Force: A brute force attack involves an attacker systematically attempting to gain unauthorized access to a system by trying every possible combination, such as trying every key until one works. With its many interconnected devices and varying security levels, the edge is especially vulnerable to such attacks. Attackers exploit these weaknesses by repeatedly guessing passwords, encryption keys, or access codes, which seriously threatens the integrity and confidentiality of data at the edge gateway[63].
213
+ - Port Scan: A port scan aims to identify a target system's open ports. By identifying open ports and the services running on them at the edge, attackers can uncover and exploit vulnerabilities, posing a serious threat to the security and integrity of the edge gateway[64].
214
+
215
+ # 4.5.5. Analysis method for energy consumption, CPU usage, CPU load
216
+
217
+ We used ANOVA to assess our observed results. ANOVA is an indispensable statistical tool for testing the null hypothesis that posits the equivalence of group means. Our study specifically employed one-way ANOVA to examine the impact of a singular independent variable on the evaluated systems. This method relies on several crucial assumptions, including the necessity for the data to exhibit
218
+
219
+ a normal distribution, the variances between groups being equal (homogeneity of variance), and all observations being independent.
220
+
221
+ In addition, we conducted 15 separate tests on ML-based IDS to measure CPU load, CPU usage, and energy consumption under various cyber threats. This rigorous approach allowed us to leverage the F statistic, which quantifies the variance ratio between the means of different groups to the variance in the groups. A significant F-statistic, together with a p-value of $\leq 0.05$ , denotes statistically significant differences between group means, underscoring the efficacy of our testing methodology. By implementing this robust statistical framework, we have thoroughly evaluated the performance of various ML-based IDS models in response to different cyber threats. This analysis has allowed us to identify specific models that demonstrate resilience or efficiency against multiple attacks and require increased computational resources or energy consumption. While CPU load is a key performance metric for IDS evaluation, it is also crucial to consider its impact on IoT device availability and reliability. Excessive CPU consumption by an IDS can degrade the device's primary functions, leading to slow response times or system failures. This is especially critical in real-time applications such as healthcare, industrial automation, and smart home security, where device downtime can have serious consequences. An IDS must enhance security without inadvertently causing an attack such as a DDoS condition due to resource exhaustion. In addition, through these fifteen iterations of testing, ANOVA has enabled us to validate significant differences in IDS performance metrics (e.g., detection accuracy, false positive rates), CPU load, CPU usage, and energy consumption across diverse scenarios. This methodological approach provides a detailed examination of how different IDS models respond to varied threats, establishing a solid statistical foundation for assessing the efficacy of each model in a controlled environment. 
By distinguishing between performance differences attributable to the models' inherent capabilities and those due to random variation, our use of ANOVA has proven to be critical. It aids in identifying the most resource-efficient and reliable IDS, thereby guiding the selection process for optimal cybersecurity defenses and enhancing our management and understanding of IDS performance under cyber threat conditions [65] [66].
222
+
223
+ # 4.6. TestSuite
224
+
225
+ To initiate the research work presented in this paper and to facilitate the environment for further research and testing, we introduce a versatile test suite designed to experiment with and evaluate ML-based IDS in SDN environments. Unlike conventional experimental testbeds, our test suite is an extensible framework equipped with predefined APIs and a selection of pre-integrated algorithms, facilitating the seamless integration and testing of novel IDS models. Another good contribution to our test suite is that users can execute their experiments on it without Raspberry Pi or any other hardware support. As discussed in the previous
226
+
227
+ paragraph, the test suite is developed following the plug-in architecture feature. This ensures that the user can easily integrate their algorithm into the test suite and test the accuracy, energy consumption, and CPU usage with or without security threats. Users can create their own IoT-SDN network and complexity in the network and generate any number of security breaching attacks. This approach not only simplifies the validation process of IDS models in a realistic network scenario but also encourages the exploration of innovative IDS methodologies by providing a solid foundation of tools and benchmarks. We have made the test suite available with the same configuration discussed in Section 4.4. We integrated the same tools for creating an IoT-SDN network, generating security attacks, and measuring IDS accuracy, energy consumption, CPU usage, etc. Through its design, the test suite aims to advance the development and thorough evaluation of cutting-edge IDS solutions, significantly enhancing network security in the era of SDN.
228
+
229
+ # 5. Experimental Results and Analysis
230
+
231
+ This section discusses our experimental results and findings. After presenting our results, we conducted an in-depth statistical analysis using ANOVA. This analysis aims to illuminate the implications and insights that emerge from the experimental results, providing an understanding of the efficacy and nuances of each IDS under study.
232
+
233
+ # 5.1. Experimental finding for RQ1 CPU Load:
234
+
235
+ We tested ML-based IDSs under various cyberattack scenarios to assess their impact and strain on our testbed. The types of cyberattacks we considered include DDoS, DoS, brute force attacks, and the port scan. Moreover, we conducted the ANOVA focusing on CPU load variations in our testbed. Figure 2 illustrates a comparative analysis of the average CPU load among different ML-based IDS models in the presence of various types of cyberattacks. The DL-based IDS (CNN, LSTM, combined model of LSTM and CNN, and EIDM) consistently maintain lower CPU loads across all attack types, demonstrating their efficiency in resource utilization during inference. In contrast, traditional ML-based IDS such as KNN, DT, and RF exhibit significantly higher CPU loads, especially under brute force and DDoS attacks, with KNN and DT being the most resource-intensive. This is because DL models, such as CNN and LSTM, efficiently handle computations in parallel and are optimized for inference. In contrast, traditional models (e.g., KNN and DT) require more repeated, resource-heavy calculations, such as distance computations in KNN or recursive splitting in DTs, especially under large-scale attacks.
236
+
237
+ # Statistical Findings:
238
+
239
+ We conducted an ANOVA, and the results presented in Table 4 illuminate significant differences in CPU load among diverse ML-based IDS under DDoS, underscored by an F-statistic of 60.40 and a p-value $< 0.05$ . This F-statistic delineates
240
+
241
+ ![](images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg)
242
+ Evaluating Machine Learning-driven Intrusion Detection System
243
+ Figure 2: The Average CPU load of ML-based IDS under cyber threats.
244
+
245
+ Table 4 ANOVA results: CPU Load for ML-based IDS under DDoS.
246
+
247
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>21609.87</td><td>3601.64</td><td>60.40</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>91</td><td>5426.49</td><td>59.63</td><td></td><td></td></tr><tr><td>Total</td><td>97</td><td>27036.36</td><td>278.73</td><td></td><td></td></tr></table>
248
+
249
+ the contrast in CPU load variance across ML-based IDSs against the within-group variance, highlighting a significant influence of IDS selection on CPU load. The remarkably low p-value corroborates this finding, conclusively demonstrating the substantial differences in CPU load among the IDSs. Furthermore, we observed similar p-values $(< 0.05)$ across other attacks, including brute force, DoS, and the port scan, so we do not report them. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS under different cyber threats.
250
+
251
+ # Finding
252
+
253
+ DL-based IDS, such as CNN, LSTM, and hybrids, perform more efficiently in managing computational demands across diverse types of cyber threats than traditional ML-based IDS, such as KNN, DT, and RF, as they exhibit higher CPU loads at the edge. This pattern suggests that DL-based IDS' intrinsic efficiency is not attack-specific but rooted in their architecture, making them especially suited for real-time applications at edge gateway. These results are expected, as traditional ML-based IDS (e.g., KNN, DT, RF) perform computationally expensive operations during inference, unlike DL-based IDS, which optimizes processing through parallelization and learned feature extraction.
254
+
255
+ # CPU Usage:
256
+
257
+ Figure 3 compares the average CPU usage of various ML-based IDS models under different cyberattacks. The KNN model consistently exhibits the highest CPU usage across all attack types, indicating its high computational demand, which limits its use in resource-constrained environments. The RF and DT models are also CPU-bound, though they are less intensive than KNN. In contrast, the LSTM model demonstrates the lowest CPU usage, making it the most efficient option for scenarios where minimizing resource consumption is critical. The hybrid of the LSTM and CNN model, along with the CNN and EIDM models, offer a balance between inference accuracy and computational efficiency, making them viable choices for environments with moderate resource availability.
258
+
259
+ # Statistical Findings:
260
+
261
+ Table 5 presents our ANOVA results. Our results reveal significant differences in CPU usage among diverse ML-based IDS under DDoS, as evidenced by a compelling F-statistic of 60.39 and a p-value $< 0.05$ . This F-statistic highlights the variance in CPU usage across IDS groups compared to the within-group variance, underscoring a significant impact of IDS selection on CPU usage. The exceedingly small p-value further supports this conclusion. Moreover, we observed similar p-values (below 0.05) across various cyber threats, such as brute force, DoS, and the port scan, so we do not report those results.
262
+
263
+ ![](images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg)
264
+ Evaluating Machine Learning-driven Intrusion Detection System
265
+ Figure 3: The Average CPU usage of ML-based IDS under cyber threats.
266
+
267
+ Table 5 ANOVA results: CPU Usage for ML-based IDS under DDoS.
268
+
269
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>21609.86</td><td>3601.64</td><td>60.39</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>91</td><td>5426.49</td><td>59.62</td><td></td><td></td></tr><tr><td>Total</td><td>97</td><td>27036.36</td><td>278.73</td><td></td><td></td></tr></table>
270
+
271
+ # Finding
272
+
273
+ Our analysis reveals that traditional ML-based IDS such as KNN, DT, and RF exhibit increased CPU usage under various cyber threats, thus posing challenges for the edge. Also, LSTM and other DL-based IDS exhibit lower CPU demands. This consistent efficiency across various attacks highlights the benefit of adopting DL-based IDS at the edge gateway. The increased CPU usage of KNN, DT, and RF reflects their reliance on instance-based and tree-splitting operations, which require repeated evaluations. In contrast, DL models efficiently process data in structured layers, reducing computational strain.
274
+
275
+ # Energy consumption:
276
+
277
+ Figure 4 shows that the LSTM and DT models are the most energy-efficient across different types of cyberattacks, consistently exhibiting the lowest energy consumption. The CNN model also performs efficiently, with slightly higher energy usage. The hybrid LSTM–CNN model and EIDM have moderate energy consumption, balancing complexity and efficiency. In contrast, the KNN model has the highest energy consumption across all scenarios, making it less suitable for energy-constrained environments. The RF model falls in between, with moderate energy demands.
278
+
279
+ # Statistical Findings:
280
+
281
+ We conducted the ANOVA, and the results presented in Table 6 reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an F-statistic of 57.44 and a p-value of $< 0.05$ . This F-statistic delineates the contrast in energy consumption variance across the group of IDSs against the variance within groups, highlighting a significant influence of IDS selection on energy consumption. The extremely low p-value further supports this conclusion, conclusively demonstrating the substantial differences in energy consumption among the IDSs. In addition, we observed similar p-values ( $< 0.05$ ) for other cyber threats, such as brute force, DoS, and the port scan, so we do not report the results. This observation demonstrates significant differences in energy consumed among various ML-based IDS when faced with differing cyber threats.
282
+
283
+ ![](images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg)
284
+ Figure 4: The Average Energy consumption of ML-based IDS under cyber threats.
285
+
286
+ Table 6 ANOVA results: energy consumption for ML-based IDS under DDoS.
287
+
288
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>47732.07</td><td>7955.34</td><td>57.44</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>98</td><td>13571.72</td><td>138.48</td><td></td><td></td></tr><tr><td>Total</td><td>104</td><td>61303.80</td><td>589.45</td><td></td><td></td></tr></table>
289
+
290
+ # Finding
291
+
292
+ Our analysis concludes a marked discrepancy in energy consumption, with traditional ML-based IDS such as KNN, RF, and DT exhibiting significantly higher energy consumption under cyber threats such as DDoS and brute force, a drawback for energy-constrained edge devices. In contrast, DL-based IDS models, LSTM, CNN, EIDM, and their hybrids excel in energy efficiency, making them the preferable choice for the edge. Traditional ML models' higher energy consumption results from their iterative computations and lack of optimized inference paths, making them less viable for real-time IoT applications where power efficiency is crucial.
293
+
294
+ # 5.2. Experimental finding for RQ2
295
+
296
+ This section presents our experimental results for IoT-edge devices with SDN integration during real-time cyber threats.
297
+
298
+ # CPU Load:
299
+
300
+ In Figure 5, we illustrate the CPU load of various ML-based IDS models under different cyberattacks in an SDN-enabled at the edge gateway. The analysis shows that KNN and DT models have the highest CPU load, especially during DDoS and DoS, indicating significant resource demands at
301
+
302
+ the edge. Conversely, the LSTM model demonstrates the lowest CPU load, highlighting its efficiency in resource management. The CNN model also performs efficiently but not as well as LSTM. The LSTM and CNN model hybrid, similar to EIDM, offers balanced performance, making them suitable for scenarios where moderate CPU efficiency is required at the edge.
303
+
304
+ # Statistical Findings:
305
+
306
+ We conducted an ANOVA for the case of the DDoS attack, and the results are presented in Table 7. The results reveal significant differences in CPU load among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 142.57 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU load across IDSs compared to the variance within groups, indicating a significant impact of IDS selection on CPU load. In addition, consistent p-values $(< 0.05)$ were observed across other cyber threats, including brute force, DoS, and the port scan, so we do not report those results. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS when subjected to different cyber threats.
307
+
308
+ ![](images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg)
309
+ Evaluating Machine Learning-driven Intrusion Detection System
310
+ Figure 5: The Average CPU load of ML-based IDS under cyber threats.
311
+
312
+ Table 7 ANOVA results: CPU load for ML-based IDS in SDN under DDoS.
313
+
314
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>1184.21</td><td>197.36</td><td>142.57</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>91</td><td>125.97</td><td>1.38</td><td></td><td></td></tr><tr><td>Total</td><td>97</td><td>1310.18</td><td>13.50</td><td></td><td></td></tr></table>
315
+
316
+ # Finding
317
+
318
+ The findings demonstrate that traditional ML-based IDS, e.g., DT, exhibit elevated CPU loads under DDoS and DoS. In contrast, DL-based IDSs, including EIDM, LSTM, CNN, and their hybrids, demonstrate superior computational efficiency, making them suitable for the SDN-enabled edge gateway. The integration of SDN helps balance network resource allocation. Yet, traditional ML-based IDS still exhibit higher CPU load due to their design, reinforcing the efficiency advantage of DL-based models in dynamic network environments.
319
+
320
+ # CPU Usage:
321
+
322
+ Figure 6 shows that CPU usage across various ML-based IDS models in an SDN-enabled edge gateway is fairly consistent across different attack scenarios. Only minor variations are observed, as CNN, LSTM, and hybrid versions demonstrate relatively lower CPU usage, indicating efficient resource management. The DT, KNN, and RF models also show consistent CPU usage across attacks. The EIDM model balances efficiency and performance well.
323
+
324
+ # Statistical Findings:
325
+
326
+ We conducted an ANOVA for the results we got for ML-based IDS in SDN under the DDoS attack. The results presented in Table 8 reveal significant differences in CPU
327
+
328
+ usage among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 5.94 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU usage across the group of IDSs compared to the variance within groups, indicating a significant impact of IDS selection on CPU usage. In addition, we observed a consistently low p-value $(< 0.05)$ for other examined cyber threats (not reported in the paper), including brute force, DoS, and port scan, reinforcing the presence of marked differences in CPU usage among diverse ML-based IDS when subjected to different cyber threats.
329
+
330
+ ![](images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg)
331
+ Evaluating Machine Learning-driven Intrusion Detection System
332
+ Figure 6: The Average CPU usage of ML-based IDS under cyber threats.
333
+
334
+ Table 8 ANOVA results: CPU usage for ML-based IDS in SDN under DDoS.
335
+
336
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>27.97</td><td>4.66</td><td>5.94</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>91</td><td>71.32</td><td>0.78</td><td></td><td></td></tr><tr><td>Total</td><td>97</td><td>99.30</td><td>1.02</td><td></td><td></td></tr></table>
337
+
338
+ # Finding
339
+
340
+ In the context of SDN-enhanced IoT, deploying DL-based IDS with advanced models such as CNN, LSTM, EIDM, and their hybrids demonstrates efficient energy consumption. These models achieve reduced CPU usage against brute force and port scan, benefiting from the centralized resource optimization afforded by SDN. Nonetheless, the complexity of DDoS and DoS presents a significant challenge, necessitating increased computational resources. Although SDN optimizes network operations, IDS models such as KNN and RF remain resource-intensive due to their frequent computational overhead. At the same time, DL-based IDS maintains efficiency through batch processing and learned representations.
341
+
342
+ # Energy consumption:
343
+
344
+ Figure 7 depicts the average energy consumption of ML-based IDS models under different attacks in an SDN environment. The results indicate that traditional ML models consume more energy, especially during port scans, e.g., DT, KNN, and RF. In contrast, the EIDM model consistently shows lower energy consumption across all attack types, highlighting its efficiency. The LSTM and CNN models display moderate energy usage, including their hybrid
345
+
346
+ version. Compared to non-SDN environments, the increased energy consumption in the SDN setup is attributed to the SDN controller's active role in traffic management and threat response, which demands more energy resources.
347
+
348
+ # Statistical Findings:
349
+
350
+ We applied ANOVA on energy consumption data across ML-based IDSs in SDN under DDoS. The results, presented in Table 9, reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an impressive F-statistic of 18.27 and a p-value of $< 0.05$ . This F-statistic highlights the variance in energy consumption across a group of IDSs compared to the variance within groups, indicating a significant impact of IDS selection on energy consumption. Moreover, a consistently low p-value ( $< 0.05$ ) was observed across other cyber threats, including brute force, DoS, and port scan, so we do not report the results here. This highlights marked differences in energy consumption among diverse ML-based IDS when subjected to the examined cyber threats.
351
+
352
+ ![](images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg)
353
+ Evaluating Machine Learning-driven Intrusion Detection System
354
+ Figure 7: The Average Energy consumption of ML-based IDS under cyber threats.
355
+
356
+ Table 9 ANOVA results: Energy consumption for ML-based IDS in SDN under DDoS.
357
+
358
+ <table><tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr><tr><td>Between groups</td><td>6</td><td>1263.26</td><td>210.54</td><td>18.27</td><td>&lt; 0.05</td></tr><tr><td>Within groups</td><td>91</td><td>1048.21</td><td>11.51</td><td></td><td></td></tr><tr><td>Total</td><td>97</td><td>2311.48</td><td>23.82</td><td></td><td></td></tr></table>
359
+
360
+ # Finding
361
+
362
+ The findings accentuate the distinct energy efficiency profiles of ML-based IDSs when exposed to various cyber threat scenarios. During brute force and the port scan, traditional ML-based IDS such as DT, KNN, and RF are observed to have higher energy consumption. This indicates that these models are not energy-efficient under the examined conditions due to their complex computational frameworks. On the other hand, DL-based IDS and the EIDM show markedly superior energy efficiency. The reduced energy footprint of DL-based IDS is especially advantageous in the context of the SDN-enabled at the edge, where low energy consumption is crucial due to device constraints and the need for long-term, autonomous operation. The reduction in energy consumption observed in DL-based IDS when integrated with SDN highlights the benefits of centralized network control and optimized workload distribution, making them a more sustainable choice for IoT security.
363
+
364
+ # 5.3. Analyzing the Impact of SDN on CPU Usage, Load, and Energy Efficiency in ML-Based IDS
365
+
366
+ Figure 8 demonstrates that integrating SDN with ML-based IDS in the edge gateway significantly improves resource efficiency, reducing energy consumption, CPU usage, and CPU load. The most substantial improvement is in CPU usage, where DL-based IDS, e.g., LSTM and CNN, outperform traditional ML models by efficiently handling complex computations through parallel processing. Additionally, SDN integration reduces CPU load by balancing workloads, essential for real-time threat detection in edge gateway. The observed reduction in energy consumption further highlights the approach's suitability for battery-powered edge gateway, confirming its scalability and practicality for real-world applications.
367
+
368
+ # 6. ML-Based IDS vs. Signature-Based IDS (Snort)
369
+
370
+ This section compares our ML-based IDS models and the signature-based Snort IDS to evaluate the performance improvements achieved by leveraging ML-based IDS over traditional detection systems. This comparison is essential to highlight the advantages of ML-based approaches regarding resource efficiency, scalability, and adaptability, especially in edge gateway.
371
+
372
+ ![](images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg)
373
+ Evaluating Machine Learning-driven Intrusion Detection System
374
+ Figure 8: Reduction in energy consumption, CPU usage, and CPU load for ML-based IDS models with SDN integration in edge gateway.
375
+
376
+ The results presented in Table 10 provide a comparative analysis of our ML-based IDS models against the signature-based Snort IDS discussed in other research.
377
+
378
+ Regarding CPU usage, Snort IDS shows high utilization under heavy traffic due to its reliance on predefined rules and signature matching. In contrast, the ML-based IDS models demonstrate better CPU efficiency. While traditional ML models, e.g., DT and KNN, have higher CPU usage because of iterative computations, DL-based IDS, e.g., LSTM, CNN, and a hybrid of LSTM and CNN, EIDM exhibits lower CPU usage. This is primarily due to DL-based IDS's ability to process data in batches and leverage parallel processing for real-time threat detection. For energy consumption, Table 10 shows that Snort IDS consumes more energy, especially in IoT networks requiring multiple containers. However, our ML-based IDS models, especially DL architectures, e.g., LSTM and EIDM, demonstrate superior energy efficiency. These models optimize resource usage and process data efficiently, making them suitable for resource-constrained edge gateway and highlighting their scalability advantages. Finally, in terms of CPU load, Table 10 indicates that earlier versions of Snort IDS suffer from high CPU load on a single core because of their single-threaded architecture. Although newer versions introduce multi-threading, they still encounter processing bottlenecks under heavy traffic. Conversely, the ML-based IDS models distribute the CPU load more effectively across multiple cores. DL-based IDS, especially LSTM and hybrid architectures, achieve the lowest CPU load levels due to their parallel execution capabilities and efficient handling of sequential data.
379
+
380
+ # 7. Discussion
381
+
382
+ Our investigations explored the performance metrics of ML-based IDS with various models, especially in IoT-edge devices with and without SDN integration. Our study primarily evaluated the impact of these models on CPU load, CPU usage, and energy consumption amidst diverse
383
+
384
+ cyberattack scenarios. The empirical findings revealed significant disparities in resource utilization across different ML-based IDS, shedding light on crucial aspects of their deployment in IoT devices integrated with SDN. The KNN, DT, and RF significantly exhibited higher CPU load, CPU usage, and energy consumption, especially under specific types of cyberattacks. While these models are adept at identifying threats, their resource-intensive nature could pose challenges in the IoT context, where computational resources are often limited. This could lead to diminished performance or instability in environments with constrained resources. Specifically, KNN's higher variance in CPU load and energy consumption, as observed in Tables 4 and 5, stems from its lazy learning approach. Unlike other models, KNN does not build a generalized model during training but instead stores the entire dataset and computes distances at query time. This results in increased processing demands, leading to fluctuations in resource utilization. Such behavior makes KNN less suitable for real-time IDS applications in resource-constrained IoT networks[72] [73]. While CPU load significantly impacts energy consumption, it is not the sole factor. Memory operations, network activity, peripheral devices, and thermal management also contribute to power usage in IoT devices. High data transmission rates and active sensors can increase energy demands, while sustained CPU load may trigger additional energy consumption for cooling mechanisms. Although a strong correlation between CPU load and energy consumption is expected, these factors introduce variations across IDS models. Optimizing IDS efficiency can help balance security and resource constraints in IoT networks. Conversely, the CNN and LSTM models demonstrated greater efficiency in resource utilization. 
While their architectures are sophisticated and adept at processing complex data structures, they appear to optimize the computational load during inference when employed in IDS. This makes them more suitable for scenarios where resource conservation is critical. However, the complexity of these models introduces its own set of challenges, especially
385
+
386
+ Table 10 Comparative Resource Utilization of ML-Based IDS and Snort IDS.
387
+
388
+ <table><tr><td>Metric</td><td>Snort IDS</td><td>ML-Based IDS (Our Findings)</td></tr><tr><td>CPU Usage</td><td>- High Traffic Conditions: CPU usage can reach its maximum during initialization with many active rules [67].
389
+ - Multi-Core Systems: Snort 3.0 utilizes a significant portion of CPU resources on a multi-core processor [68] [69].</td><td>- Traditional ML Models (DT, KNN, RF): Tend to exhibit higher CPU usage during real-time cyber threats, especially those requiring intensive computations.
390
+ - DL-Based Models (CNN, LSTM, Hybrid of LSTM and CNN and EIDM): Show lower CPU usage compared to traditional ML models, with LSTM models demonstrating the most efficient utilization due to sequential data processing and parallelization.</td></tr><tr><td>Energy Consumption</td><td>- IoT Deployment: Deployment of Snort on IoT gateways results in considerable energy consumption [70].</td><td>- Traditional ML-based IDS: Generally consume more energy during inference cycles due to repetitive computations.
391
+ - DL-Based Models: Exhibit better energy efficiency, especially models that combine convolutional and sequential layers, benefiting from optimized processing structures.</td></tr><tr><td>CPU Load</td><td>- Single-Core Utilization: Older Snort versions (pre-3.0) lead to high load on a single core under heavy traffic [71].
392
+ - Multi-Core Systems: Updated versions distribute the load but still face processing bottlenecks under extensive traffic [71].</td><td>- Traditional ML-based IDS: Often show higher CPU load during complex attack scenarios.
393
+ - DL-Based Models: Maintain a lower CPU load, benefiting from parallel processing capabilities, with hybrid models showing the most balanced load distribution.</td></tr></table>
394
+
395
+ in terms of training and ongoing maintenance in the dynamic landscape of IoT devices integrated with SDN.
396
+
397
+ The balance between detection efficiency and resource consumption is especially critical at edge gateway, where devices often have limited processing power and energy reserves. This balance is closely tied to several United Nations Sustainable Development Goals (SDGs), especially SDG 9 (Industry, Innovation, and Infrastructure), SDG 11 (Sustainable Cities and Communities), and SDG 13 (Climate Action). Optimizing IDS deployment in smart cities strengthens cybersecurity infrastructure, directly supporting SDG 9 while fostering resilient, sustainable urban environments in line with SDG 11. Furthermore, by prioritizing energy-efficient IDS solutions, this research contributes to SDG 13, promoting responsible resource consumption and mitigating the environmental impact of growing IoT networks [74].
398
+
399
+ To aid IoT developers in selecting appropriate IDS solutions, we provide detailed guidelines in Table 11 and Table 12, outlining the performance trade-offs of seven different ML-based IDS models for IoT devices examined in this paper, both with and without SDN integration. These insights enable developers to make informed decisions, ensuring the optimal balance between security and resource efficiency during application development. We use graphical indicators (smiley faces) instead of numerical values to provide an intuitive, high-level comparison of IDS performance. This visual approach simplifies decision-making for IoT developers, aligning with similar methodologies used in prior work [75]. Moreover, all corresponding numerical values
400
+
401
+ related to CPU usage, CPU load, and energy consumption are presented in the Figures and Tables in Section 5.
402
+
403
+ On the other hand, to the best of our knowledge, only Tekin et al. [12] have explored a similar direction in evaluating the performance of ML-based IDS in IoT systems. However, our study takes a fundamentally different approach, especially in how computational resources are classified and utilized, which plays a critical role in the effectiveness and scalability of IoT systems. While Tekin et al. focus on energy consumption and inference times using Raspberry Pi as an IoT device, our study emphasizes the advantages of processing data at the edge, especially regarding energy efficiency, CPU load, and usage. We show how models such as DT and RF benefit from edge processing, reducing latency and improving responsiveness, especially when combined with SDN, which optimizes network traffic and resource allocation. Our findings underscore the importance of balancing computational tasks across the network using SDN to maintain performance, unlike Tekin et al. [12], who do not explore the impact of edge computing or SDN integration.
404
+
405
+ # 8. Threats to Validity
406
+
407
+ Empirical research inevitably encounters issues related to the validity of findings. In light of this, the present section seeks to identify and discuss possible threats to our research's validity, per the recommendations of Wohlin et al. [76].
408
+
409
+ Table 11 Guideline for selecting seven ML-based IDS in edge gateway.
410
+
411
+ <table><tr><td>Metric</td><td>DT</td><td>KNN</td><td>RF</td><td>CNN</td><td>LSTM</td><td>LSTM+CNN</td><td>EIDM</td></tr><tr><td>CPU load</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>CPU usage</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Energy consumption</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table>
412
+
413
+ Table 12 Guideline for selecting seven ML-based IDS in SDN-edge gateway.
414
+
415
+ <table><tr><td>Metric</td><td>DT</td><td>KNN</td><td>RF</td><td>CNN</td><td>LSTM</td><td>LSTM+CNN</td><td>EIDM</td></tr><tr><td>CPU load</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>CPU usage</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Energy consumption</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table>
416
+
417
+ The energy consumption and CPU usage of all ML-based IDS were lower during the brute force attack and port scan.
418
+
419
+ # 8.1. Internal Threats
420
+
421
+ During our empirical study on ML-based IDS in the context of IoT devices integrated with SDN, we recognized the existence of internal threats that impact the credibility of our findings. The precision of our performance measures is of utmost importance, namely the measurement of CPU load, CPU usage, and energy consumption in these intricate network settings. The complex characteristics of IoT devices and the adaptable structure of SDN provide significant difficulties in guaranteeing accurate and dependable performance evaluations. To address these concerns, we performed fifteen experiments on our testbeds. To improve the trustworthiness of our results in the context of SDN and IoT, we utilized average values to reduce the impact of network or hardware differences and ambient factors. In addition, the cyber threat simulations were conducted using highly practiced cyber security testing mechanisms in academic research and industries in IoT-edge devices integrated with SDN. This work aims to tackle internal risks associated with the setup and precision of ML-based IDS, improving their usefulness and significance in these fast-advancing technical fields.
422
+
423
+ # 8.2. External Threats:
424
+
425
+ The landscape of network security, especially in IoT-edge devices and IoT-edge devices integrated with SDN realms, is increasingly challenged by external threats. These range from sophisticated cyberattacks such as DoS, DDoS, and brute force attacks to more subtle, yet equally harmful, reconnaissance methods such as a port scan. These threats highlight the urgent need for robust and adaptable IDS solutions. Integrating ML into IDS presents promising advancements in threat detection and mitigation. However, this integration faces challenges due to the complexity of IoT-edge devices, which are marked by numerous interconnected devices, and the dynamic nature of SDN architectures. IDS solutions must be precise in threat detection while also being resource-efficient. Our research evaluates ML-based IDS based on CPU usage, CPU load, and energy consumption, especially under real-time cyber threats. These metrics are
426
+
427
+ vital to ensure that ML-based IDS are effective in protecting networks against external threats and sustainable in their operation. They help maintain a crucial balance between security and performance in the complex ecosystems of IoT devices and IoT devices integrated with SDN. Additionally, to ensure the transparency and reproducibility of our study, we have provided detailed information about the experimental setup and made our testbed and results publicly available for further research [77]. By adopting these measures, we have attempted to provide robust validation and increase the credibility of our findings among practitioners and researchers.
428
+
429
+ # 9. Conclusion
430
+
431
+ This paper presents a comparative analysis of the ML-based IDS in IoT-edge devices and IoT-edge devices integrated with SDN under different cyberattack scenarios, yielding comprehensive insights. In IoT systems, conventional ML models (e.g., KNN and DT) often experience increased CPU load and CPU usage, especially when subjected to DoS and DDoS cyber threats. This suggests that these models have limitations in resource-limited situations. In contrast, DL-based IDS (e.g., CNN and LSTM) exhibit reduced CPU usage, indicating improved efficiency and compatibility with IoT security. A consistent energy consumption pattern was identified across attack types in both scenarios, encompassing advanced neural networks and conventional methods. The consistent energy efficiency of these models, independent of their computing complexity, highlights their efficacy and long-term viability for use in different network environments. The findings emphasize the significance of choosing ML-based IDS according to their computational efficiency and energy consumption to achieve optimal performance in networks with limited resources. It is imperative to thoroughly evaluate the scalability and robustness of ML-based IDS in future research, especially in larger and more complex network environments. This assessment will explain their ability to adjust to changing cyber threats. Furthermore, it is crucial to evaluate the influence of new technologies, e.g., 5G and edge computing, on the efficacy
432
+
433
+ and suitability of ML-based IDS in advanced network infrastructures.
434
+
435
+ Future research directions should pivot towards optimizing ML-based IDS for enhanced scalability, real-time processing, and energy consumption. The overarching challenge is to develop effective threat detection models that minimally impact system resources. Furthermore, integrating these models into existing IoT devices and IoT devices integrated with SDN infrastructures presents additional challenges, including ensuring compatibility, scalability, and ease of maintenance.
436
+
437
+ # A. Conflict of interest
438
+
439
+ The authors declare that they have no known conflict of interest or personal relationships that could have appeared to influence the work reported in this paper.
440
+
441
+ # B. Acknowledgement
442
+
443
+ The authors thank Dr. Karim A. Emara et al. for collaborating to share the EIDM-IDS source code.
444
+
445
+ # References
446
+
447
+ [1] D. G. Chowdhry, R. Verma, M. Mathur, The Evolution of Business in the Cyber Age: Digital Transformation, Threats, and Security, CRC Press, 2020.
448
+ [2] B. Kaur, S. Dadkhah, F. Shoeleh, al., Internet of things (iot) security dataset evolution: Challenges and future directions, Internet of Things (2023) 100780.
449
+ [3] S. Hadzovic, S. Mrdovic, M. Radonjic, A path towards an internet of things and artificial intelligence regulatory framework, IEEE Communications Magazine (2023).
450
+ [4] K. L. M. Ang, J. K. P. Seng, E. Ngharamike, Towards crowdsourcing internet of things (crowd-iot): Architectures, security, and applications, Future Internet 14 (2) (2022) 49.
451
+ [5] M. Ahmid, O. Kazar, A comprehensive review of the internet of things security, Journal of Applied Security Research 18 (3) (2023) 289-305.
452
+ [6] P. Mall, R. Amin, A. K. Das, M. T. Leung, K.-K. R. Choo, Puf-based authentication and key agreement protocols for IoT, wsns, and smart grids: a comprehensive survey, IEEE Internet of Things Journal 9 (11) (2022) 8205-8228.
453
+ [7] A. Lakhlan, M. A. Mohammed, K. H. Abdulkareem, M. M. Jaber, J. Nedoma, R. Martinek, P. Zmij, Delay optimal schemes for internet of things applications in heterogeneous edge cloud computing networks, Sensors 22 (16) (2022) 5937.
454
+ [8] P. Malhotra, Y. Singh, P. Anand, Bangotra, al, Internet of things: Evolution, concerns and security challenges, Sensors 21 (5) (2021) 1809.
455
+ [9] A. Djenna, S. Harous, D. E. Saidouni, Internet of things meet the internet of threats: New concern cyber security issues of critical cyber infrastructure, Applied Sciences 11 (10) (2021) 4580.
456
+ [10] M. Almiani, A. AbuGhazleh, A. Al-Rahayfeh, S. Atiewi, A. Razaque, Deep recurrent neural network for IoT intrusion detection system, Simulation Modelling Practice and Theory 101 (2020) 102031.
457
+ [11] T. Rajmohan, P. H. Nguyen, N. Ferry, Research landscape of patterns and architectures for IoT security: a systematic review, in: 2020 46th Euromicro conference on software engineering and advanced applications (SEAA), IEEE, 2020, pp. 463-470.
458
+ [12] N. Tekin, A. Acar, A. Aris, A. S. Uluagac, V. C. Gungor, Energy consumption of on-device machine learning models for IoT intrusion detection, Internet of Things 21 (2023) 100670.
459
+
460
+ [13] A. Hakiri, A. Gokhale, P. Berthou, D. C. Schmidt, T. Gayraud, Software-defined networking: Challenges and research opportunities for future internet, Computer Networks 75 (2014) 453-471.
461
+ [14] K. H. K. Reddy, A. K. Luhach, V. V. Kumar, S. Pratihar, D. Kumar, D. S. Roy, Towards energy efficient smart city services: A software defined resource management scheme for data centers, Sustainable Computing: Informatics and Systems 35 (2022) 100776.
462
+ [15] A. Montazerolghaem, Software-defined internet of multimedia things: Energy-efficient and load-balanced resource management, IEEE Internet of Things Journal 9 (3) (2021) 2432-2442.
463
+ [16] J. Liu, H. Shen, H. S. Narman, W. Chung, Z. Lin, A survey of mobile crowdsensing techniques: A critical component for the internet of things, ACM Transactions on Cyber-Physical Systems 2 (3) (2018) 1-26.
464
+ [17] B. B. Gupta, M. Quamara, An overview of internet of things (iot): Architectural aspects, challenges, and protocols, Concurrency and Computation: Practice and Experience 32 (21) (2020) e4946.
465
+ [18] A. A. Alsulami, Q. A. Al-Haija, A. Tayeb, Anomaly-based intrusion detection system for IoT networks with improved data engineering (2022).
466
+ [19] I. Mukherjee, N. K. Sahu, S. K. Sahana, Simulation and modeling for anomaly detection in IoT network using machine learning, International Journal of Wireless Information Networks 30 (2) (2023) 173-189.
467
+ [20] O. Elnakib, E. Shaaban, M. Mahmoud, K. Emara, Eidm: deep learning model for IoT intrusion detection systems, The Journal of Supercomputing (2023) 1-21.
468
+ [21] M. Douiba, S. Benkirane, A. Guezzaz, M. Azrour, An improved anomaly detection model for IoT security using decision tree and gradient boosting, The Journal of Supercomputing 79 (3) (2023) 3392-3411.
469
+ [22] S. M. Kasongo, Y. Sun, A deep learning method with wrapper-based feature extraction for wireless intrusion detection system, Computers & Security 92 (2020) 101752.
470
+ [23] A. Verma, V. Ranga, Machine learning-based intrusion detection systems for IoT applications, Wireless Personal Communications 111 (2020) 2287-2310.
471
+ [24] Y. Otoum, D. Liu, A. Nayak, Dl-ids: a deep learning-based intrusion detection framework for securing IoT, Transactions on Emerging Telecommunications Technologies 33 (3) (2022) e3803.
472
+ [25] T. Gaber, A. El-Ghamry, A. E. Hassanien, Injection attack detection using machine learning for smart IoT applications, Physical Communication 52 (2022) 101685.
473
+ [26] U. Sachdeva, P. R. Vamsi, Analysis of deep learning models for anomaly detection in time series IoT sensor data, in: Proceedings of the 2022 Fourteenth International Conference on Contemporary Computing, 2022, pp. 54-62.
474
+ [27] K. Nimmy, M. Dilraj, S. Sankaran, K. Achuthan, Leveraging power consumption for anomaly detection on IoT devices in smart homes, Journal of Ambient Intelligence and Humanized Computing (2022) 1-12.
475
+ [28] R. Chaganti, W. Suliman, V. Ravi, A. Dua, Deep learning approach for sdn-enabled intrusion detection system in IoT networks, Information 14 (1) (2023) 41.
476
+ [29] M. M. Isa, L. Mhamdi, Hybrid deep autoencoder with random forest in native sdn intrusion detection environment, in: ICC 2022-IEEE International Conference on Communications, IEEE, 2022, pp. 1698-1703.
477
+ [30] P. T. Duy, H. Do Hoang, N. H. Khoa, V.-H. Pham, et al., Fool your enemies: Enable cyber deception and moving target defense for intrusion detection in sdn, in: 2022 21st International Symposium on Communications and Information Technologies (ISCIT), IEEE, 2022, pp. 27-32.
478
+ [31] M. A. Bouke, A. Abdullah, S. H. ALshatebi, M. T. Abdullah, E2ids: An enhanced intelligent intrusion detection system based on decision tree algorithm, Journal of Applied Artificial Intelligence 3 (1) (2022) 1-16.
479
+
480
+ [32] L. A. C. Ahakonye, C. I. Nwakanma, J.-M. Lee, D.-S. Kim, Scada intrusion detection scheme exploiting the fusion of modified decision tree and chi-square feature selection, Internet of Things 21 (2023) 100676.
481
+ [33] M. Hammad, N. Hewahi, W. Elmedany, Mmm-rf: A novel high accuracy multinomial mixture model for network intrusion detection systems, Computers & Security 120 (2022) 102777.
482
+ [34] K. Albulayhi, Q. Abu Al-Haija, S. A. Alsuhibany, A. A. Jillepalli, M. Ashrafuzzaman, F. T. Sheldon, Iot intrusion detection using machine learning with a novel high performing feature selection method, Applied Sciences 12 (10) (2022) 5015.
483
+ [35] H. Yang, S. Liang, J. Ni, H. Li, X. S. Shen, Secure and efficient kNN classification for industrial internet of things, IEEE Internet of Things Journal 7 (11) (2020) 10945-10954.
484
+ [36] A. D. Afifaturahman, M. Firmansyah, Perbandingan algorithm k-nearest neighbour (knn) dan naive bayes pada intrusion detection system (ids), Innovation in Research of Informatics (INNOVATICs) 3 (1) (2021).
485
+ [37] F. Z. Belgrana, N. Benamrane, M. A. Hamaida, A. M. Chaabani, A. Taleb-Ahmed, Network intrusion detection system using neural network and condensed nearest neighbors with selection of nsl-kdd influencing features, in: 2020 IEEE International Conference on Internet of Things and Intelligence System (IoTaIS), IEEE, 2021, pp. 23-29.
486
+ [38] Y. Yan, L. Qi, J. Wang, Y. Lin, L. Chen, A network intrusion detection method based on stacked autoencoder and LSTM, in: ICC 2020-2020 IEEE International Conference on Communications (ICC), IEEE, 2020, pp. 1-6.
487
+ [39] M. D. Hossain, H. Inoue, H. Ochiai, D. Fall, Y. Kadobayashi, Lstm-based intrusion detection system for in-vehicle can bus communications, IEEE Access 8 (2020) 185489-185502.
488
+ [40] A. El-Ghamry, A. Darwish, A. E. Hassanien, An optimized cnn-based intrusion detection system for reducing risks in smart farming, Internet of Things 22 (2023) 100709.
489
+ [41] S. Jamshidi, A. Nikanjam, M. A. Hamdaqa, F. Khomh, Attack detection by using deep learning for cyber-physical system, in: Artificial Intelligence for Cyber-Physical Systems Hardening, Springer, 2022, pp. 155–179.
490
+ [42] P. Sun, P. Liu, Q. Li, C. Liu, X. Lu, R. Hao, J. Chen, Dl-ids: Extracting features using cnn-lstm hybrid network for intrusion detection system, Security and communication networks 2020 (2020) 1–11.
491
+ [43] A. Halbouni, T. S. Gunawan, M. H. Habaebi, M. Halbouni, M. Kartiwi, R. Ahmad, Cnn-lstm: hybrid deep neural network for network intrusion detection system, IEEE Access 10 (2022) 99837-99849.
492
+ [44] D. Stiawan, M. Y. B. Idris, A. M. Bamhdi, R. Budiarto, et al., Cicids-2017 dataset feature analysis with information gain for anomaly detection, IEEE Access 8 (2020) 132911–132921.
493
+ [45] R. Panigrahi, S. Borah, A detailed analysis of cicids2017 dataset for designing intrusion detection systems, International Journal of Engineering & Technology 7 (3.24) (2018) 479-482.
494
+ [46] A. A. Alsulami, Q. Abu Al-Haija, A. Tayeb, A. Alqahtani, An intrusion detection and classification system for IoT traffic with improved data engineering, Applied Sciences 12 (23) (2022) 12336.
495
+ [47] L. Yang, A. Moubayed, I. Hamieh, A. Shami, Tree-based intelligent intrusion detection system in internet of vehicles, in: 2019 IEEE global communications conference (GLOBECOM), IEEE, 2019, pp. 1-6.
496
+ [48] Great Learning, Label encoding in python, [link], accessed: 2024-03-21 (n.d.).
497
+ [49] Analytics Vidhya, Overcoming class imbalance using smote techniques, [link], accessed: 2024-03-21 (2020).
498
+ [50] T. N. Sainath, O. Vinyals, A. Senior, H. Sak, Convolutional, long short-term memory, fully connected deep neural networks, in: 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, 2015, pp. 4580-4584.
499
+ [51] L. Muhammad, A. A. Haruna, U. S. Sharif, M. B. Mohammed, Cnn-lstm deep learning based forecasting model for Covid-19 infection cases in nigeria, south africa and botswana, Health and technology 12 (6) (2022) 1259–1276.
500
+
501
+ [52] L. Alzubaidi, J. Zhang, A. J. Humaidi, A. Al-Dujaili, Y. Duan, O. Al-Shamma, J. Santamaría, M. A. Fadhel, M. Al-Amidie, L. Farhan, Review of deep learning: concepts, cnn architectures, challenges, applications, future directions, Journal of big Data 8 (2021) 1-74.
502
+ [53] G. Najera-Gutierrez, J. A. Ansari, Web Penetration Testing with Kali Linux: Explore the methods and tools of ethical hacking with Kali Linux, Packt Publishing Ltd, 2018.
503
+ [54] S. Asadollahi, B. Goswami, M. Sameer, Ryu controller's scalability experiment on software defined networks, in: 2018 IEEE international conference on current trends in advanced computing (ICCTAC), IEEE, 2018, pp. 1-5.
504
+ [55] K. Kaur, J. Singh, N. S. Ghumman, Mininet as software defined networking testing platform, in: International conference on communication, computing & systems (ICCCS), 2014, pp. 139-42.
505
+ [56] L. Ståhle, S. Wold, Analysis of variance (anova), Chemometrics and intelligent laboratory systems 6 (4) (1989) 259-272.
506
+ [57] D. Breitenbacher, I. Homoliak, Y. L. Aung, N. O. Tippenhauer, Y. Elovici, Hades-iot: A practical host-based anomaly detection system for iot devices, in: Proceedings of the 2019 ACM Asia conference on computer and communications security, 2019, pp. 479-484.
507
+ [58] B. Chen, Y. Zhang, G. Iosifidis, M. Liu, Reinforcement learning on computational resource allocation of cloud-based wireless networks, in: 2020 IEEE 6th World Forum on Internet of Things (WF-IoT), IEEE, 2020, pp. 1-6.
508
+ [59] R. D. Corin, A. Costanzo, F. Callegati, D. Siracusa, Methods and techniques for dynamic deployability of software-defined security services, CoRR (2020).
509
+ [60] A. van de Ven, Powertop, [link].
510
+ [61] N. F. Syed, Z. Baig, A. Ibrahim, C. Valli, Denial of service attack detection through machine learning for the IoT, Journal of Information and Telecommunication 4 (4) (2020) 482-503.
511
+ [62] K. Sonar, H. Upadhyay, A survey: Ddos attack on internet of things, International Journal of Engineering Research and Development 10 (11) (2014) 58-63.
512
+ [63] M. M. Raikar, S. Meena, Ssh brute force attack mitigation in internet of things (iot) network: An edge device security measure, in: 2021 2nd international conference on secure cyber computing and communications (ICSCCC), IEEE, 2021, pp. 72-77.
513
+ [64] Q. A. Al-Haija, E. Saleh, M. Alnabhan, Detecting port scan attacks using logistic regression, in: 2021 4th International symposium on advanced electrical and communication technologies (ISAECT), IEEE, 2021, pp. 1-5.
514
+ [65] Z. Campbell, A. Bray, A. Ritz, A. Groce, Differentially private anova testing, in: 2018 1st International Conference on Data Intelligence and Security (ICDIS), IEEE, 2018, pp. 281-285.
515
+ [66] H. Wei, X. Song, Smooth tests for normality in anova, arXiv preprint arXiv:2110.04849 (2021).
516
+ [67] E. Frimpong, A performance study of the snort ids (2008).
517
+ [68] D. Fadhilah, M. I. Marzuki, Performance analysis of ids snort and ids suricata with many-core processor in virtual machines against dos/ddos attacks, in: 2020 2nd International Conference on Broadband Communications, Wireless Sensors and Powering (BCWSP), IEEE, 2020, pp. 157-162.
518
+ [69] M. Hawedi, C. Talhi, H. Boucheneb, Multi-tenant intrusion detection system for public cloud (mtids), The Journal of Supercomputing 74 (2018) 5199–5230.
519
+ [70] S. M. Raza, J. Jeong, M. Kim, B. Kang, H. Choo, Empirical performance and energy consumption evaluation of container solutions on resource constrained IoT gateways, Sensors 21 (4) (2021) 1378.
520
+ [71] W. Park, S. Ahn, Performance comparison and detection analysis in snort and suricata environment, Wireless Personal Communications 94 (2017) 241-252.
521
+ [72] E. Ozturk Kiyak, B. Ghasemkhani, D. Birant, High-level k-nearest neighbors (hlknn): A supervised machine learning model for classification analysis, Electronics 12 (18) (2023) 3828.
522
+ [73] E. Altulaihan, M. A. Almaiah, A. Aljughaiman, Anomaly detection ids for detecting dos attacks in IoT networks based on machine learning
523
+
524
+ algorithms, Sensors 24 (2) (2024) 713.
525
+
526
+ [74] U. Nations, United nations goals: Sustainable development, [link], accessed: September 3, 2024 (2023).
527
+ [75] F. Khomh, S. A. Abtahizadeh, Understanding the impact of cloud patterns on performance and energy consumption, Journal of Systems and Software 141 (2018) 151-170.
528
+ [76] C. Wohlin, P. Runeson, M. Höst, M. C. Ohlsson, B. Regnell, A. Wesslén, Experimentation in software engineering, Springer Science & Business Media, 2012.
529
+ [77] S. Jamshidi, Replication packages, [link].
530
+
531
+ # Appendix
532
+
533
+ Table 13: Abbreviations used in this research.
534
+
535
+ <table><tr><td>Abbreviation</td><td>Meaning</td></tr><tr><td>AI</td><td>Artificial Intelligence</td></tr><tr><td>ANOVA</td><td>Analysis of Variance</td></tr><tr><td>ANN</td><td>Artificial Neural Network</td></tr><tr><td>BT</td><td>Boosting Tree</td></tr><tr><td>CPU</td><td>Central Processing Unit</td></tr><tr><td>DAE</td><td>Deep Autoencoder</td></tr><tr><td>DDoS</td><td>Distributed Denial-of-Service</td></tr><tr><td>DL</td><td>Deep Learning</td></tr><tr><td>DoS</td><td>Denial-of-Service</td></tr><tr><td>DT</td><td>Decision Tree</td></tr><tr><td>GPU</td><td>Graphics Processing Unit</td></tr><tr><td>IDS</td><td>Intrusion Detection System</td></tr><tr><td>IoT</td><td>Internet of Things</td></tr><tr><td>KNN</td><td>K-Nearest Neighbor</td></tr><tr><td>LR</td><td>Logistic Regression</td></tr><tr><td>LSTM</td><td>Long Short-Term Memory</td></tr><tr><td>CNN</td><td>Convolutional Neural Network</td></tr><tr><td>MCU</td><td>Microcontroller Unit</td></tr><tr><td>MITM</td><td>Man-in-the-Middle</td></tr><tr><td>ML</td><td>Machine Learning</td></tr><tr><td>MTD</td><td>Moving Target Defense</td></tr><tr><td>NB</td><td>Naïve Bayes</td></tr><tr><td>R2L</td><td>Root to Local</td></tr><tr><td>RF</td><td>Random Forest</td></tr><tr><td>RNN</td><td>Recurrent Neural Network</td></tr><tr><td>SDN</td><td>Software-Defined Networking</td></tr><tr><td>SDPN</td><td>Stacked-Deep Polynomial Network</td></tr><tr><td>SMO</td><td>Spider Monkey Optimization</td></tr><tr><td>SMOTE</td><td>Synthetic Minority Oversampling Technique</td></tr><tr><td>SNN</td><td>Spiking Neural Network</td></tr><tr><td>SVM</td><td>Support Vector Machine</td></tr><tr><td>U2R</td><td>User to Root</td></tr><tr><td>WFEU</td><td>Wrapper Feature Extraction Unit</td></tr><tr><td>WSN</td><td>Wireless Sensor Network</td></tr></table>
data/2025/2504_09xxx/2504.09634/images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg ADDED

Git LFS Details

  • SHA256: d482f2169ab6e9df119a29a1b3feb8686695cb6c5aa39cec13492b738de232e7
  • Pointer size: 130 Bytes
  • Size of remote file: 61.6 kB
data/2025/2504_09xxx/2504.09634/images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg ADDED

Git LFS Details

  • SHA256: 2275bad5b31f71d8638c27d52ac1a3fa178053933da021ff83047243b965a5c5
  • Pointer size: 130 Bytes
  • Size of remote file: 31.6 kB
data/2025/2504_09xxx/2504.09634/images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg ADDED

Git LFS Details

  • SHA256: 19dd1369014050af77f23d432d97a20de76a2cf2bd21d74891079bcd73036000
  • Pointer size: 130 Bytes
  • Size of remote file: 30.7 kB
data/2025/2504_09xxx/2504.09634/images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg ADDED

Git LFS Details

  • SHA256: e2fdd0d388d5b77f7bbaf5c0f5b5d5f7e99c47fd907516561db8e2e7b8fcaac8
  • Pointer size: 130 Bytes
  • Size of remote file: 64.9 kB
data/2025/2504_09xxx/2504.09634/images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg ADDED

Git LFS Details

  • SHA256: 2394c75c53aa5d52fb52c02ce55c43528eb56352f5f234a10c9d9d65bba19ebc
  • Pointer size: 130 Bytes
  • Size of remote file: 30.2 kB
data/2025/2504_09xxx/2504.09634/images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg ADDED

Git LFS Details

  • SHA256: c769427387cdccdb733574bf20c8fef0eb62612f085bea16cdfc4047cce434b4
  • Pointer size: 130 Bytes
  • Size of remote file: 33.2 kB
data/2025/2504_09xxx/2504.09634/images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg ADDED

Git LFS Details

  • SHA256: 45b987c37439e48b72770976a88a200d42730778a9284b15b798f245b4af491f
  • Pointer size: 130 Bytes
  • Size of remote file: 29.6 kB
data/2025/2504_09xxx/2504.09634/images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg ADDED

Git LFS Details

  • SHA256: 01812e6c21e27b509d3e5a6e2fae082299dfd873263853a35ade219bd6216359
  • Pointer size: 130 Bytes
  • Size of remote file: 36.8 kB
data/2025/2504_09xxx/2504.09634/images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg ADDED

Git LFS Details

  • SHA256: 701723c80d3b0039662cd6e59598bc70599896e9df04de2583eb4961cc6ebb1c
  • Pointer size: 130 Bytes
  • Size of remote file: 32.8 kB
data/2025/2504_09xxx/2504.09634/images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg ADDED

Git LFS Details

  • SHA256: 001e60b77f5e3c2930c5618a2ea2164025af2a6e9c36fb5fd553dbe3fc9f3dbb
  • Pointer size: 130 Bytes
  • Size of remote file: 38 kB
data/2025/2504_09xxx/2504.09634/images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg ADDED

Git LFS Details

  • SHA256: 5f1b80db3d6e83c7bf0d03f3e3b0dc5e5f534c120372acbf6238b37618b6feb4
  • Pointer size: 130 Bytes
  • Size of remote file: 30.5 kB
data/2025/2504_09xxx/2504.09634/images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg ADDED

Git LFS Details

  • SHA256: a6a273d08312900fa23eca7a1aa0acf3ff55f78f6c276ecce2f20cf567326606
  • Pointer size: 131 Bytes
  • Size of remote file: 107 kB
data/2025/2504_09xxx/2504.09634/images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg ADDED

Git LFS Details

  • SHA256: 560d245d813ececf44e823d822c15f433b6106e41d92e16fc26d9a7f81d83331
  • Pointer size: 130 Bytes
  • Size of remote file: 30.5 kB
data/2025/2504_09xxx/2504.09634/images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg ADDED

Git LFS Details

  • SHA256: 73efe3da1da5845d8e271563c38bb0790ab3c37cff8e126649d7da00f1660a42
  • Pointer size: 131 Bytes
  • Size of remote file: 137 kB
data/2025/2504_09xxx/2504.09634/images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg ADDED

Git LFS Details

  • SHA256: a9b9d0537265974e0ce97def2f6aba7a2d872bce07b11da8392f7a97a0e3ea98
  • Pointer size: 130 Bytes
  • Size of remote file: 31.3 kB
data/2025/2504_09xxx/2504.09634/images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg ADDED

Git LFS Details

  • SHA256: 2bd7480c4b36c7bd8c5ddd4e4128a8bea513ab1a362531c0979bb01ecf79b9e1
  • Pointer size: 130 Bytes
  • Size of remote file: 32 kB
data/2025/2504_09xxx/2504.09634/images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg ADDED

Git LFS Details

  • SHA256: c457701d4e16634dfce821911290d91ccf23dfeae71b24d1f89d6405f8932987
  • Pointer size: 130 Bytes
  • Size of remote file: 32.5 kB
data/2025/2504_09xxx/2504.09634/images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg ADDED

Git LFS Details

  • SHA256: ec1aacb88df2ce9e536cf3cc6934f8a71b70bc1ed755a5ed1a9024ab0d4e6173
  • Pointer size: 130 Bytes
  • Size of remote file: 26.9 kB
data/2025/2504_09xxx/2504.09634/images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg ADDED

Git LFS Details

  • SHA256: 72aed6586f45bfd215bf56003085ff8f4bd601b982c0debf051994eca8c5ac68
  • Pointer size: 130 Bytes
  • Size of remote file: 34 kB
data/2025/2504_09xxx/2504.09634/images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg ADDED

Git LFS Details

  • SHA256: ed2b73f78e64b7f8105680a2cfd91ecae90bc7a9ef3126b1c6136e7c0b2e5502
  • Pointer size: 131 Bytes
  • Size of remote file: 212 kB
data/2025/2504_09xxx/2504.09634/images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg ADDED

Git LFS Details

  • SHA256: 963e7f25b609750bced1d83dbe1d59127dc6aee949cc83d9ba3bd327a3da354c
  • Pointer size: 130 Bytes
  • Size of remote file: 36 kB
data/2025/2504_09xxx/2504.09634/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_content_list.json ADDED
@@ -0,0 +1,1210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 186,
8
+ 121,
9
+ 816,
10
+ 174
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Xingjian Zhang $^{1,*}$ Siwei Wen $^{1,2,*}$ Wenjun Wu $^{1,2,3}$ Lei Huang $^{1,2,3,\\boxtimes}$",
17
+ "bbox": [
18
+ 245,
19
+ 223,
20
+ 767,
21
+ 242
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China \n $^{2}$ Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University \n $^{3}$ Hangzhou International Innovation Institute, Beihang University, Hangzhou, China",
28
+ "bbox": [
29
+ 184,
30
+ 253,
31
+ 810,
32
+ 297
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "{huangleiai}@buaa.edu.cn",
39
+ "bbox": [
40
+ 393,
41
+ 308,
42
+ 604,
43
+ 325
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 459,
53
+ 359,
54
+ 540,
55
+ 376
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of \"aha moments\". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1.",
62
+ "bbox": [
63
+ 228,
64
+ 392,
65
+ 769,
66
+ 616
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1 Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 171,
76
+ 645,
77
+ 315,
78
+ 662
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data.",
85
+ "bbox": [
86
+ 169,
87
+ 676,
88
+ 826,
89
+ 777
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary.",
96
+ "bbox": [
97
+ 169,
98
+ 781,
99
+ 828,
100
+ 881
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "aside_text",
106
+ "text": "arXiv:2504.09641v1 [cs.CV] 13 Apr 2025",
107
+ "bbox": [
108
+ 22,
109
+ 262,
110
+ 60,
111
+ 708
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "page_footnote",
117
+ "text": "\\*denotes equal contributor; $\\boxtimes$ denotes corresponding author.",
118
+ "bbox": [
119
+ 189,
120
+ 893,
121
+ 565,
122
+ 907
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "image",
128
+ "img_path": "images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg",
129
+ "image_caption": [
130
+ "Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue)."
131
+ ],
132
+ "image_footnote": [],
133
+ "bbox": [
134
+ 186,
135
+ 99,
136
+ 810,
137
+ 178
138
+ ],
139
+ "page_idx": 1
140
+ },
141
+ {
142
+ "type": "image",
143
+ "img_path": "images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg",
144
+ "image_caption": [],
145
+ "image_footnote": [],
146
+ "bbox": [
147
+ 181,
148
+ 195,
149
+ 210,
150
+ 215
151
+ ],
152
+ "page_idx": 1
153
+ },
154
+ {
155
+ "type": "text",
156
+ "text": "Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags.",
157
+ "bbox": [
158
+ 212,
159
+ 191,
160
+ 790,
161
+ 220
162
+ ],
163
+ "page_idx": 1
164
+ },
165
+ {
166
+ "type": "text",
167
+ "text": "Owen2-VL-7B-Instruct:",
168
+ "text_level": 1,
169
+ "bbox": [
170
+ 189,
171
+ 232,
172
+ 315,
173
+ 241
174
+ ],
175
+ "page_idx": 1
176
+ },
177
+ {
178
+ "type": "text",
179
+ "text": "<think> The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. </think> <answer> C. pick up </answer>",
180
+ "bbox": [
181
+ 183,
182
+ 241,
183
+ 792,
184
+ 268
185
+ ],
186
+ "page_idx": 1
187
+ },
188
+ {
189
+ "type": "text",
190
+ "text": "TinyLLaVA-Video-SFT: C. pick up.",
191
+ "bbox": [
192
+ 191,
193
+ 280,
194
+ 383,
195
+ 292
196
+ ],
197
+ "page_idx": 1
198
+ },
199
+ {
200
+ "type": "text",
201
+ "text": "TinyLLaVA-Video-R1:",
202
+ "text_level": 1,
203
+ "bbox": [
204
+ 189,
205
+ 305,
206
+ 303,
207
+ 313
208
+ ],
209
+ "page_idx": 1
210
+ },
211
+ {
212
+ "type": "text",
213
+ "text": "<think> Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. </think> <answer> C </answer>",
214
+ "bbox": [
215
+ 183,
216
+ 314,
217
+ 799,
218
+ 412
219
+ ],
220
+ "page_idx": 1
221
+ },
222
+ {
223
+ "type": "text",
224
+ "text": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of \"aha moments\", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models.",
225
+ "bbox": [
226
+ 169,
227
+ 500,
228
+ 823,
229
+ 597
230
+ ],
231
+ "page_idx": 1
232
+ },
233
+ {
234
+ "type": "text",
235
+ "text": "2 Related Work",
236
+ "text_level": 1,
237
+ "bbox": [
238
+ 171,
239
+ 621,
240
+ 321,
241
+ 636
242
+ ],
243
+ "page_idx": 1
244
+ },
245
+ {
246
+ "type": "text",
247
+ "text": "Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as \"aha moments\" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models.",
248
+ "bbox": [
249
+ 169,
250
+ 655,
251
+ 826,
252
+ 794
253
+ ],
254
+ "page_idx": 1
255
+ },
256
+ {
257
+ "type": "text",
258
+ "text": "Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance.",
259
+ "bbox": [
260
+ 169,
261
+ 814,
262
+ 826,
263
+ 912
264
+ ],
265
+ "page_idx": 1
266
+ },
267
+ {
268
+ "type": "page_number",
269
+ "text": "2",
270
+ "bbox": [
271
+ 493,
272
+ 935,
273
+ 504,
274
+ 946
275
+ ],
276
+ "page_idx": 1
277
+ },
278
+ {
279
+ "type": "image",
280
+ "img_path": "images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg",
281
+ "image_caption": [
282
+ "Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning."
283
+ ],
284
+ "image_footnote": [],
285
+ "bbox": [
286
+ 186,
287
+ 99,
288
+ 808,
289
+ 181
290
+ ],
291
+ "page_idx": 2
292
+ },
293
+ {
294
+ "type": "image",
295
+ "img_path": "images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg",
296
+ "image_caption": [],
297
+ "image_footnote": [],
298
+ "bbox": [
299
+ 183,
300
+ 196,
301
+ 210,
302
+ 217
303
+ ],
304
+ "page_idx": 2
305
+ },
306
+ {
307
+ "type": "text",
308
+ "text": "Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags.",
309
+ "bbox": [
310
+ 212,
311
+ 191,
312
+ 812,
313
+ 220
314
+ ],
315
+ "page_idx": 2
316
+ },
317
+ {
318
+ "type": "text",
319
+ "text": "Qwen2-VL-7B-Instruct:",
320
+ "text_level": 1,
321
+ "bbox": [
322
+ 189,
323
+ 233,
324
+ 315,
325
+ 242
326
+ ],
327
+ "page_idx": 2
328
+ },
329
+ {
330
+ "type": "text",
331
+ "text": "<think> The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. </think> <answer> A. Bone grafting. </answer>",
332
+ "bbox": [
333
+ 183,
334
+ 242,
335
+ 808,
336
+ 270
337
+ ],
338
+ "page_idx": 2
339
+ },
340
+ {
341
+ "type": "text",
342
+ "text": "TinyLLaVA-Video-SFT: A.",
343
+ "text_level": 1,
344
+ "bbox": [
345
+ 191,
346
+ 281,
347
+ 331,
348
+ 292
349
+ ],
350
+ "page_idx": 2
351
+ },
352
+ {
353
+ "type": "text",
354
+ "text": "TinyLLaVA-Video-R1:",
355
+ "text_level": 1,
356
+ "bbox": [
357
+ 189,
358
+ 306,
359
+ 303,
360
+ 314
361
+ ],
362
+ "page_idx": 2
363
+ },
364
+ {
365
+ "type": "text",
366
+ "text": "<think> To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. </think> <answer> A </answer>",
367
+ "bbox": [
368
+ 181,
369
+ 314,
370
+ 808,
371
+ 412
372
+ ],
373
+ "page_idx": 2
374
+ },
375
+ {
376
+ "type": "text",
377
+ "text": "Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs.",
378
+ "bbox": [
379
+ 169,
380
+ 489,
381
+ 823,
382
+ 587
383
+ ],
384
+ "page_idx": 2
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "3 Methods",
389
+ "text_level": 1,
390
+ "bbox": [
391
+ 171,
392
+ 602,
393
+ 279,
394
+ 617
395
+ ],
396
+ "page_idx": 2
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications.",
401
+ "bbox": [
402
+ 169,
403
+ 628,
404
+ 823,
405
+ 696
406
+ ],
407
+ "page_idx": 2
408
+ },
409
+ {
410
+ "type": "text",
411
+ "text": "3.1 TinyLLaVA-Video",
412
+ "text_level": 1,
413
+ "bbox": [
414
+ 171,
415
+ 709,
416
+ 341,
417
+ 724
418
+ ],
419
+ "page_idx": 2
420
+ },
421
+ {
422
+ "type": "text",
423
+ "text": "TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning.",
424
+ "bbox": [
425
+ 169,
426
+ 734,
427
+ 823,
428
+ 847
429
+ ],
430
+ "page_idx": 2
431
+ },
432
+ {
433
+ "type": "text",
434
+ "text": "3.2 Group Relative Policy Optimization (GRPO)",
435
+ "text_level": 1,
436
+ "bbox": [
437
+ 171,
438
+ 857,
439
+ 524,
440
+ 872
441
+ ],
442
+ "page_idx": 2
443
+ },
444
+ {
445
+ "type": "text",
446
+ "text": "We follow the GRPO algorithm [17] to train the model. For each question $\\mathbf{q}$ , the policy model generates a set of candidate responses $\\{O_1, O_2, \\dots, O_G\\}$ , computes the corresponding rewards",
447
+ "bbox": [
448
+ 169,
449
+ 883,
450
+ 823,
451
+ 912
452
+ ],
453
+ "page_idx": 2
454
+ },
455
+ {
456
+ "type": "page_number",
457
+ "text": "3",
458
+ "bbox": [
459
+ 493,
460
+ 935,
461
+ 503,
462
+ 946
463
+ ],
464
+ "page_idx": 2
465
+ },
466
+ {
467
+ "type": "image",
468
+ "img_path": "images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg",
469
+ "image_caption": [
470
+ "Figure 3: Cases of \"aha moment\", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively."
471
+ ],
472
+ "image_footnote": [],
473
+ "bbox": [
474
+ 176,
475
+ 89,
476
+ 823,
477
+ 392
478
+ ],
479
+ "page_idx": 3
480
+ },
481
+ {
482
+ "type": "text",
483
+ "text": "$\\{r_1, r_2, \\ldots, r_G\\}$ based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function:",
484
+ "bbox": [
485
+ 169,
486
+ 449,
487
+ 823,
488
+ 494
489
+ ],
490
+ "page_idx": 3
491
+ },
492
+ {
493
+ "type": "equation",
494
+ "text": "\n$$\nJ _ {G R P O} (\\theta) = \\mathbb {E} _ {[ q, \\{o _ {i} \\} ]} \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}} A _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} \\right] - \\beta \\mathbb {D} _ {K L} [ \\pi_ {\\theta} \\| \\pi_ {r e f} ] \\right\\} \\tag {1}\n$$\n",
495
+ "text_format": "latex",
496
+ "bbox": [
497
+ 197,
498
+ 503,
499
+ 823,
500
+ 532
501
+ ],
502
+ "page_idx": 3
503
+ },
504
+ {
505
+ "type": "text",
506
+ "text": "where $\\pi_{\\theta}$ and $\\pi_{\\theta_{old}}$ are the current and old policy, $\\epsilon$ and $\\beta$ are hyper-parameters, and $A_{i}$ is the advantages defined as:",
507
+ "bbox": [
508
+ 169,
509
+ 541,
510
+ 823,
511
+ 570
512
+ ],
513
+ "page_idx": 3
514
+ },
515
+ {
516
+ "type": "equation",
517
+ "text": "\n$$\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}. \\tag {2}\n$$\n",
518
+ "text_format": "latex",
519
+ "bbox": [
520
+ 375,
521
+ 574,
522
+ 823,
523
+ 608
524
+ ],
525
+ "page_idx": 3
526
+ },
527
+ {
528
+ "type": "text",
529
+ "text": "In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set $\\{O_i\\}$ are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise $\\mathcal{N}(0, 0.02^2)$ to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses.",
530
+ "bbox": [
531
+ 169,
532
+ 621,
533
+ 823,
534
+ 705
535
+ ],
536
+ "page_idx": 3
537
+ },
538
+ {
539
+ "type": "text",
540
+ "text": "3.3 Training Data and Template.",
541
+ "text_level": 1,
542
+ "bbox": [
543
+ 171,
544
+ 724,
545
+ 415,
546
+ 739
547
+ ],
548
+ "page_idx": 3
549
+ },
550
+ {
551
+ "type": "text",
552
+ "text": "We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer.",
553
+ "bbox": [
554
+ 169,
555
+ 752,
556
+ 823,
557
+ 851
558
+ ],
559
+ "page_idx": 3
560
+ },
561
+ {
562
+ "type": "text",
563
+ "text": "During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format.",
564
+ "bbox": [
565
+ 169,
566
+ 854,
567
+ 823,
568
+ 912
569
+ ],
570
+ "page_idx": 3
571
+ },
572
+ {
573
+ "type": "page_number",
574
+ "text": "4",
575
+ "bbox": [
576
+ 493,
577
+ 935,
578
+ 504,
579
+ 946
580
+ ],
581
+ "page_idx": 3
582
+ },
583
+ {
584
+ "type": "table",
585
+ "img_path": "images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg",
586
+ "table_caption": [],
587
+ "table_footnote": [],
588
+ "table_body": "<table><tr><td>Model</td><td>LLM size</td><td>Answer Type</td><td>MVBench</td><td>Video-MME (wo sub)</td><td>MLVU</td><td>MMVU (mc)</td></tr><tr><td>LLaMA-VID [10]</td><td>7B</td><td>Option</td><td>41.4</td><td>-</td><td>33.2</td><td>-</td></tr><tr><td>LLaVA-NeXT [12]</td><td>7B</td><td>Option</td><td>-</td><td>-</td><td>39.3</td><td>29.2</td></tr><tr><td>VideoLLaVA [11]</td><td>7B</td><td>Option</td><td>-</td><td>39.9</td><td>47.3</td><td>-</td></tr><tr><td>ShareGPT4Video [2]</td><td>8B</td><td>Option</td><td>-</td><td>39.9</td><td>46.4</td><td>-</td></tr><tr><td>LLaVA-Mini [26]</td><td>7B</td><td>Option</td><td>44.5</td><td>-</td><td>42.8</td><td>-</td></tr><tr><td>InternVideo2 [22]</td><td>8B</td><td>Option</td><td>-</td><td>41.9</td><td>-</td><td>39.0</td></tr><tr><td>TinyLLaVA-Video-SFT</td><td>3B</td><td>Option</td><td>49.0</td><td>42.2</td><td>49.2</td><td>46.1</td></tr><tr><td>TinyLLaVA-Video-ColdStart</td><td>3B</td><td>Reason</td><td>33.2</td><td>26.6</td><td>28.6</td><td>22.7</td></tr><tr><td>TinyLLaVA-Video-R1</td><td>3B</td><td>Reason</td><td>49.5</td><td>46.6</td><td>52.4</td><td>46.9</td></tr></table>",
589
+ "bbox": [
590
+ 173,
591
+ 88,
592
+ 823,
593
+ 281
594
+ ],
595
+ "page_idx": 4
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. \"Option\" indicates that the model only needs to answer with the selected choice, while \"Reason\" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface.",
600
+ "bbox": [
601
+ 169,
602
+ 290,
603
+ 823,
604
+ 361
605
+ ],
606
+ "page_idx": 4
607
+ },
608
+ {
609
+ "type": "text",
610
+ "text": "3.4 Reward Rules.",
611
+ "text_level": 1,
612
+ "bbox": [
613
+ 171,
614
+ 382,
615
+ 315,
616
+ 396
617
+ ],
618
+ "page_idx": 4
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:",
623
+ "bbox": [
624
+ 169,
625
+ 410,
626
+ 823,
627
+ 439
628
+ ],
629
+ "page_idx": 4
630
+ },
631
+ {
632
+ "type": "text",
633
+ "text": "Format reward. We require the thought process to be enclosed within $\\langle \\text{think} \\rangle \\langle \\text{/think} \\rangle$ , and the final answer to be enclosed within $\\langle \\text{answer} \\rangle \\langle \\text{/answer} \\rangle$ . These four tags can appear only once in the entire response, and if followed, the model will receive a format reward $FR = r_0 + LR$ . Here, $r_0$ represents the base reward for adhering to the required response format, and $LR$ is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:",
634
+ "bbox": [
635
+ 169,
636
+ 458,
637
+ 823,
638
+ 529
639
+ ],
640
+ "page_idx": 4
641
+ },
642
+ {
643
+ "type": "equation",
644
+ "text": "\n$$\nL R = \\min \\left(1, \\frac {L e n}{M L}\\right) \\times r _ {1}. \\tag {3}\n$$\n",
645
+ "text_format": "latex",
646
+ "bbox": [
647
+ 401,
648
+ 540,
649
+ 823,
650
+ 574
651
+ ],
652
+ "page_idx": 4
653
+ },
654
+ {
655
+ "type": "text",
656
+ "text": "Here, $Len$ represents the length of the response extracted from within the $<\\text{think}>$ $<\\text{/think}>$ tags, and $ML$ represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set $r_0 = r_1 = 0.5$, thus the format reward is limited to a maximum of 1.",
657
+ "bbox": [
658
+ 169,
659
+ 585,
660
+ 823,
661
+ 628
662
+ ],
663
+ "page_idx": 4
664
+ },
665
+ {
666
+ "type": "text",
667
+ "text": "Accuracy reward. We design the accuracy reward $AR$ based on the answer. We extract the final answer from <answer></answer> and compare it with the label. The model will receive an accuracy reward of $AR = r_2 > 0$ , if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. $AR = 0$ . To ensure that the accuracy reward and the format reward have equal importance, we set $r_2 = r_0 + r_1$ in our experiments.",
668
+ "bbox": [
669
+ 169,
670
+ 647,
671
+ 823,
672
+ 732
673
+ ],
674
+ "page_idx": 4
675
+ },
676
+ {
677
+ "type": "text",
678
+ "text": "To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. Instead, we introduce a penalty for incorrect answers, with the total reward $R$ defined by the following formula:",
679
+ "bbox": [
680
+ 169,
681
+ 750,
682
+ 823,
683
+ 808
684
+ ],
685
+ "page_idx": 4
686
+ },
687
+ {
688
+ "type": "equation",
689
+ "text": "\n$$\nR = \\left\\{ \\begin{array}{l l} A R + F R, & \\text{if } F R > 0 \\text{ and } A R = r _ {2} \\\\ - F R, & \\text{if } F R > 0 \\text{ and } A R = 0 \\\\ - \\left(r _ {0} + r _ {1} + r _ {2}\\right), & \\text{if } F R = 0 \\end{array} \\right. \\tag {4}\n$$\n",
690
+ "text_format": "latex",
691
+ "bbox": [
692
+ 323,
693
+ 820,
694
+ 823,
695
+ 871
696
+ ],
697
+ "page_idx": 4
698
+ },
699
+ {
700
+ "type": "text",
701
+ "text": "When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred.",
702
+ "bbox": [
703
+ 169,
704
+ 883,
705
+ 823,
706
+ 912
707
+ ],
708
+ "page_idx": 4
709
+ },
710
+ {
711
+ "type": "page_number",
712
+ "text": "5",
713
+ "bbox": [
714
+ 493,
715
+ 935,
716
+ 503,
717
+ 946
718
+ ],
719
+ "page_idx": 4
720
+ },
721
+ {
722
+ "type": "text",
723
+ "text": "4 Experiments",
724
+ "text_level": 1,
725
+ "bbox": [
726
+ 171,
727
+ 89,
728
+ 313,
729
+ 107
730
+ ],
731
+ "page_idx": 5
732
+ },
733
+ {
734
+ "type": "text",
735
+ "text": "4.1 Experimental Settings",
736
+ "text_level": 1,
737
+ "bbox": [
738
+ 171,
739
+ 126,
740
+ 369,
741
+ 142
742
+ ],
743
+ "page_idx": 5
744
+ },
745
+ {
746
+ "type": "text",
747
+ "text": "We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training.",
748
+ "bbox": [
749
+ 169,
750
+ 154,
751
+ 823,
752
+ 198
753
+ ],
754
+ "page_idx": 5
755
+ },
756
+ {
757
+ "type": "text",
758
+ "text": "To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1.",
759
+ "bbox": [
760
+ 169,
761
+ 203,
762
+ 823,
763
+ 258
764
+ ],
765
+ "page_idx": 5
766
+ },
767
+ {
768
+ "type": "text",
769
+ "text": "For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities.",
770
+ "bbox": [
771
+ 169,
772
+ 265,
773
+ 825,
774
+ 321
775
+ ],
776
+ "page_idx": 5
777
+ },
778
+ {
779
+ "type": "text",
780
+ "text": "4.2 Main Results and Aha Moment",
781
+ "text_level": 1,
782
+ "bbox": [
783
+ 171,
784
+ 347,
785
+ 433,
786
+ 361
787
+ ],
788
+ "page_idx": 5
789
+ },
790
+ {
791
+ "type": "text",
792
+ "text": "As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities.",
793
+ "bbox": [
794
+ 169,
795
+ 376,
796
+ 823,
797
+ 460
798
+ ],
799
+ "page_idx": 5
800
+ },
801
+ {
802
+ "type": "image",
803
+ "img_path": "images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg",
804
+ "image_caption": [
805
+ "(a) Evolution in completion length."
806
+ ],
807
+ "image_footnote": [],
808
+ "bbox": [
809
+ 173,
810
+ 481,
811
+ 380,
812
+ 609
813
+ ],
814
+ "page_idx": 5
815
+ },
816
+ {
817
+ "type": "image",
818
+ "img_path": "images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg",
819
+ "image_caption": [
820
+ "Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training."
821
+ ],
822
+ "image_footnote": [],
823
+ "bbox": [
824
+ 395,
825
+ 481,
826
+ 602,
827
+ 609
828
+ ],
829
+ "page_idx": 5
830
+ },
831
+ {
832
+ "type": "image",
833
+ "img_path": "images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg",
834
+ "image_caption": [
835
+ "(b) Evolution in accuracy reward.",
836
+ "(c) Evolution in format reward."
837
+ ],
838
+ "image_footnote": [],
839
+ "bbox": [
840
+ 614,
841
+ 481,
842
+ 821,
843
+ 609
844
+ ],
845
+ "page_idx": 5
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models.",
850
+ "bbox": [
851
+ 169,
852
+ 704,
853
+ 823,
854
+ 787
855
+ ],
856
+ "page_idx": 5
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the \"aha moment\" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection.",
861
+ "bbox": [
862
+ 169,
863
+ 792,
864
+ 823,
865
+ 864
866
+ ],
867
+ "page_idx": 5
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking.",
872
+ "bbox": [
873
+ 169,
874
+ 869,
875
+ 823,
876
+ 912
877
+ ],
878
+ "page_idx": 5
879
+ },
880
+ {
881
+ "type": "page_number",
882
+ "text": "6",
883
+ "bbox": [
884
+ 493,
885
+ 936,
886
+ 504,
887
+ 946
888
+ ],
889
+ "page_idx": 5
890
+ },
891
+ {
892
+ "type": "text",
893
+ "text": "4.3 Ablation Study",
894
+ "text_level": 1,
895
+ "bbox": [
896
+ 171,
897
+ 90,
898
+ 318,
899
+ 104
900
+ ],
901
+ "page_idx": 6
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1.",
906
+ "bbox": [
907
+ 169,
908
+ 116,
909
+ 823,
910
+ 146
911
+ ],
912
+ "page_idx": 6
913
+ },
914
+ {
915
+ "type": "text",
916
+ "text": "4.3.1 Impact of Cold-Start Data",
917
+ "text_level": 1,
918
+ "bbox": [
919
+ 169,
920
+ 159,
921
+ 410,
922
+ 174
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as <think> </think> <answer> option </answer>. We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models.",
929
+ "bbox": [
930
+ 169,
931
+ 183,
932
+ 823,
933
+ 267
934
+ ],
935
+ "page_idx": 6
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appears during the experiments. At the same time, the model also learns to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training.",
940
+ "bbox": [
941
+ 169,
942
+ 272,
943
+ 825,
944
+ 343
945
+ ],
946
+ "page_idx": 6
947
+ },
948
+ {
949
+ "type": "text",
950
+ "text": "4.3.2 Impact of Refinement of Format Rewards",
951
+ "text_level": 1,
952
+ "bbox": [
953
+ 169,
954
+ 356,
955
+ 516,
956
+ 371
957
+ ],
958
+ "page_idx": 6
959
+ },
960
+ {
961
+ "type": "text",
962
+ "text": "In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4.",
963
+ "bbox": [
964
+ 169,
965
+ 380,
966
+ 517,
967
+ 630
968
+ ],
969
+ "page_idx": 6
970
+ },
971
+ {
972
+ "type": "image",
973
+ "img_path": "images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg",
974
+ "image_caption": [
975
+ "Figure 5: The variation in response length during training under different settings."
976
+ ],
977
+ "image_footnote": [],
978
+ "bbox": [
979
+ 532,
980
+ 383,
981
+ 821,
982
+ 563
983
+ ],
984
+ "page_idx": 6
985
+ },
986
+ {
987
+ "type": "text",
988
+ "text": "4.3.3 Other Experimental Explorations",
989
+ "text_level": 1,
990
+ "bbox": [
991
+ 169,
992
+ 643,
993
+ 460,
994
+ 657
995
+ ],
996
+ "page_idx": 6
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models.",
1001
+ "bbox": [
1002
+ 169,
1003
+ 667,
1004
+ 826,
1005
+ 820
1006
+ ],
1007
+ "page_idx": 6
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "5 Conclusion and Future Work",
1012
+ "text_level": 1,
1013
+ "bbox": [
1014
+ 169,
1015
+ 838,
1016
+ 450,
1017
+ 854
1018
+ ],
1019
+ "page_idx": 6
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "text": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of \"aha",
1024
+ "bbox": [
1025
+ 169,
1026
+ 869,
1027
+ 823,
1028
+ 912
1029
+ ],
1030
+ "page_idx": 6
1031
+ },
1032
+ {
1033
+ "type": "page_number",
1034
+ "text": "7",
1035
+ "bbox": [
1036
+ 493,
1037
+ 935,
1038
+ 504,
1039
+ 946
1040
+ ],
1041
+ "page_idx": 6
1042
+ },
1043
+ {
1044
+ "type": "image",
1045
+ "img_path": "images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg",
1046
+ "image_caption": [
1047
+ "Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark."
1048
+ ],
1049
+ "image_footnote": [],
1050
+ "bbox": [
1051
+ 178,
1052
+ 93,
1053
+ 821,
1054
+ 334
1055
+ ],
1056
+ "page_idx": 7
1057
+ },
1058
+ {
1059
+ "type": "text",
1060
+ "text": "moment\". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:",
1061
+ "bbox": [
1062
+ 169,
1063
+ 431,
1064
+ 823,
1065
+ 487
1066
+ ],
1067
+ "page_idx": 7
1068
+ },
1069
+ {
1070
+ "type": "list",
1071
+ "sub_type": "text",
1072
+ "list_items": [
1073
+ "- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data.",
1074
+ "- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment."
1075
+ ],
1076
+ "bbox": [
1077
+ 215,
1078
+ 500,
1079
+ 823,
1080
+ 604
1081
+ ],
1082
+ "page_idx": 7
1083
+ },
1084
+ {
1085
+ "type": "text",
1086
+ "text": "Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), the Fundamental Research Funds for the Central Universities.",
1087
+ "bbox": [
1088
+ 169,
1089
+ 619,
1090
+ 823,
1091
+ 672
1092
+ ],
1093
+ "page_idx": 7
1094
+ },
1095
+ {
1096
+ "type": "text",
1097
+ "text": "References",
1098
+ "text_level": 1,
1099
+ "bbox": [
1100
+ 173,
1101
+ 693,
1102
+ 267,
1103
+ 708
1104
+ ],
1105
+ "page_idx": 7
1106
+ },
1107
+ {
1108
+ "type": "list",
1109
+ "sub_type": "ref_text",
1110
+ "list_items": [
1111
+ "[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3",
1112
+ "[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5",
1113
+ "[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3",
1114
+ "[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6"
1115
+ ],
1116
+ "bbox": [
1117
+ 179,
1118
+ 715,
1119
+ 825,
1120
+ 911
1121
+ ],
1122
+ "page_idx": 7
1123
+ },
1124
+ {
1125
+ "type": "page_number",
1126
+ "text": "8",
1127
+ "bbox": [
1128
+ 493,
1129
+ 935,
1130
+ 503,
1131
+ 946
1132
+ ],
1133
+ "page_idx": 7
1134
+ },
1135
+ {
1136
+ "type": "list",
1137
+ "sub_type": "ref_text",
1138
+ "list_items": [
1139
+ "[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2",
1140
+ "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2",
1141
+ "[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2",
1142
+ "[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.3",
1143
+ "[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6",
1144
+ "[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 5",
1145
+ "[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5",
1146
+ "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llavanext: Improved reasoning,OCR, and world knowledge, 2024.5",
1147
+ "[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7",
1148
+ "[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1",
1149
+ "[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1",
1150
+ "[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3",
1151
+ "[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3",
1152
+ "[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1",
1153
+ "[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2",
1154
+ "[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7",
1155
+ "[21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025.1",
1156
+ "[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5",
1157
+ "[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4"
1158
+ ],
1159
+ "bbox": [
1160
+ 171,
1161
+ 90,
1162
+ 883,
1163
+ 912
1164
+ ],
1165
+ "page_idx": 8
1166
+ },
1167
+ {
1168
+ "type": "page_number",
1169
+ "text": "9",
1170
+ "bbox": [
1171
+ 493,
1172
+ 935,
1173
+ 504,
1174
+ 946
1175
+ ],
1176
+ "page_idx": 8
1177
+ },
1178
+ {
1179
+ "type": "list",
1180
+ "sub_type": "ref_text",
1181
+ "list_items": [
1182
+ "[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7",
1183
+ "[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3",
1184
+ "[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5",
1185
+ "[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3",
1186
+ "[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4",
1187
+ "[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6",
1188
+ "[30] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3",
1189
+ "[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6"
1190
+ ],
1191
+ "bbox": [
1192
+ 171,
1193
+ 90,
1194
+ 825,
1195
+ 428
1196
+ ],
1197
+ "page_idx": 9
1198
+ },
1199
+ {
1200
+ "type": "page_number",
1201
+ "text": "10",
1202
+ "bbox": [
1203
+ 490,
1204
+ 935,
1205
+ 508,
1206
+ 946
1207
+ ],
1208
+ "page_idx": 9
1209
+ }
1210
+ ]
data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_model.json ADDED
@@ -0,0 +1,1584 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.263,
8
+ 0.061,
9
+ 0.709
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2504.09641v1 [cs.CV] 13 Apr 2025"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.187,
18
+ 0.122,
19
+ 0.817,
20
+ 0.175
21
+ ],
22
+ "angle": 0,
23
+ "content": "TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.246,
29
+ 0.224,
30
+ 0.768,
31
+ 0.243
32
+ ],
33
+ "angle": 0,
34
+ "content": "Xingjian Zhang\\(^{1,*}\\) Siwei Wen\\(^{1,2,*}\\) Wenjun Wu\\(^{1,2,3}\\) Lei Huang\\(^{1,2,3,\\boxtimes}\\)"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.186,
40
+ 0.254,
41
+ 0.812,
42
+ 0.298
43
+ ],
44
+ "angle": 0,
45
+ "content": "\\(^{1}\\)SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China \n\\(^{2}\\)Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University \n\\(^{3}\\)Hangzhou International Innovation Institute, Beihang University, Hangzhou, China"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.394,
51
+ 0.309,
52
+ 0.605,
53
+ 0.326
54
+ ],
55
+ "angle": 0,
56
+ "content": "{huangleiai}@buaa.edu.cn"
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.46,
62
+ 0.36,
63
+ 0.542,
64
+ 0.377
65
+ ],
66
+ "angle": 0,
67
+ "content": "Abstract"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.23,
73
+ 0.393,
74
+ 0.77,
75
+ 0.617
76
+ ],
77
+ "angle": 0,
78
+ "content": "Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of \"aha moments\". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1."
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.172,
84
+ 0.646,
85
+ 0.316,
86
+ 0.663
87
+ ],
88
+ "angle": 0,
89
+ "content": "1 Introduction"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.17,
95
+ 0.678,
96
+ 0.827,
97
+ 0.779
98
+ ],
99
+ "angle": 0,
100
+ "content": "Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.17,
106
+ 0.782,
107
+ 0.829,
108
+ 0.882
109
+ ],
110
+ "angle": 0,
111
+ "content": "Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary."
112
+ },
113
+ {
114
+ "type": "page_footnote",
115
+ "bbox": [
116
+ 0.191,
117
+ 0.894,
118
+ 0.566,
119
+ 0.909
120
+ ],
121
+ "angle": 0,
122
+ "content": "\\*denotes equal contributor; \\(\\boxtimes\\) denotes corresponding author."
123
+ }
124
+ ],
125
+ [
126
+ {
127
+ "type": "image",
128
+ "bbox": [
129
+ 0.187,
130
+ 0.1,
131
+ 0.812,
132
+ 0.179
133
+ ],
134
+ "angle": 0,
135
+ "content": null
136
+ },
137
+ {
138
+ "type": "image",
139
+ "bbox": [
140
+ 0.183,
141
+ 0.196,
142
+ 0.212,
143
+ 0.217
144
+ ],
145
+ "angle": 0,
146
+ "content": null
147
+ },
148
+ {
149
+ "type": "text",
150
+ "bbox": [
151
+ 0.214,
152
+ 0.192,
153
+ 0.792,
154
+ 0.221
155
+ ],
156
+ "angle": 0,
157
+ "content": "Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags."
158
+ },
159
+ {
160
+ "type": "title",
161
+ "bbox": [
162
+ 0.19,
163
+ 0.233,
164
+ 0.316,
165
+ 0.242
166
+ ],
167
+ "angle": 0,
168
+ "content": "Owen2-VL-7B-Instruct:"
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.184,
174
+ 0.242,
175
+ 0.794,
176
+ 0.27
177
+ ],
178
+ "angle": 0,
179
+ "content": "<think> The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. </think> <answer> C. pick up </answer>"
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.192,
185
+ 0.281,
186
+ 0.385,
187
+ 0.293
188
+ ],
189
+ "angle": 0,
190
+ "content": "TinyLLaVA-Video-SFT: C. pick up."
191
+ },
192
+ {
193
+ "type": "title",
194
+ "bbox": [
195
+ 0.191,
196
+ 0.306,
197
+ 0.304,
198
+ 0.314
199
+ ],
200
+ "angle": 0,
201
+ "content": "TinyLLaVA-Video-R1:"
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.184,
207
+ 0.315,
208
+ 0.8,
209
+ 0.414
210
+ ],
211
+ "angle": 0,
212
+ "content": "<think> Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. </think> <answer> C </answer>"
213
+ },
214
+ {
215
+ "type": "image_caption",
216
+ "bbox": [
217
+ 0.171,
218
+ 0.435,
219
+ 0.825,
220
+ 0.478
221
+ ],
222
+ "angle": 0,
223
+ "content": "Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue)."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.171,
229
+ 0.5,
230
+ 0.825,
231
+ 0.598
232
+ ],
233
+ "angle": 0,
234
+ "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of \"aha moments\", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models."
235
+ },
236
+ {
237
+ "type": "title",
238
+ "bbox": [
239
+ 0.172,
240
+ 0.622,
241
+ 0.323,
242
+ 0.637
243
+ ],
244
+ "angle": 0,
245
+ "content": "2 Related Work"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.171,
251
+ 0.656,
252
+ 0.827,
253
+ 0.795
254
+ ],
255
+ "angle": 0,
256
+ "content": "Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as \"aha moments\" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.171,
262
+ 0.815,
263
+ 0.827,
264
+ 0.913
265
+ ],
266
+ "angle": 0,
267
+ "content": "Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance."
268
+ },
269
+ {
270
+ "type": "page_number",
271
+ "bbox": [
272
+ 0.494,
273
+ 0.936,
274
+ 0.505,
275
+ 0.948
276
+ ],
277
+ "angle": 0,
278
+ "content": "2"
279
+ }
280
+ ],
281
+ [
282
+ {
283
+ "type": "image",
284
+ "bbox": [
285
+ 0.187,
286
+ 0.1,
287
+ 0.81,
288
+ 0.182
289
+ ],
290
+ "angle": 0,
291
+ "content": null
292
+ },
293
+ {
294
+ "type": "image",
295
+ "bbox": [
296
+ 0.184,
297
+ 0.197,
298
+ 0.212,
299
+ 0.218
300
+ ],
301
+ "angle": 0,
302
+ "content": null
303
+ },
304
+ {
305
+ "type": "text",
306
+ "bbox": [
307
+ 0.214,
308
+ 0.193,
309
+ 0.813,
310
+ 0.222
311
+ ],
312
+ "angle": 0,
313
+ "content": "Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags."
314
+ },
315
+ {
316
+ "type": "title",
317
+ "bbox": [
318
+ 0.19,
319
+ 0.234,
320
+ 0.316,
321
+ 0.243
322
+ ],
323
+ "angle": 0,
324
+ "content": "Owen2-VL-7B-Instruct:"
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.184,
330
+ 0.243,
331
+ 0.81,
332
+ 0.271
333
+ ],
334
+ "angle": 0,
335
+ "content": "<think> The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. </think> <answer> A. Bone grafting. </answer>"
336
+ },
337
+ {
338
+ "type": "title",
339
+ "bbox": [
340
+ 0.192,
341
+ 0.282,
342
+ 0.333,
343
+ 0.293
344
+ ],
345
+ "angle": 0,
346
+ "content": "TinyLLaVA-Video-SFT: A."
347
+ },
348
+ {
349
+ "type": "title",
350
+ "bbox": [
351
+ 0.191,
352
+ 0.307,
353
+ 0.304,
354
+ 0.315
355
+ ],
356
+ "angle": 0,
357
+ "content": "TinyLLaVA-Video-R1:"
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.183,
363
+ 0.315,
364
+ 0.81,
365
+ 0.414
366
+ ],
367
+ "angle": 0,
368
+ "content": "<think> To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. </think> <answer> A </answer>"
369
+ },
370
+ {
371
+ "type": "image_caption",
372
+ "bbox": [
373
+ 0.171,
374
+ 0.436,
375
+ 0.825,
376
+ 0.478
377
+ ],
378
+ "angle": 0,
379
+ "content": "Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning."
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.171,
385
+ 0.491,
386
+ 0.825,
387
+ 0.588
388
+ ],
389
+ "angle": 0,
390
+ "content": "Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs."
391
+ },
392
+ {
393
+ "type": "title",
394
+ "bbox": [
395
+ 0.172,
396
+ 0.603,
397
+ 0.28,
398
+ 0.618
399
+ ],
400
+ "angle": 0,
401
+ "content": "3 Methods"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.171,
407
+ 0.63,
408
+ 0.825,
409
+ 0.698
410
+ ],
411
+ "angle": 0,
412
+ "content": "To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications."
413
+ },
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.172,
418
+ 0.71,
419
+ 0.343,
420
+ 0.725
421
+ ],
422
+ "angle": 0,
423
+ "content": "3.1 TinyLLaVA-Video"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.171,
429
+ 0.735,
430
+ 0.825,
431
+ 0.848
432
+ ],
433
+ "angle": 0,
434
+ "content": "TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning."
435
+ },
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.172,
440
+ 0.858,
441
+ 0.526,
442
+ 0.873
443
+ ],
444
+ "angle": 0,
445
+ "content": "3.2 Group Relative Policy Optimization (GRPO)"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.171,
451
+ 0.884,
452
+ 0.825,
453
+ 0.913
454
+ ],
455
+ "angle": 0,
456
+ "content": "We follow the GRPO algorithm [17] to train the model. For each question \\( \\mathbf{q} \\), the policy model generates a set of candidate responses \\( \\{O_1, O_2, \\dots, O_G\\} \\), computes the corresponding rewards"
457
+ },
458
+ {
459
+ "type": "page_number",
460
+ "bbox": [
461
+ 0.494,
462
+ 0.936,
463
+ 0.504,
464
+ 0.948
465
+ ],
466
+ "angle": 0,
467
+ "content": "3"
468
+ }
469
+ ],
470
+ [
471
+ {
472
+ "type": "image",
473
+ "bbox": [
474
+ 0.177,
475
+ 0.09,
476
+ 0.824,
477
+ 0.393
478
+ ],
479
+ "angle": 0,
480
+ "content": null
481
+ },
482
+ {
483
+ "type": "image_caption",
484
+ "bbox": [
485
+ 0.171,
486
+ 0.401,
487
+ 0.825,
488
+ 0.43
489
+ ],
490
+ "angle": 0,
491
+ "content": "Figure 3: Cases of \"aha moment\", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively."
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.171,
497
+ 0.45,
498
+ 0.825,
499
+ 0.495
500
+ ],
501
+ "angle": 0,
502
+ "content": "\\(\\{r_1, r_2, \\ldots, r_G\\}\\) based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function:"
503
+ },
504
+ {
505
+ "type": "equation",
506
+ "bbox": [
507
+ 0.198,
508
+ 0.504,
509
+ 0.825,
510
+ 0.533
511
+ ],
512
+ "angle": 0,
513
+ "content": "\\[\nJ _ {G R P O} (\\theta) = \\mathbb {E} _ {[ q, \\{o _ {i} \\} ]} \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}} A _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} \\right] - \\beta \\mathbb {D} _ {K L} [ \\pi_ {\\theta} \\| \\pi_ {r e f} ] \\right\\} \\tag {1}\n\\]"
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.171,
519
+ 0.542,
520
+ 0.825,
521
+ 0.571
522
+ ],
523
+ "angle": 0,
524
+ "content": "where \\(\\pi_{\\theta}\\) and \\(\\pi_{\\theta_{old}}\\) are the current and old policy, \\(\\epsilon\\) and \\(\\beta\\) are hyper-parameters, and \\(A_{i}\\) is the advantages defined as:"
525
+ },
526
+ {
527
+ "type": "equation",
528
+ "bbox": [
529
+ 0.376,
530
+ 0.575,
531
+ 0.825,
532
+ 0.609
533
+ ],
534
+ "angle": 0,
535
+ "content": "\\[\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}. \\tag {2}\n\\]"
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.171,
541
+ 0.622,
542
+ 0.825,
543
+ 0.707
544
+ ],
545
+ "angle": 0,
546
+ "content": "In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set \\(\\{O_i\\}\\) are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise \\(\\mathcal{N}(0, 0.02^2)\\) to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses."
547
+ },
548
+ {
549
+ "type": "title",
550
+ "bbox": [
551
+ 0.172,
552
+ 0.726,
553
+ 0.416,
554
+ 0.741
555
+ ],
556
+ "angle": 0,
557
+ "content": "3.3 Training Data and Template."
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.171,
563
+ 0.753,
564
+ 0.825,
565
+ 0.852
566
+ ],
567
+ "angle": 0,
568
+ "content": "We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer."
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.171,
574
+ 0.856,
575
+ 0.825,
576
+ 0.913
577
+ ],
578
+ "angle": 0,
579
+ "content": "During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format."
580
+ },
581
+ {
582
+ "type": "page_number",
583
+ "bbox": [
584
+ 0.494,
585
+ 0.936,
586
+ 0.505,
587
+ 0.948
588
+ ],
589
+ "angle": 0,
590
+ "content": "4"
591
+ }
592
+ ],
593
+ [
594
+ {
595
+ "type": "table",
596
+ "bbox": [
597
+ 0.174,
598
+ 0.089,
599
+ 0.825,
600
+ 0.282
601
+ ],
602
+ "angle": 0,
603
+ "content": "<table><tr><td>Model</td><td>LLM size</td><td>Answer Type</td><td>MVBench</td><td>Video-MME (wo sub)</td><td>MLVU</td><td>MMVU (mc)</td></tr><tr><td>LLaMA-VID [10]</td><td>7B</td><td>Option</td><td>41.4</td><td>-</td><td>33.2</td><td>-</td></tr><tr><td>LLaVA-NeXT [12]</td><td>7B</td><td>Option</td><td>-</td><td>-</td><td>39.3</td><td>29.2</td></tr><tr><td>VideoLLaVA [11]</td><td>7B</td><td>Option</td><td>-</td><td>39.9</td><td>47.3</td><td>-</td></tr><tr><td>ShareGPT4Video [2]</td><td>8B</td><td>Option</td><td>-</td><td>39.9</td><td>46.4</td><td>-</td></tr><tr><td>LLaVA-Mini [26]</td><td>7B</td><td>Option</td><td>44.5</td><td>-</td><td>42.8</td><td>-</td></tr><tr><td>InternVideo2 [22]</td><td>8B</td><td>Option</td><td>-</td><td>41.9</td><td>-</td><td>39.0</td></tr><tr><td>TinyLLaVA-Video-SFT</td><td>3B</td><td>Option</td><td>49.0</td><td>42.2</td><td>49.2</td><td>46.1</td></tr><tr><td>TinyLLaVA-Video-ColdStart</td><td>3B</td><td>Reason</td><td>33.2</td><td>26.6</td><td>28.6</td><td>22.7</td></tr><tr><td>TinyLLaVA-Video-R1</td><td>3B</td><td>Reason</td><td>49.5</td><td>46.6</td><td>52.4</td><td>46.9</td></tr></table>"
604
+ },
605
+ {
606
+ "type": "table_caption",
607
+ "bbox": [
608
+ 0.171,
609
+ 0.291,
610
+ 0.825,
611
+ 0.362
612
+ ],
613
+ "angle": 0,
614
+ "content": "Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. \"Option\" indicates that the model only needs to answer with the selected choice, while \"Reason\" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface."
615
+ },
616
+ {
617
+ "type": "title",
618
+ "bbox": [
619
+ 0.172,
620
+ 0.383,
621
+ 0.316,
622
+ 0.397
623
+ ],
624
+ "angle": 0,
625
+ "content": "3.4 Reward Rules."
626
+ },
627
+ {
628
+ "type": "text",
629
+ "bbox": [
630
+ 0.171,
631
+ 0.411,
632
+ 0.825,
633
+ 0.44
634
+ ],
635
+ "angle": 0,
636
+ "content": "We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:"
637
+ },
638
+ {
639
+ "type": "text",
640
+ "bbox": [
641
+ 0.171,
642
+ 0.459,
643
+ 0.825,
644
+ 0.53
645
+ ],
646
+ "angle": 0,
647
+ "content": "Format reward. We require the thought process to be enclosed within \\( \\langle \\text{think} \\rangle \\langle \\text{/think} \\rangle \\), and the final answer to be enclosed within \\( \\langle \\text{answer} \\rangle \\langle \\text{/answer} \\rangle \\). These four tags can appear only once in the entire response, and if followed, the model will receive a format reward \\( FR = r_0 + LR \\). Here, \\( r_0 \\) represents the base reward for adhering to the required response format, and \\( LR \\) is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:"
648
+ },
649
+ {
650
+ "type": "equation",
651
+ "bbox": [
652
+ 0.402,
653
+ 0.541,
654
+ 0.825,
655
+ 0.575
656
+ ],
657
+ "angle": 0,
658
+ "content": "\\[\nLR = \\min\\left(1, \\frac{Len}{ML}\\right) \\times r_{1}. \\tag{3}\n\\]"
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.171,
664
+ 0.586,
665
+ 0.825,
666
+ 0.629
667
+ ],
668
+ "angle": 0,
669
+ "content": "Here, \\( Len \\) represents the length of the response extracted from within the \\( <\\text{think}> \\) \\( </\\text{think}> \\) tags, and \\( ML \\) represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set \\( r_0 = r_1 = 0.5 \\), thus the format reward is limited to a maximum of 1."
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.171,
675
+ 0.648,
676
+ 0.825,
677
+ 0.733
678
+ ],
679
+ "angle": 0,
680
+ "content": "Accuracy reward. We design the accuracy reward \\( AR \\) based on the answer. We extract the final answer from <answer></answer> and compare it with the label. The model will receive an accuracy reward of \\( AR = r_2 > 0 \\), if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. \\( AR = 0 \\). To ensure that the accuracy reward and the format reward have equal importance, we set \\( r_2 = r_0 + r_1 \\) in our experiments."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.171,
686
+ 0.751,
687
+ 0.825,
688
+ 0.809
689
+ ],
690
+ "angle": 0,
691
+ "content": "To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. Instead, we introduce a penalty for incorrect answers, with the total reward \\( R \\) defined by the following formula:"
692
+ },
693
+ {
694
+ "type": "equation",
695
+ "bbox": [
696
+ 0.324,
697
+ 0.821,
698
+ 0.825,
699
+ 0.872
700
+ ],
701
+ "angle": 0,
702
+ "content": "\\[\nR = \\left\\{ \\begin{array}{ll} AR + FR, & \\text{if } FR > 0 \\text{ and } AR = r_{2} \\\\ -FR, & \\text{if } FR > 0 \\text{ and } AR = 0 \\\\ -\\left(r_{0} + r_{1} + r_{2}\\right), & \\text{if } FR = 0 \\end{array} \\right. \\tag{4}\n\\]"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.171,
708
+ 0.884,
709
+ 0.825,
710
+ 0.913
711
+ ],
712
+ "angle": 0,
713
+ "content": "When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred."
714
+ },
715
+ {
716
+ "type": "page_number",
717
+ "bbox": [
718
+ 0.494,
719
+ 0.936,
720
+ 0.504,
721
+ 0.948
722
+ ],
723
+ "angle": 0,
724
+ "content": "5"
725
+ }
726
+ ],
727
+ [
728
+ {
729
+ "type": "title",
730
+ "bbox": [
731
+ 0.172,
732
+ 0.09,
733
+ 0.314,
734
+ 0.108
735
+ ],
736
+ "angle": 0,
737
+ "content": "4 Experiments"
738
+ },
739
+ {
740
+ "type": "title",
741
+ "bbox": [
742
+ 0.172,
743
+ 0.127,
744
+ 0.37,
745
+ 0.143
746
+ ],
747
+ "angle": 0,
748
+ "content": "4.1 Experimental Settings"
749
+ },
750
+ {
751
+ "type": "text",
752
+ "bbox": [
753
+ 0.171,
754
+ 0.155,
755
+ 0.825,
756
+ 0.199
757
+ ],
758
+ "angle": 0,
759
+ "content": "We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training."
760
+ },
761
+ {
762
+ "type": "text",
763
+ "bbox": [
764
+ 0.171,
765
+ 0.204,
766
+ 0.825,
767
+ 0.26
768
+ ],
769
+ "angle": 0,
770
+ "content": "To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1."
771
+ },
772
+ {
773
+ "type": "text",
774
+ "bbox": [
775
+ 0.171,
776
+ 0.266,
777
+ 0.826,
778
+ 0.323
779
+ ],
780
+ "angle": 0,
781
+ "content": "For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities."
782
+ },
783
+ {
784
+ "type": "title",
785
+ "bbox": [
786
+ 0.172,
787
+ 0.348,
788
+ 0.434,
789
+ 0.362
790
+ ],
791
+ "angle": 0,
792
+ "content": "4.2 Main Results and Aha Moment"
793
+ },
794
+ {
795
+ "type": "text",
796
+ "bbox": [
797
+ 0.171,
798
+ 0.377,
799
+ 0.825,
800
+ 0.462
801
+ ],
802
+ "angle": 0,
803
+ "content": "As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities."
804
+ },
805
+ {
806
+ "type": "image",
807
+ "bbox": [
808
+ 0.174,
809
+ 0.482,
810
+ 0.382,
811
+ 0.61
812
+ ],
813
+ "angle": 0,
814
+ "content": null
815
+ },
816
+ {
817
+ "type": "image_caption",
818
+ "bbox": [
819
+ 0.174,
820
+ 0.617,
821
+ 0.382,
822
+ 0.631
823
+ ],
824
+ "angle": 0,
825
+ "content": "(a) Evolution in completion length."
826
+ },
827
+ {
828
+ "type": "image",
829
+ "bbox": [
830
+ 0.396,
831
+ 0.482,
832
+ 0.603,
833
+ 0.61
834
+ ],
835
+ "angle": 0,
836
+ "content": null
837
+ },
838
+ {
839
+ "type": "image_caption",
840
+ "bbox": [
841
+ 0.4,
842
+ 0.617,
843
+ 0.598,
844
+ 0.631
845
+ ],
846
+ "angle": 0,
847
+ "content": "(b) Evolution in accuracy reward."
848
+ },
849
+ {
850
+ "type": "image",
851
+ "bbox": [
852
+ 0.615,
853
+ 0.482,
854
+ 0.822,
855
+ 0.61
856
+ ],
857
+ "angle": 0,
858
+ "content": null
859
+ },
860
+ {
861
+ "type": "image_caption",
862
+ "bbox": [
863
+ 0.627,
864
+ 0.617,
865
+ 0.812,
866
+ 0.631
867
+ ],
868
+ "angle": 0,
869
+ "content": "(c) Evolution in format reward."
870
+ },
871
+ {
872
+ "type": "image_caption",
873
+ "bbox": [
874
+ 0.171,
875
+ 0.639,
876
+ 0.825,
877
+ 0.681
878
+ ],
879
+ "angle": 0,
880
+ "content": "Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training."
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.171,
886
+ 0.705,
887
+ 0.825,
888
+ 0.788
889
+ ],
890
+ "angle": 0,
891
+ "content": "As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models."
892
+ },
893
+ {
894
+ "type": "text",
895
+ "bbox": [
896
+ 0.171,
897
+ 0.794,
898
+ 0.825,
899
+ 0.865
900
+ ],
901
+ "angle": 0,
902
+ "content": "Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the \"aha moment\" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection."
903
+ },
904
+ {
905
+ "type": "text",
906
+ "bbox": [
907
+ 0.171,
908
+ 0.87,
909
+ 0.825,
910
+ 0.914
911
+ ],
912
+ "angle": 0,
913
+ "content": "As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking."
914
+ },
915
+ {
916
+ "type": "page_number",
917
+ "bbox": [
918
+ 0.494,
919
+ 0.937,
920
+ 0.505,
921
+ 0.948
922
+ ],
923
+ "angle": 0,
924
+ "content": "6"
925
+ }
926
+ ],
927
+ [
928
+ {
929
+ "type": "title",
930
+ "bbox": [
931
+ 0.172,
932
+ 0.092,
933
+ 0.32,
934
+ 0.106
935
+ ],
936
+ "angle": 0,
937
+ "content": "4.3 Ablation Study"
938
+ },
939
+ {
940
+ "type": "text",
941
+ "bbox": [
942
+ 0.171,
943
+ 0.117,
944
+ 0.825,
945
+ 0.147
946
+ ],
947
+ "angle": 0,
948
+ "content": "In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1."
949
+ },
950
+ {
951
+ "type": "title",
952
+ "bbox": [
953
+ 0.171,
954
+ 0.16,
955
+ 0.411,
956
+ 0.175
957
+ ],
958
+ "angle": 0,
959
+ "content": "4.3.1 Impact of Cold-Start Data"
960
+ },
961
+ {
962
+ "type": "text",
963
+ "bbox": [
964
+ 0.171,
965
+ 0.184,
966
+ 0.825,
967
+ 0.268
968
+ ],
969
+ "angle": 0,
970
+ "content": "Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as <think> </think> <answer> option </answer>. We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models."
971
+ },
972
+ {
973
+ "type": "text",
974
+ "bbox": [
975
+ 0.171,
976
+ 0.273,
977
+ 0.826,
978
+ 0.344
979
+ ],
980
+ "angle": 0,
981
+ "content": "However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appears during the experiments. At the same time, the model also learns to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training."
982
+ },
983
+ {
984
+ "type": "title",
985
+ "bbox": [
986
+ 0.171,
987
+ 0.357,
988
+ 0.517,
989
+ 0.372
990
+ ],
991
+ "angle": 0,
992
+ "content": "4.3.2 Impact of Refinement of Format Rewards"
993
+ },
994
+ {
995
+ "type": "text",
996
+ "bbox": [
997
+ 0.171,
998
+ 0.381,
999
+ 0.518,
1000
+ 0.631
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4."
1004
+ },
1005
+ {
1006
+ "type": "image",
1007
+ "bbox": [
1008
+ 0.533,
1009
+ 0.385,
1010
+ 0.822,
1011
+ 0.564
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": null
1015
+ },
1016
+ {
1017
+ "type": "image_caption",
1018
+ "bbox": [
1019
+ 0.527,
1020
+ 0.573,
1021
+ 0.825,
1022
+ 0.603
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "Figure 5: The variation in response length during training under different settings."
1026
+ },
1027
+ {
1028
+ "type": "title",
1029
+ "bbox": [
1030
+ 0.171,
1031
+ 0.644,
1032
+ 0.461,
1033
+ 0.659
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "4.3.3 Other Experimental Explorations"
1037
+ },
1038
+ {
1039
+ "type": "text",
1040
+ "bbox": [
1041
+ 0.17,
1042
+ 0.668,
1043
+ 0.827,
1044
+ 0.821
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models."
1048
+ },
1049
+ {
1050
+ "type": "title",
1051
+ "bbox": [
1052
+ 0.171,
1053
+ 0.839,
1054
+ 0.452,
1055
+ 0.856
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "5 Conclusion and Future Work"
1059
+ },
1060
+ {
1061
+ "type": "text",
1062
+ "bbox": [
1063
+ 0.17,
1064
+ 0.87,
1065
+ 0.825,
1066
+ 0.914
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of \"aha"
1070
+ },
1071
+ {
1072
+ "type": "page_number",
1073
+ "bbox": [
1074
+ 0.494,
1075
+ 0.936,
1076
+ 0.505,
1077
+ 0.948
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "7"
1081
+ }
1082
+ ],
1083
+ [
1084
+ {
1085
+ "type": "image",
1086
+ "bbox": [
1087
+ 0.179,
1088
+ 0.094,
1089
+ 0.823,
1090
+ 0.335
1091
+ ],
1092
+ "angle": 0,
1093
+ "content": null
1094
+ },
1095
+ {
1096
+ "type": "image_caption",
1097
+ "bbox": [
1098
+ 0.171,
1099
+ 0.343,
1100
+ 0.825,
1101
+ 0.414
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark."
1105
+ },
1106
+ {
1107
+ "type": "text",
1108
+ "bbox": [
1109
+ 0.171,
1110
+ 0.433,
1111
+ 0.825,
1112
+ 0.488
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "moment\". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:"
1116
+ },
1117
+ {
1118
+ "type": "text",
1119
+ "bbox": [
1120
+ 0.217,
1121
+ 0.501,
1122
+ 0.825,
1123
+ 0.544
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data."
1127
+ },
1128
+ {
1129
+ "type": "text",
1130
+ "bbox": [
1131
+ 0.217,
1132
+ 0.549,
1133
+ 0.825,
1134
+ 0.606
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment."
1138
+ },
1139
+ {
1140
+ "type": "list",
1141
+ "bbox": [
1142
+ 0.217,
1143
+ 0.501,
1144
+ 0.825,
1145
+ 0.606
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": null
1149
+ },
1150
+ {
1151
+ "type": "text",
1152
+ "bbox": [
1153
+ 0.171,
1154
+ 0.621,
1155
+ 0.825,
1156
+ 0.673
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), the Fundamental Research Funds for the Central Universities."
1160
+ },
1161
+ {
1162
+ "type": "title",
1163
+ "bbox": [
1164
+ 0.174,
1165
+ 0.694,
1166
+ 0.268,
1167
+ 0.709
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "References"
1171
+ },
1172
+ {
1173
+ "type": "ref_text",
1174
+ "bbox": [
1175
+ 0.18,
1176
+ 0.717,
1177
+ 0.826,
1178
+ 0.757
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3"
1182
+ },
1183
+ {
1184
+ "type": "ref_text",
1185
+ "bbox": [
1186
+ 0.18,
1187
+ 0.769,
1188
+ 0.826,
1189
+ 0.809
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5"
1193
+ },
1194
+ {
1195
+ "type": "ref_text",
1196
+ "bbox": [
1197
+ 0.18,
1198
+ 0.821,
1199
+ 0.826,
1200
+ 0.859
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3"
1204
+ },
1205
+ {
1206
+ "type": "ref_text",
1207
+ "bbox": [
1208
+ 0.18,
1209
+ 0.873,
1210
+ 0.826,
1211
+ 0.912
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6"
1215
+ },
1216
+ {
1217
+ "type": "list",
1218
+ "bbox": [
1219
+ 0.18,
1220
+ 0.717,
1221
+ 0.826,
1222
+ 0.912
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": null
1226
+ },
1227
+ {
1228
+ "type": "page_number",
1229
+ "bbox": [
1230
+ 0.494,
1231
+ 0.936,
1232
+ 0.504,
1233
+ 0.948
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "8"
1237
+ }
1238
+ ],
1239
+ [
1240
+ {
1241
+ "type": "ref_text",
1242
+ "bbox": [
1243
+ 0.182,
1244
+ 0.092,
1245
+ 0.826,
1246
+ 0.131
1247
+ ],
1248
+ "angle": 0,
1249
+ "content": "[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2"
1250
+ },
1251
+ {
1252
+ "type": "ref_text",
1253
+ "bbox": [
1254
+ 0.182,
1255
+ 0.14,
1256
+ 0.825,
1257
+ 0.18
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2"
1261
+ },
1262
+ {
1263
+ "type": "ref_text",
1264
+ "bbox": [
1265
+ 0.182,
1266
+ 0.187,
1267
+ 0.825,
1268
+ 0.227
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": "[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2"
1272
+ },
1273
+ {
1274
+ "type": "ref_text",
1275
+ "bbox": [
1276
+ 0.182,
1277
+ 0.235,
1278
+ 0.823,
1279
+ 0.274
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.3"
1283
+ },
1284
+ {
1285
+ "type": "ref_text",
1286
+ "bbox": [
1287
+ 0.182,
1288
+ 0.282,
1289
+ 0.826,
1290
+ 0.322
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6"
1294
+ },
1295
+ {
1296
+ "type": "ref_text",
1297
+ "bbox": [
1298
+ 0.173,
1299
+ 0.33,
1300
+ 0.824,
1301
+ 0.357
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 5"
1305
+ },
1306
+ {
1307
+ "type": "ref_text",
1308
+ "bbox": [
1309
+ 0.173,
1310
+ 0.365,
1311
+ 0.824,
1312
+ 0.392
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5"
1316
+ },
1317
+ {
1318
+ "type": "ref_text",
1319
+ "bbox": [
1320
+ 0.173,
1321
+ 0.4,
1322
+ 0.824,
1323
+ 0.427
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, 2024. 5"
1327
+ },
1328
+ {
1329
+ "type": "ref_text",
1330
+ "bbox": [
1331
+ 0.173,
1332
+ 0.435,
1333
+ 0.824,
1334
+ 0.462
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7"
1338
+ },
1339
+ {
1340
+ "type": "ref_text",
1341
+ "bbox": [
1342
+ 0.173,
1343
+ 0.47,
1344
+ 0.824,
1345
+ 0.497
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1"
1349
+ },
1350
+ {
1351
+ "type": "ref_text",
1352
+ "bbox": [
1353
+ 0.173,
1354
+ 0.505,
1355
+ 0.824,
1356
+ 0.544
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1"
1360
+ },
1361
+ {
1362
+ "type": "ref_text",
1363
+ "bbox": [
1364
+ 0.173,
1365
+ 0.552,
1366
+ 0.824,
1367
+ 0.592
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3"
1371
+ },
1372
+ {
1373
+ "type": "ref_text",
1374
+ "bbox": [
1375
+ 0.173,
1376
+ 0.6,
1377
+ 0.824,
1378
+ 0.64
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3"
1382
+ },
1383
+ {
1384
+ "type": "ref_text",
1385
+ "bbox": [
1386
+ 0.173,
1387
+ 0.647,
1388
+ 0.824,
1389
+ 0.687
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": "[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1"
1393
+ },
1394
+ {
1395
+ "type": "ref_text",
1396
+ "bbox": [
1397
+ 0.173,
1398
+ 0.695,
1399
+ 0.824,
1400
+ 0.735
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2"
1404
+ },
1405
+ {
1406
+ "type": "ref_text",
1407
+ "bbox": [
1408
+ 0.173,
1409
+ 0.742,
1410
+ 0.824,
1411
+ 0.782
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7"
1415
+ },
1416
+ {
1417
+ "type": "ref_text",
1418
+ "bbox": [
1419
+ 0.173,
1420
+ 0.79,
1421
+ 0.885,
1422
+ 0.816
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": "[21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025.1"
1426
+ },
1427
+ {
1428
+ "type": "ref_text",
1429
+ "bbox": [
1430
+ 0.173,
1431
+ 0.825,
1432
+ 0.824,
1433
+ 0.865
1434
+ ],
1435
+ "angle": 0,
1436
+ "content": "[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5"
1437
+ },
1438
+ {
1439
+ "type": "ref_text",
1440
+ "bbox": [
1441
+ 0.173,
1442
+ 0.873,
1443
+ 0.824,
1444
+ 0.913
1445
+ ],
1446
+ "angle": 0,
1447
+ "content": "[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4"
1448
+ },
1449
+ {
1450
+ "type": "list",
1451
+ "bbox": [
1452
+ 0.173,
1453
+ 0.092,
1454
+ 0.885,
1455
+ 0.913
1456
+ ],
1457
+ "angle": 0,
1458
+ "content": null
1459
+ },
1460
+ {
1461
+ "type": "page_number",
1462
+ "bbox": [
1463
+ 0.494,
1464
+ 0.936,
1465
+ 0.505,
1466
+ 0.948
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": "9"
1470
+ }
1471
+ ],
1472
+ [
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.174,
1477
+ 0.092,
1478
+ 0.826,
1479
+ 0.133
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7"
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.174,
1488
+ 0.14,
1489
+ 0.826,
1490
+ 0.18
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3"
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.175,
1499
+ 0.187,
1500
+ 0.825,
1501
+ 0.215
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5"
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.174,
1510
+ 0.223,
1511
+ 0.825,
1512
+ 0.262
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3"
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.173,
1521
+ 0.27,
1522
+ 0.825,
1523
+ 0.298
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4"
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.174,
1532
+ 0.305,
1533
+ 0.825,
1534
+ 0.346
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6"
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.173,
1543
+ 0.353,
1544
+ 0.825,
1545
+ 0.382
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "[30] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3"
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.173,
1554
+ 0.388,
1555
+ 0.825,
1556
+ 0.429
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6"
1560
+ },
1561
+ {
1562
+ "type": "list",
1563
+ "bbox": [
1564
+ 0.173,
1565
+ 0.092,
1566
+ 0.826,
1567
+ 0.429
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": null
1571
+ },
1572
+ {
1573
+ "type": "page_number",
1574
+ "bbox": [
1575
+ 0.491,
1576
+ 0.936,
1577
+ 0.509,
1578
+ 0.948
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "10"
1582
+ }
1583
+ ]
1584
+ ]
data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69dfcc98a2793c52d40f925ac820892dc6e10956b6a6439c2ee006809c9675ab
3
+ size 3597849
data/2025/2504_09xxx/2504.09641/full.md ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning
2
+
3
+ Xingjian Zhang $^{1,*}$ Siwei Wen $^{1,2,*}$ Wenjun Wu $^{1,2,3}$ Lei Huang $^{1,2,3,\boxtimes}$
4
+
5
+ $^{1}$ SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China
6
+ $^{2}$ Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University
7
+ $^{3}$ Hangzhou International Innovation Institute, Beihang University, Hangzhou, China
8
+
9
+ {huangleiai}@buaa.edu.cn
10
+
11
+ # Abstract
12
+
13
+ Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of "aha moments". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1.
14
+
15
+ # 1 Introduction
16
+
17
+ Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data.
18
+
19
+ Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary.
20
+
21
+ ![](images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg)
22
+ Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue).
23
+
24
+ ![](images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg)
25
+
26
+ Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags.
27
+
28
+ # Qwen2-VL-7B-Instruct:
29
+
30
+ <think> The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. </think> <answer> C. pick up </answer>
31
+
32
+ TinyLLaVA-Video-SFT: C. pick up.
33
+
34
+ # TinyLLaVA-Video-R1:
35
+
36
+ <think> Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. </think> <answer> C </answer>
37
+
38
+ In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of "aha moments", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models.
39
+
40
+ # 2 Related Work
41
+
42
+ Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as "aha moments" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models.
43
+
44
+ Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance.
45
+
46
+ ![](images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg)
47
+ Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning.
48
+
49
+ ![](images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg)
50
+
51
+ Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags.
52
+
53
+ # Qwen2-VL-7B-Instruct:
54
+
55
+ <think> The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. </think> <answer> A. Bone grafting. </answer>
56
+
57
+ # TinyLLaVA-Video-SFT: A.
58
+
59
+ # TinyLLaVA-Video-R1:
60
+
61
+ <think> To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. </think> <answer> A </answer>
62
+
63
+ Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs.
64
+
65
+ # 3 Methods
66
+
67
+ To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications.
68
+
69
+ # 3.1 TinyLLaVA-Video
70
+
71
+ TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning.
72
+
73
+ # 3.2 Group Relative Policy Optimization (GRPO)
74
+
75
+ We follow the GRPO algorithm [17] to train the model. For each question $\mathbf{q}$ , the policy model generates a set of candidate responses $\{O_1, O_2, \dots, O_G\}$ , computes the corresponding rewards
76
+
77
+ ![](images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg)
78
+ Figure 3: Cases of "aha moment", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively.
79
+
80
+ $\{r_1, r_2, \ldots, r_G\}$ based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function:
81
+
82
+ $$
83
+ J_{GRPO}(\theta) = \mathbb{E}_{[q, \{o_i\}]} \frac{1}{G} \sum_{i=1}^{G} \frac{1}{|o_i|} \left\{ \min \left[ \frac{\pi_\theta}{\pi_{\theta_{old}}} A_i, \operatorname{clip} \left( \frac{\pi_\theta}{\pi_{\theta_{old}}}, 1 - \epsilon, 1 + \epsilon \right) A_i \right] - \beta \mathbb{D}_{KL} \left[ \pi_\theta \| \pi_{ref} \right] \right\} \tag{1}
84
+ $$
85
+
86
+ where $\pi_{\theta}$ and $\pi_{\theta_{old}}$ are the current and old policy, $\epsilon$ and $\beta$ are hyper-parameters, and $A_{i}$ is the advantages defined as:
87
+
88
+ $$
89
+ A_i = \frac{r_i - \operatorname{mean} \left( \left\{ r_1, r_2, \cdots, r_G \right\} \right)}{\operatorname{std} \left( \left\{ r_1, r_2, \cdots, r_G \right\} \right)}. \tag{2}
90
+ $$
91
+
92
+ In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set $\{O_i\}$ are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise $\mathcal{N}(0, 0.02^2)$ to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses.
93
+
94
+ # 3.3 Training Data and Template.
95
+
96
+ We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer.
97
+
98
+ During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in <think> </think> and final answer (option) in <answer> </answer> tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format.
99
+
100
+ <table><tr><td>Model</td><td>LLM size</td><td>Answer Type</td><td>MVBench</td><td>Video-MME (wo sub)</td><td>MLVU</td><td>MMVU (mc)</td></tr><tr><td>LLaMA-VID [10]</td><td>7B</td><td>Option</td><td>41.4</td><td>-</td><td>33.2</td><td>-</td></tr><tr><td>LLaVA-NeXT [12]</td><td>7B</td><td>Option</td><td>-</td><td>-</td><td>39.3</td><td>29.2</td></tr><tr><td>VideoLLaVA [11]</td><td>7B</td><td>Option</td><td>-</td><td>39.9</td><td>47.3</td><td>-</td></tr><tr><td>ShareGPT4Video [2]</td><td>8B</td><td>Option</td><td>-</td><td>39.9</td><td>46.4</td><td>-</td></tr><tr><td>LLaVA-Mini [26]</td><td>7B</td><td>Option</td><td>44.5</td><td>-</td><td>42.8</td><td>-</td></tr><tr><td>InternVideo2 [22]</td><td>8B</td><td>Option</td><td>-</td><td>41.9</td><td>-</td><td>39.0</td></tr><tr><td>TinyLLaVA-Video-SFT</td><td>3B</td><td>Option</td><td>49.0</td><td>42.2</td><td>49.2</td><td>46.1</td></tr><tr><td>TinyLLaVA-Video-ColdStart</td><td>3B</td><td>Reason</td><td>33.2</td><td>26.6</td><td>28.6</td><td>22.7</td></tr><tr><td>TinyLLaVA-Video-R1</td><td>3B</td><td>Reason</td><td>49.5</td><td>46.6</td><td>52.4</td><td>46.9</td></tr></table>
101
+
102
+ Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. "Option" indicates that the model only needs to answer with the selected choice, while "Reason" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface.
103
+
104
+ # 3.4 Reward Rules.
105
+
106
+ We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:
107
+
108
+ Format reward. We require the thought process to be enclosed within $\langle \text{think} \rangle \langle \text{/think} \rangle$ , and the final answer to be enclosed within $\langle \text{answer} \rangle \langle \text{/answer} \rangle$ . These four tags can appear only once in the entire response, and if followed, the model will receive a format reward $FR = r_0 + LR$ . Here, $r_0$ represents the base reward for adhering to the required response format, and $LR$ is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:
109
+
110
+ $$
111
+ LR = \min \left( 1, \frac{Len}{ML} \right) \times r_1. \tag{3}
112
+ $$
113
+
114
+ Here, $Len$ represents the length of the response extracted from within the $<\text{think}>$ / $<\text{/think}>$ tags, and $ML$ represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set $r_0 = r_1 = 0.5$, thus the format reward is limited to a maximum of 1.
115
+
116
+ Accuracy reward. We design the accuracy reward $AR$ based on the answer. We extract the final answer from <answer></answer> and compare it with the label. The model will receive an accuracy reward of $AR = r_2 > 0$ , if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. $AR = 0$ . To ensure that the accuracy reward and the format reward have equal importance, we set $r_2 = r_0 + r_1$ in our experiments.
117
+
118
+ To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. Instead, we introduce a penalty for incorrect answers, with the total reward $R$ defined by the following formula:
119
+
120
+ $$
121
+ R = \left\{ \begin{array}{ll} AR + FR, & \text{if } FR > 0 \text{ and } AR = r_2 \\ -FR, & \text{if } FR > 0 \text{ and } AR = 0 \\ -\left( r_0 + r_1 + r_2 \right), & \text{if } FR = 0 \end{array} \right. \tag{4}
122
+ $$
123
+
124
+ When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred.
125
+
126
+ # 4 Experiments
127
+
128
+ # 4.1 Experimental Settings
129
+
130
+ We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training.
131
+
132
+ To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1.
133
+
134
+ For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities.
135
+
136
+ # 4.2 Main Results and Aha Moment
137
+
138
+ As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities.
139
+
140
+ ![](images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg)
141
+ (a) Evolution in completion length.
142
+
143
+ ![](images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg)
144
+ Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training.
145
+
146
+ ![](images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg)
147
+ (b) Evolution in accuracy reward.
148
+ (c) Evolution in format reward.
149
+
150
+ As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models.
151
+
152
+ Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the "aha moment" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection.
153
+
154
+ As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking.
155
+
156
+ # 4.3 Ablation Study
157
+
158
+ In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1.
159
+
160
+ # 4.3.1 Impact of Cold-Start Data
161
+
162
+ Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as <think> </think> <answer> option </answer>. We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models.
163
+
164
+ However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appears during the experiments. At the same time, the model also learns to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training.
165
+
166
+ # 4.3.2 Impact of Refinement of Format Rewards
167
+
168
+ In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4.
169
+
170
+ ![](images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg)
171
+ Figure 5: The variation in response length during training under different settings.
172
+
173
+ # 4.3.3 Other Experimental Explorations
174
+
175
+ Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models.
176
+
177
+ # 5 Conclusion and Future Work
178
+
179
+ In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of "aha
180
+
181
+ ![](images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg)
182
+ Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark.
183
+
184
+ moment". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:
185
+
186
+ - Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data.
187
+ - Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment.
188
+
189
+ Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), and the Fundamental Research Funds for the Central Universities.
190
+
191
+ # References
192
+
193
+ [1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3
194
+ [2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5
195
+ [3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3
196
+ [4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6
197
+
198
+ [5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2
199
+ [6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2
200
+ [7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2
201
+ [8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024. 3
202
+ [9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6
203
+ [10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 5
204
+ [11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5
205
+ [12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, 2024. 5
206
+ [13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7
207
+ [14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1
208
+ [15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1
209
+ [16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3
210
+ [17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3
211
+ [18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1
212
+ [19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2
213
+ [20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7
214
+ [21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025. 1
215
+ [22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5
216
+ [23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4
217
+
218
+ [24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7
219
+ [25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3
220
+ [26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5
221
+ [27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3
222
+ [28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4
223
+ [29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6
224
+ [30] Hengguang Zhou, Xirui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's "aha moment" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3
225
+ [31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6
data/2025/2504_09xxx/2504.09641/images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg ADDED

Git LFS Details

  • SHA256: d5d3f6bf9efc6143d083056da2685a02b2fd5e8d381204672efeee57cb01ce4e
  • Pointer size: 130 Bytes
  • Size of remote file: 14.1 kB
data/2025/2504_09xxx/2504.09641/images/05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg ADDED

Git LFS Details

  • SHA256: d3e7b370e1d9e2928192b643e1984f4e40774192d1862028816a3ed5b6d5681f
  • Pointer size: 129 Bytes
  • Size of remote file: 5.37 kB
data/2025/2504_09xxx/2504.09641/images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg ADDED

Git LFS Details

  • SHA256: 4b3e7e88eb17b27be223e880a676fecf721fa446afc4cf968f594473f6c109d2
  • Pointer size: 130 Bytes
  • Size of remote file: 27.3 kB
data/2025/2504_09xxx/2504.09641/images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg ADDED

Git LFS Details

  • SHA256: 38c27bcc0c610c3939ab2c72d24dd800a423d343d34ad80a72ac57ba9bc00e85
  • Pointer size: 130 Bytes
  • Size of remote file: 12.1 kB
data/2025/2504_09xxx/2504.09641/images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg ADDED

Git LFS Details

  • SHA256: 589b865dbfa9887710afbaffa90f7a2f3098f585e8b6142d67513c19a54dd1e1
  • Pointer size: 130 Bytes
  • Size of remote file: 67.9 kB
data/2025/2504_09xxx/2504.09641/images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg ADDED

Git LFS Details

  • SHA256: b117008456b935b48fd786c3476ba8a1e7d12134fab941393ea8fdf161dbc514
  • Pointer size: 130 Bytes
  • Size of remote file: 27.6 kB
data/2025/2504_09xxx/2504.09641/images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg ADDED

Git LFS Details

  • SHA256: 190c69f1a2cea03db97f71bc3da60222f8a108b0aa72bb7164a0551004144274
  • Pointer size: 131 Bytes
  • Size of remote file: 134 kB
data/2025/2504_09xxx/2504.09641/images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg ADDED

Git LFS Details

  • SHA256: 678c5eb23ae866c55cf6ae1f510e2003d743d1f1b8d70aebb0f77693ab0f8aa2
  • Pointer size: 130 Bytes
  • Size of remote file: 30.5 kB
data/2025/2504_09xxx/2504.09641/images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg ADDED

Git LFS Details

  • SHA256: 21806f53589a64ad5b8fa868b1a52d32ee1b0d29454a8cada249b7c8ed5fc42b
  • Pointer size: 129 Bytes
  • Size of remote file: 1.73 kB
data/2025/2504_09xxx/2504.09641/images/b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg ADDED

Git LFS Details

  • SHA256: a13cc73240bd94a115358bebc61b374c0e5ebe986fd8445d814c58a5930139b8
  • Pointer size: 129 Bytes
  • Size of remote file: 7.34 kB
data/2025/2504_09xxx/2504.09641/images/c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg ADDED

Git LFS Details

  • SHA256: b6faa166d5e49e1899b026bde6e6da3780d916b5d842be480e4dde82fde40751
  • Pointer size: 130 Bytes
  • Size of remote file: 10.4 kB
data/2025/2504_09xxx/2504.09641/images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg ADDED

Git LFS Details

  • SHA256: 2d14ba77460d9b6280f06ccda8de283e16fab9b2eda91e88cfaa99d90a0f7e6a
  • Pointer size: 130 Bytes
  • Size of remote file: 77.8 kB
data/2025/2504_09xxx/2504.09641/images/d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg ADDED

Git LFS Details

  • SHA256: 18585901e6da76ff68138dfbfd35ba3bfe932d456189b56e8ef0ee66d49e557a
  • Pointer size: 130 Bytes
  • Size of remote file: 12.3 kB
data/2025/2504_09xxx/2504.09641/images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg ADDED

Git LFS Details

  • SHA256: 8de325d2e15875e700101cab4140bf73521c84c58f32f75ca1889c08db495b6e
  • Pointer size: 130 Bytes
  • Size of remote file: 12.2 kB
data/2025/2504_09xxx/2504.09641/images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg ADDED

Git LFS Details

  • SHA256: a57b6fb8f2a1a55791bf40023456cfcb38a4d74c996c6d4729a046497bdd1a6d
  • Pointer size: 129 Bytes
  • Size of remote file: 1.66 kB
data/2025/2504_09xxx/2504.09641/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f77ed9e351ff7ad8423a280b586142eea42e3cbd4cf8f8b82c4aaa5187c3fe70
3
+ size 4284339