Chelsea707 committed on

Commit 371d8c4 · verified · 1 Parent(s): e6c4d41

MinerU Batch 3990d73e-6cc2-4120-b9fe-f3637dc0140b (Part 5/8)

Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_content_list.json +0 -0
  3. data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_model.json +0 -0
  4. data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_origin.pdf +3 -0
  5. data/2025/2503_13xxx/2503.13939/full.md +340 -0
  6. data/2025/2503_13xxx/2503.13939/images/036dbbc8e10a472ebd88f0260be12f30dd4d7b331d23503ef14be8a8ea7fc14b.jpg +3 -0
  7. data/2025/2503_13xxx/2503.13939/images/18e45ba1a2c44e5ac250b9ca66403bae6b5bffe1e1c5aa3bd86a169ec719b2fb.jpg +3 -0
  8. data/2025/2503_13xxx/2503.13939/images/3ea719bc07cf656d52313d02342b2739f9eaa3bf1cd6fe9e230f960e2a263d7d.jpg +3 -0
  9. data/2025/2503_13xxx/2503.13939/images/3fc59ddd0c4cab56129b011962e2b7f2376ccde8d72b6efda8f479849cdf7b76.jpg +3 -0
  10. data/2025/2503_13xxx/2503.13939/images/5c19b24114a185413141accf26bb2110e546c00cdf7ea2d0b184a5b47c97c79f.jpg +3 -0
  11. data/2025/2503_13xxx/2503.13939/images/6372a63453c4981b4e8d60008db2f8680108fa0511a31026c42f0512a8233e1e.jpg +3 -0
  12. data/2025/2503_13xxx/2503.13939/images/6d17f5e39033555d7897b638223806d890c898d39fb22d406e37032593579ca3.jpg +3 -0
  13. data/2025/2503_13xxx/2503.13939/images/7cbb27596f9220b47bd10a54fcf49780df3b484df48d9a1db188d7c947e017ff.jpg +3 -0
  14. data/2025/2503_13xxx/2503.13939/images/831930b6814bd29bd180cce811827c75f5f6dcce738a7e34e742d11c03d96411.jpg +3 -0
  15. data/2025/2503_13xxx/2503.13939/images/8b7e996d31c55436cbe2572cef3ce2510e40baa10d74a32cbb697fcfeef6ea1a.jpg +3 -0
  16. data/2025/2503_13xxx/2503.13939/images/a5eba32c917bc72a3b520819edd3c426960b311554b18317853ada95377fa1d6.jpg +3 -0
  17. data/2025/2503_13xxx/2503.13939/images/a9fbe11f3609932ea9df90b68079cd1bdc769556cdec807f6e2e30e58aa055fa.jpg +3 -0
  18. data/2025/2503_13xxx/2503.13939/images/b82effbb9b726f9312287f34c3abf5897ba152a5a07ed17488f2a75f7aee435c.jpg +3 -0
  19. data/2025/2503_13xxx/2503.13939/images/c6a1cbfe551d72704f48e8c2194a01849188ccb0ff3355aeeee9f0cc8c3be341.jpg +3 -0
  20. data/2025/2503_13xxx/2503.13939/images/cae1a1c69fc436bed9659cf12dbaccbd6101db506055d35dd62850b1405948a2.jpg +3 -0
  21. data/2025/2503_13xxx/2503.13939/images/d99add9d2b0e92e41ac2a6489deeebcc0d91c7537c2459f870b34336be11f415.jpg +3 -0
  22. data/2025/2503_13xxx/2503.13939/images/e531cfc87b3651b7f14e2dd6bb1d4fd968e84a2b8d9560ce487a2f138bd6d0d2.jpg +3 -0
  23. data/2025/2503_13xxx/2503.13939/images/e68b3ed71271c5ada12379607a0316ed743c4dbfa4fc68e2cddf0aba55fe3db4.jpg +3 -0
  24. data/2025/2503_13xxx/2503.13939/images/e7ab5651b239aae2da14ff72175987bba5cfc015db19b604e83d31379ca2c7c3.jpg +3 -0
  25. data/2025/2503_13xxx/2503.13939/images/eeab33d0e9f3175157bcbf5f4c343acf7158adca3522f510f1bde6d2d2b056bb.jpg +3 -0
  26. data/2025/2503_13xxx/2503.13939/layout.json +0 -0
  27. data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_content_list.json +0 -0
  28. data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_model.json +0 -0
  29. data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_origin.pdf +3 -0
  30. data/2025/2503_13xxx/2503.13964/full.md +408 -0
  31. data/2025/2503_13xxx/2503.13964/images/1f00b2158f4baa2e6c8177096d270b27b92ab0d3605d80704357732f68428092.jpg +3 -0
  32. data/2025/2503_13xxx/2503.13964/images/2bd7c09de03471bde09aeab739f64575a2199a4ba0a5b0e6b991aabd27b2fe58.jpg +3 -0
  33. data/2025/2503_13xxx/2503.13964/images/433cce92c4fa9b0ed6396c768f7c88a7d517383e10709be57e826f4b55732810.jpg +3 -0
  34. data/2025/2503_13xxx/2503.13964/images/48dd2c6b9561c177c32582203772f073323eb12cd85b4bb5af966477db5e2ccf.jpg +3 -0
  35. data/2025/2503_13xxx/2503.13964/images/4e1dc163721351e4b1723c336237a6658cf99d52867c870146f5e16e57995ace.jpg +3 -0
  36. data/2025/2503_13xxx/2503.13964/images/54193b1ec25a174ac9a5be0796af15788bb43ab4e2f179cd9684b69bf1429505.jpg +3 -0
  37. data/2025/2503_13xxx/2503.13964/images/5917de7b382bac0920c45bddea247ed059c33945008b92bec6a0a1a037bd24c0.jpg +3 -0
  38. data/2025/2503_13xxx/2503.13964/images/65f09d9d9bf8a5d98d1a88b5078f75dbbadfdb206dedf12a188259313b7f6cbc.jpg +3 -0
  39. data/2025/2503_13xxx/2503.13964/images/7f99184156c64d7981aec59cb5ccae09a33552db937f003b0b4c448c4c943b4f.jpg +3 -0
  40. data/2025/2503_13xxx/2503.13964/images/82db518b3d61ee7469d02fdb602f817f79c7da8d397039599091ab230cc5649f.jpg +3 -0
  41. data/2025/2503_13xxx/2503.13964/images/889dc2b074d81ad1d1cd17f9ed0c0bd65ba90c1ee94d250f3794b6698ce56777.jpg +3 -0
  42. data/2025/2503_13xxx/2503.13964/images/9b5b3aee3c02295fb484e50c85b0a9b598cbc5a7e5c4664da6351e57fd7c0c06.jpg +3 -0
  43. data/2025/2503_13xxx/2503.13964/images/bbaef9c5613382d31f0f6cb67083f12e6430ea3dbf2c820eed48659ce97c7c81.jpg +3 -0
  44. data/2025/2503_13xxx/2503.13964/images/bc82f417c5756372f5029d6b6ff6aed9a580609b40de53909fd949e594a23756.jpg +3 -0
  45. data/2025/2503_13xxx/2503.13964/images/bcf89c5d74f4c278c908dec983bf5ab2a4f1ed2bb9dd128735f8fc782e0d2401.jpg +3 -0
  46. data/2025/2503_13xxx/2503.13964/images/c2f25214c60f23849de616ca343020e1890b570d8b13257ca0d07ab92a7b4341.jpg +3 -0
  47. data/2025/2503_13xxx/2503.13964/images/d91ccdfc0d2ffe61a73e1a10c2178fe6dbc5f6967910e5ec9ea341203cc40cf6.jpg +3 -0
  48. data/2025/2503_13xxx/2503.13964/images/e3302d47576efedcf4711a805d0e69760f3609ad80ae292bb265d550bded9952.jpg +3 -0
  49. data/2025/2503_13xxx/2503.13964/layout.json +0 -0
  50. data/2025/2503_13xxx/2503.13975/339fa74d-2282-48ca-80ce-795c4c3bfb08_content_list.json +0 -0
.gitattributes CHANGED
@@ -1640,3 +1640,11 @@ data/2025/2503_14xxx/2503.14269/afca9e2c-ea12-42ab-9b42-97d8276eddd3_origin.pdf
 data/2025/2503_14xxx/2503.14286/9dbad829-fd18-4a34-aac3-79960b0d41f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_14xxx/2503.14421/6e5b24bb-0f59-4985-b826-0c2e6a2161ef_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_18xxx/2503.18955/0d22a232-d11f-468e-8c78-540cccdbc362_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13975/339fa74d-2282-48ca-80ce-795c4c3bfb08_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_14xxx/2503.14023/a3422a21-0229-4d5a-af66-ea0007377b3e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_14xxx/2503.14088/240a9a75-8db1-4b3f-b6a1-a38880c80455_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_14xxx/2503.14118/5585c5a4-326b-4f41-99b2-afa63d72d72a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_16xxx/2503.16528/ee4ac0dd-8eff-4341-983b-5242b8162c88_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_17xxx/2503.17395/5d1453fc-cf43-46d6-9479-151164433999_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13939/d5a0555e-490e-40dc-adae-70053e3513ed_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2964373849a7b2d9ffc0409fecaa00f37cfe8dadd93cda6cbe89d1802d407fd6
+size 17746497
data/2025/2503_13xxx/2503.13939/full.md ADDED
@@ -0,0 +1,340 @@
# Med-R1: Reinforcement Learning for Generalizable Medical Reasoning in Vision-Language Models

Yuxiang Lai, Jike Zhong, Ming Li, Shitian Zhao, Yuheng Li, Konstantinos Psounis, Fellow, IEEE, and Xiaofeng Yang, Member, IEEE

Abstract—Vision-language models (VLMs) have achieved impressive progress in natural image reasoning, yet their potential in medical imaging remains underexplored. Medical vision-language tasks demand precise understanding and clinically coherent answers, which are difficult to achieve due to the complexity of medical data and the scarcity of high-quality expert annotations. These challenges limit the effectiveness of conventional supervised fine-tuning (SFT) and Chain-of-Thought (CoT) strategies that work well in general domains. To address these challenges, we propose Med-R1, a reinforcement learning (RL)-enhanced VLM designed to improve generalization and reliability in medical reasoning. Med-R1 adopts Group Relative Policy Optimization (GRPO) to encourage reward-guided learning beyond static annotations. We comprehensively evaluate Med-R1 across eight distinct medical imaging modalities. Med-R1 achieves a $29.94\%$ improvement in average accuracy over its base model Qwen2-VL-2B, and even outperforms Qwen2-VL-72B—a model with $36 \times$ more parameters. To assess cross-task generalization, we further evaluate Med-R1 on five question types. Med-R1 outperforms Qwen2-VL-2B by $32.06\%$ in question-type generalization, also surpassing Qwen2-VL-72B. We further explore the thinking process in Med-R1, a crucial component of DeepSeek-R1. Our results show that omitting intermediate rationales (No-Thinking Med-R1) not only improves cross-domain generalization with less training, but also challenges the common assumption that more reasoning always helps. Nevertheless, we also find that the Think-After Med-R1 variant further improves performance while maintaining interpretability. These findings suggest that in medical VQA, it is not the presence of reasoning itself that matters, but rather its quality and placement. Together, these results highlight that RL improves medical reasoning and generalization, enabling efficient and reliable VLMs.

Equal contribution: Yuxiang Lai, Jike Zhong, and Ming Li. Corresponding author: Xiaofeng Yang (email: xiaofeng.yang@emory.edu).

This work is supported in part by the National Institutes of Health under award numbers R01CA272991, R01EB032680, R01DE033512, and U54CA274513.

Yuxiang Lai and Xiaofeng Yang are with the Department of Computer Science and Informatics, Emory University, Atlanta, GA 30322, USA.

Jike Zhong and Konstantinos Psounis are with the Department of Computer Science and the Department of Electrical and Computer Engineering, University of Southern California, Los Angeles, CA 90089, USA.

Ming Li is with the Department of Computer Science, University of Tokyo, Tokyo 113-8654, Japan.

Shitian Zhao is with the Department of Computer Science, Johns Hopkins University, Baltimore, MD 21218, USA.

Yuheng Li and Xiaofeng Yang are with the Department of Biomedical Engineering, Georgia Institute of Technology and Emory University, Atlanta, GA 30332, USA. Xiaofeng Yang is also with the Department of Radiation Oncology and Winship Cancer Institute, Emory University, Atlanta, GA 30322, USA.

Index Terms—Reinforcement Learning, Vision Language Models, Multimodal LLM, Post-Training, Medical Reasoning.

# I. INTRODUCTION

Vision-Language Models (VLMs) have achieved strong performance on natural image understanding tasks such as visual question answering (VQA) and multimodal dialogue [1]–[3], enabled by large-scale pretraining and supervised fine-tuning (SFT). However, applying VLMs to medical imaging remains challenging due to the need for clinically sound interpretations and decision-making processes. Medical tasks often involve multi-step analysis—e.g., diagnosing a lung nodule may require integrating lesion localization, morphology, and context. In addition, the diversity of imaging modalities (e.g., CT, MRI) and task types (e.g., diagnosis, grading) imposes demands on generalizability. This raises a key question: how can we enable VLMs to perform well across medical domains while ensuring reliable and context-aware behavior?
In this paper, we identify that the limitations of current medical VLMs primarily stem from the inherent drawbacks of Supervised Fine-Tuning (SFT) [4], [5]. While SFT has been widely adopted to adapt foundation models to medical imaging [6]-[10], it suffers from two fundamental issues that hinder medical applicability. First, SFT inherently biases models toward memorizing task-specific shortcuts rather than learning generalizable reasoning. By directly aligning model outputs with final answers (e.g., diagnostic labels), SFT encourages overfitting to superficial patterns in training data. Second, the scarcity of high-quality Chain-of-Thought (CoT) annotations severely limits the effectiveness of SFT in medical reasoning. Unlike general-domain tasks, where large-scale CoT datasets can be crowdsourced, medical reasoning requires domain-specific logical structuring (e.g., systematically ruling out differential diagnoses before confirming malignancy). However, curating such CoT datasets is prohibitively expensive, as it demands meticulous annotation by experienced medical professionals to ensure diagnostic validity and clinical coherence. As a result, existing SFT-based medical VLMs [6], [7] lack access to high-quality CoT data, leading to shallow reasoning with limited clinical rigor. These models frequently produce "black-box" predictions, struggling to provide traceable reasoning or maintain performance in out-of-domain tasks. This lack of transparency and robustness poses a significant challenge to medical adoption, where explainability and reliability are indispensable requirements.

To address these challenges, we propose Med-R1, a reinforcement learning (RL)-based framework for enhancing the generalizability and interpretability of medical VLMs. Unlike SFT, which aligns outputs to fixed supervision and often leads to shortcut learning, RL encourages exploration of diverse reasoning strategies through reward signals—without requiring explicit CoT annotations [11]. We adopt Group Relative Policy Optimization (GRPO) [12], a lightweight alternative to PPO (Proximal Policy Optimization) [13] that stabilizes training via rule-based rewards and group-relative comparisons. These mechanisms reduce computational overhead while encouraging clinically grounded reasoning, making GRPO well-suited for medical tasks where scalability and reliability are essential.

As shown in Figure 1, we evaluate Med-R1 across eight diverse medical imaging modalities, including CT, MRI, Ultrasound, Dermoscopy, Fundus Photography, OCT, Microscopy, and X-ray. These modalities cover a wide range of clinical imaging—from macroscopic anatomy to cellular-level and functional assessments. Med-R1 (2B parameters) achieves $69.91\%$ average accuracy, a $29.94\%$ gain over its base model Qwen2-VL-2B, and even outperforms the 72B-parameter Qwen2-VL-72B (Table IV), highlighting the benefit of RL-driven adaptation. We further assess generalizability across five question types: modality recognition, anatomy identification, disease diagnosis, lesion grading, and biological attribute analysis. Med-R1 improves question-type generalization accuracy by $32.06\%$ over Qwen2-VL-2B and surpasses Qwen2-VL-72B in this setting as well (Table VIII). These results demonstrate that RL enhances both parameter efficiency and generalizability in medical VLMs.

Finally, we investigate the impact of intermediate rationales in Med-R1. Conventional wisdom suggests that explicit step-by-step reasoning ("Think") enhances generalization, yet our results show this is not always true in medical VQA. We compare three RL post-training strategies: (1) Think, which generates rationales before answering; (2) No-Think, which directly predicts the answer; and (3) our proposed Think-After, which performs concise reasoning after prediction. We find that No-Think improves generalization across modalities, while Think often causes hallucinated rationales due to domain shift. In contrast, Think-After preserves interpretability without sacrificing accuracy, striking the best balance between reliability and explainability. These findings reveal that in specialized domains, the effectiveness of reasoning depends more on its quality, timing, and domain alignment than its length, challenging the notion that "more thinking is better."

Med-R1 provides a comprehensive and systematic study of rule-based RL for medical reasoning across eight imaging modalities (Figure 2). We summarize our contributions as follows:

1) Multi-modality and multi-task medical reasoning VLM. We propose Med-R1, a comprehensive and systematic study of rule-based reinforcement learning for medical reasoning, supporting eight imaging modalities (CT, MRI, Ultrasound, etc.) across five distinct clinical tasks. We demonstrate that RL-based fine-tuning effectively promotes modality-specific as well as cross-modality reasoning in the medical domain without the need for token-level supervision as in SFT. The proposed Med-R1 is capable of generating step-by-step, accurate, and plausible explanations.

2) Robust Generalization with Efficiency. We show that alongside solid modality- and task-specific performance, Med-R1 exhibits strong generalization. Med-R1 outperforms the base model by $29.94\%$ and SFT baselines by $15.84\%$ in average generalization accuracy across modalities. In cross-task settings, it outperforms the base model and SFT baseline by $32.06\%$ and $11.25\%$ respectively. Moreover, Med-R1 surpasses other larger generic or medical-specific models including Qwen2-VL-72B and MedVInT-7B, warranting its efficiency and reliability for real-world deployment.

3) Rethinking the "More Thinking is Better" Assumption: Our results challenge the common belief that generating longer or more explicit reasoning chains necessarily improves generalization. We find that reinforcement learning without explicit reasoning often yields higher accuracy, as free-form reasoning learned from general-domain data can induce hallucinations under domain shift. However, this may reduce reliability and interpretability in medical applications. To address this, we introduce Think-After—a reasoning scheme where the model provides rationalization for its chosen answer after the prediction. This design preserves interpretability while mitigating the instability introduced by lengthy reasoning chains, offering a balance between accuracy and explainability. Our findings suggest that the quality and timing of reasoning are critical for robust generalization.
# II. RELATED WORKS

General VLMs and Medical VLMs. General-purpose VLMs such as CLIP [14] and BLIP-2 [15] have advanced natural image-text understanding via large-scale pretraining, but struggle with domain-specific tasks like medical reasoning. Recent efforts adapt VLMs through supervised fine-tuning (SFT) on medical datasets, as seen in LLaVA-Med [8] and Med-Flamingo [6]. While effective in-domain, these models often overfit to narrow corpora and lack generalization across modalities or task types. Our work addresses this limitation by introducing RL for scalable, modality-agnostic adaptation.

Reinforcement Learning for Post-Training. RL has shown promise for aligning language models with desired behavior via reward feedback [16], [17]. In vision-language tasks, RL improves VQA accuracy [11] and reduces hallucination [12], but often relies on complex reward models or costly human supervision. GRPO [12] offers a scalable alternative by using rule-based rewards and group-relative comparisons. We extend this approach to medical VQA, enabling efficient adaptation without modality-specific supervision.

Medical Reasoning and Interpretability. Interpretable reasoning is critical in medical AI, with recent work exploring CoT prompting [18] and program-guided logic [4]. However, such methods rely on costly expert annotations [19], limiting scalability in clinical domains. Reinforcement learning offers an alternative by enabling emergent reasoning without explicit supervision. Concurrently, MedVLM-R1 [20] also applies GRPO-based RL to radiology VQA, but it focuses on a single radiology-specific setting (training on MRI and testing on CT and X-ray) and reports generalization only within radiology, using roughly 600 training samples. Nonetheless, it highlights the growing interest in RL for medical VLMs.

![](images/c6a1cbfe551d72704f48e8c2194a01849188ccb0ff3355aeeee9f0cc8c3be341.jpg)
Fig. 1. Overview of the Evaluation Framework: Eight Medical Imaging Modalities and Five Medical Vision Question Answering Tasks. We evaluate Med-R1 across eight distinct medical imaging modalities—Computed Tomography (CT), Magnetic Resonance Imaging (MRI), Ultrasound, Dermoscopy, Fundus Photography, OCT (Optical Coherence Tomography), Microscopy Images, and X-ray Imaging—as well as five medical vision question answering tasks: anatomy identification, disease diagnosis, lesion grading, modality recognition, and biological attribute analysis. Example images and corresponding clinical questions illustrate the diversity of medical data and the challenges in developing generalizable vision-language models for automated medical reasoning.
# III. METHOD

We adopt rule-based RL to encourage multimodal reasoning and generalization in medical domains. Recent studies have demonstrated that RL can incentivize emergent logical reasoning and generalization in multimodal tasks such as mathematical reasoning [21] and visual navigation [11]. Building on these insights, we extend its application to the medical domain and systematically assess its effectiveness in this context. Specifically, we leverage the popular RL-based post-training method GRPO [22] to train a large base MLLM [23] across 8 different modalities for medical reasoning and compare it with zero-shot and SFT performance of popular existing VLMs. We introduce the details of the SFT and RL algorithm, our reward design, and data structure below.

Supervised Fine-tuning: We performed supervised fine-tuning (SFT) on the Qwen2-VL-2B and Qwen2.5-VL-3B models. Specifically, we fine-tuned the models using mixed-precision training (bfloat16) with gradient accumulation and checkpointing enabled for memory efficiency. The maximum sequence length was set to 4096 tokens, allowing the model to process full visual-text reasoning sequences. Training was conducted for one epoch with a learning rate of $2 \times 10^{-5}$, cosine learning-rate scheduling, and a warm-up ratio of 0.1. The per-device batch size was 4, and the gradient accumulation step was 16, resulting in an effective batch size of 64.
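For concreteness, the following is a minimal sketch of this SFT configuration using the Hugging Face `transformers` `TrainingArguments` API; this is not the authors' training script, and the output directory is a hypothetical placeholder (model and dataset wiring are omitted).

```python
# Minimal sketch of the SFT hyperparameters above (illustrative, not the
# authors' exact script). `output_dir` is a hypothetical placeholder.
from transformers import TrainingArguments

sft_args = TrainingArguments(
    output_dir="med-r1-sft",            # hypothetical path
    num_train_epochs=1,                 # one epoch of training
    learning_rate=2e-5,
    lr_scheduler_type="cosine",         # cosine learning-rate scheduling
    warmup_ratio=0.1,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=16,     # 4 x 16 = effective batch size 64
    bf16=True,                          # bfloat16 mixed precision
    gradient_checkpointing=True,        # memory-efficient backprop
)
```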
# A. Group Relative Policy Optimization (GRPO)

Overview: RL-based algorithms such as PPO [13] and GRPO [12] belong to a family of fine-tuning and alignment strategies explicitly designed to enhance models' reasoning capacities. Unlike supervised fine-tuning, which directly optimizes maximum likelihood, these RL-based methods instead optimize the policy gradient using reward signals, encouraging reasoning by exploring a much larger solution space. GRPO is closely related to PPO but differs in two key aspects: first, GRPO estimates the advantage using group-based estimation rather than a value function; second, it employs a set of fixed rules as the reward signal instead of a learned reward model. These optimizations make GRPO $50\%$ more resource- and computation-efficient than PPO [12].
![](images/6d17f5e39033555d7897b638223806d890c898d39fb22d406e37032593579ca3.jpg)

Problem: What does this image depict in terms of its content?

A) Brain tissue B) Intestinal tissue C) Kidney tissue D) Breast tissue

# Med-R1 Response:

Think: The image appears to be a mammogram, which is a type of X-ray used to detect abnormalities in the breast tissue. The presence of dense, irregular patterns and a central area of lighter density suggest the presence of breast tissue.

Answer: D

Groundtruth: D

![](images/cae1a1c69fc436bed9659cf12dbaccbd6101db506055d35dd62850b1405948a2.jpg)

Problem: What is the specific abnormality present in this image?

A) Pneumonia B) Sinusitis C) Urolithiasis D) Osteoporosis

# Med-R1 Response:

Think: The image is a computed tomography (CT) scan of the abdomen, which is a medical imaging technique used to visualize internal structures. The specific abnormality present in this image is not immediately clear from the visible structures alone. However, the presence of a large, dense mass in the right lower quadrant of the abdomen could suggest a urolithiasis.

Answer: C

Groundtruth: C

Fig. 2. Medical VQA examples of Med-R1. The left panel shows a modality recognition task, where the model correctly identifies the presence of breast tissue in a mammogram (X-ray). The right panel illustrates a disease diagnosis task, where Med-R1 accurately detects urolithiasis in an abdominal CT scan. In both cases, the model provides reasoning ("Think"), explaining its decision-making process before selecting the final answer.
Definition: Formally, let $P(Q)$ denote the question set used for training, where $q$ is a sampled question in the current iteration. Let $\pi_{\theta_{\mathrm{old}}}$ and $\pi_{\theta_{\mathrm{new}}}$ denote the old policy and current (new) policy, respectively, where $o$ is a complete response sampled from a policy. Let $\pi_{\theta_{\mathrm{ref}}}$ denote the reference policy, which in practice is the frozen base MLLM. Let $G$ be the number of responses sampled per question in each iteration. The GRPO objective is given by:

$$
\begin{aligned}
\mathcal{J}_{\mathrm{GRPO}}(\theta) = \ & \mathbb{E}_{q \sim P(Q),\, \{o_i\}_{i=1}^{G} \sim \pi_{\theta_{\mathrm{old}}}(O \mid q)} \\
& \frac{1}{G} \sum_{i=1}^{G} \Bigg[ \min\left( \frac{\pi_{\theta_{\mathrm{new}}}(o_i \mid q)}{\pi_{\theta_{\mathrm{old}}}(o_i \mid q)} A_i,\ \operatorname{clip}\left( \frac{\pi_{\theta_{\mathrm{new}}}(o_i \mid q)}{\pi_{\theta_{\mathrm{old}}}(o_i \mid q)},\, 1-\epsilon,\, 1+\epsilon \right) A_i \right) \\
& \quad - \beta\, \mathbb{D}_{\mathrm{KL}}\left( \pi_{\theta_{\mathrm{new}}} \,\|\, \pi_{\mathrm{ref}} \right) \Bigg] \tag{1}
\end{aligned}
$$

where $\frac{\pi_{\theta_{\mathrm{new}}}(o_i \mid q)}{\pi_{\theta_{\mathrm{old}}}(o_i \mid q)}$ is the policy ratio, $A_{i}$ is the estimated advantage, and $\epsilon$ is the clipping threshold for policy updates. The KL divergence term [24] regularizes the policy update, ensuring that $\pi_{\theta_{\mathrm{new}}}$ does not deviate excessively from the reference model $\pi_{\theta_{\mathrm{ref}}}$. Unlike PPO, which uses a critic model to estimate the advantage $A_{i}$ for a single response $o$, GRPO estimates the relative advantage by sampling a group of responses $\{o_i\}_{i=1}^G$ and normalizing their rewards within the group [12], [25]. Each reward is calculated by rules, without a learned reward model. We detail the reward design below.
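To make Eq. (1) concrete, here is a minimal PyTorch sketch of the loss for a single question, assuming sequence-level log-probabilities of the $G$ sampled responses under the new, old, and reference policies are already computed. The particular KL estimator and the default values of $\beta$ and $\epsilon$ are illustrative assumptions, not values taken from the paper.

```python
# Sketch of Eq. (1) for one question. Inputs are shape (G,) tensors:
# per-response sequence log-probs under the new, old, and reference
# policies, plus the rule-based rewards. beta and eps are assumptions.
import torch

def grpo_loss(logp_new, logp_old, logp_ref, rewards, eps=0.2, beta=0.04):
    # Group-relative advantage: normalize rewards within the sampled group,
    # replacing PPO's learned value function.
    adv = (rewards - rewards.mean()) / (rewards.std() + 1e-8)

    # Clipped importance-ratio surrogate, as in PPO.
    ratio = torch.exp(logp_new - logp_old)
    surrogate = torch.min(ratio * adv,
                          torch.clamp(ratio, 1.0 - eps, 1.0 + eps) * adv)

    # KL estimate keeping the policy near the frozen reference model.
    kl = torch.exp(logp_ref - logp_new) - (logp_ref - logp_new) - 1.0

    # The objective is maximized, so return its negation as the loss.
    return -(surrogate - beta * kl).mean()
```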
Reward design: We follow [12] and use two types of reward: format and accuracy. First, we prompt the model to explicitly output its thinking process in the "<think>...</think>" tag and the final answer in the "<answer>...</answer>" tag. The format reward checks whether these tags are present in the final response; a reward score of 1 is given if they exist and are correctly formed. This helps the model organize its thoughts and answer in a structured format for ease of reading. The accuracy reward is a rule-based reward that checks whether the extracted answer matches the ground truth; again, a reward score of 1 is given when the results match. In practice, the ground truths are letter options "A, B, C, D" for multiple-choice questions, and we treat any response whose answer leads with the correct letter option (e.g., "A...") as correct.
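A minimal sketch of these two rule-based rewards is shown below; the exact regular expressions are our own illustration of the tag and letter-matching rules described above, not code from the paper.

```python
# Illustrative implementations of the format and accuracy rewards.
import re

def format_reward(response: str) -> float:
    """1 if the response is <think>...</think> followed by
    <answer>...</answer>, else 0."""
    ok = re.fullmatch(r"\s*<think>.*?</think>\s*<answer>.*?</answer>\s*",
                      response, flags=re.DOTALL)
    return 1.0 if ok else 0.0

def accuracy_reward(response: str, ground_truth: str) -> float:
    """1 if the content of <answer> leads with the correct letter option."""
    m = re.search(r"<answer>\s*([A-D])", response)
    return 1.0 if m and m.group(1) == ground_truth else 0.0
```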
# B. No-Thinking Med-R1

Previous work [26] found that, in image classification tasks, removing both reasoning and format supervision during RL could sometimes improve performance. However, that finding is specific to structured classification tasks with a simple output space, where reasoning is rarely required. In contrast, medical VQA involves multimodal, semantically complex inputs, and reasoning failures can arise from domain mismatch rather than vocabulary alone [6], [12]. Given the difficulty of obtaining reliable CoT annotations in medical settings, we further investigate the role of intermediate reasoning in RL post-training. We revise the instruction prompt as "{Question}. Output the single-letter choice (A, B, C, D, ...) in <answer>...</answer> tags.", where {Question} is replaced by each specific question. By doing so, we encourage the model to output only the final answer without any explicit thinking process. The accuracy reward is maintained: the reward score is 1 when the extracted answer matches the ground-truth label. Note that when any text appears outside the <answer> tag, i.e., an explicit thinking process exists, the extracted content will be null and the accuracy reward will be 0; the model is therefore forced to generate only the answer.
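The sketch below illustrates this No-Think variant: the revised prompt and a strict reward that returns 0 whenever anything appears outside the <answer> tag. It is our own rendering of the rule described above.

```python
# No-Think prompt construction and strict accuracy reward (illustrative).
import re

def build_nothink_prompt(question: str) -> str:
    return (f"{question}. Output the single-letter choice (A, B, C, D, ...) "
            "in <answer>...</answer> tags.")

def nothink_accuracy_reward(response: str, ground_truth: str) -> float:
    # The entire response must be a single <answer> span; any text outside
    # it (an explicit thinking trace) yields a null extraction and reward 0.
    m = re.fullmatch(r"\s*<answer>\s*([A-Z])[^<]*</answer>\s*", response)
    return 1.0 if m and m.group(1) == ground_truth else 0.0
```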
TABLE I

CROSS-MODALITY GENERALIZATION PERFORMANCE OF MED-R1 WITH RL POST-TRAINING. ACCURACY $(\%)$ ACROSS EIGHT MEDICAL IMAGING MODALITIES, WHERE ROWS INDICATE TRAINING MODALITIES AND COLUMNS TEST MODALITIES. DARKER CELL SHADES INDICATE HIGHER ACCURACY FOR CORRESPONDING TRAINING-TEST PAIRS IN EACH COLUMN. THE BASE MODEL IS QWEN2.5-VL-3B.

RL fine-tuned Qwen2.5-VL-3B

<table><tr><td>Test Train</td><td>CT</td><td>MRI</td><td>X-Ray</td><td>Ultrasound</td><td>Dermoscopy</td><td>Fundus</td><td>OCT</td><td>Microscopy</td><td>Overall</td></tr><tr><td>CT</td><td>94.35 ± 0.79</td><td>70.85 ± 1.10</td><td>86.50 ± 1.70</td><td>35.44 ± 2.05</td><td>66.77 ± 2.53</td><td>76.41 ± 2.51</td><td>84.08 ± 2.48</td><td>66.58 ± 2.79</td><td>72.62 ± 0.65</td></tr><tr><td>MRI</td><td>72.79 ± 1.53</td><td>98.57 ± 0.29</td><td>83.72 ± 1.80</td><td>38.38 ± 2.07</td><td>68.30 ± 2.53</td><td>79.78 ± 2.32</td><td>85.97 ± 2.36</td><td>67.12 ± 2.79</td><td>74.33 ± 0.64</td></tr><tr><td>X-Ray</td><td>78.90 ± 1.39</td><td>59.73 ± 1.20</td><td>93.50 ± 1.21</td><td>38.28 ± 2.10</td><td>65.85 ± 2.57</td><td>76.05 ± 2.55</td><td>78.89 ± 2.77</td><td>65.14 ± 2.79</td><td>69.54 ± 0.68</td></tr><tr><td>Ultrasound</td><td>62.20 ± 1.67</td><td>63.00 ± 1.19</td><td>79.57 ± 1.98</td><td>98.84 ± 0.46</td><td>64.09 ± 2.57</td><td>72.22 ± 2.60</td><td>78.54 ± 2.77</td><td>69.82 ± 2.75</td><td>73.53 ± 0.65</td></tr><tr><td>Dermoscopy</td><td>64.30 ± 1.65</td><td>60.39 ± 1.18</td><td>80.56 ± 1.92</td><td>40.21 ± 2.12</td><td>84.99 ± 1.91</td><td>71.86 ± 2.69</td><td>74.76 ± 2.89</td><td>65.23 ± 2.75</td><td>67.79 ± 0.70</td></tr><tr><td>Fundus</td><td>68.59 ± 1.59</td><td>61.32 ± 1.19</td><td>81.98 ± 1.86</td><td>37.42 ± 2.10</td><td>67.84 ± 2.49</td><td>90.62 ± 1.68</td><td>79.13 ± 2.71</td><td>66.76 ± 2.79</td><td>69.21 ± 0.67</td></tr><tr><td>OCT</td><td>81.06 ± 1.34</td><td>73.92 ± 1.06</td><td>83.65 ± 1.80</td><td>35.78 ± 2.03</td><td>68.99 ± 2.53</td><td>80.87 ± 2.32</td><td>98.70 ± 0.77</td><td>67.12 ± 2.79</td><td>73.76 ± 0.65</td></tr><tr><td>Microscopy</td><td>65.50 ± 1.62</td><td>62.34 ± 1.21</td><td>80.31 ± 1.95</td><td>37.37 ± 2.10</td><td>64.24 ± 2.60</td><td>71.40 ± 2.64</td><td>77.36 ± 2.83</td><td>88.02 ± 1.94</td><td>68.32 ± 0.69</td></tr><tr><td>Overall</td><td>73.46 ± 1.51</td><td>68.77 ± 1.12</td><td>83.72 ± 1.79</td><td>45.21 ± 2.14</td><td>68.88 ± 2.45</td><td>77.40 ± 2.50</td><td>82.18 ± 2.53</td><td>69.47 ± 2.74</td><td>71.14 ± 0.67</td></tr></table>

CT - Computed Tomography; MRI - Magnetic Resonance Imaging; US - Ultrasound; Der - Dermoscopy; FP - Fundus Photography; OCT - Optical Coherence Tomography; Micro - Microscopy Images; X-Ray - X-Ray Imaging
TABLE II

CROSS-MODALITY GENERALIZATION PERFORMANCE OF MED-R1 WITH NO-THINK RL POST-TRAINING. ACCURACY $(\%)$ ACROSS EIGHT MEDICAL IMAGING MODALITIES, WHERE ROWS INDICATE TRAINING MODALITIES AND COLUMNS TEST MODALITIES. DARKER CELL SHADES INDICATE HIGHER ACCURACY FOR CORRESPONDING TRAINING-TEST PAIRS IN EACH COLUMN. THE BASE MODEL IS QWEN2.5-VL-3B.

No-Think RL fine-tuned Qwen2.5-VL-3B

<table><tr><td>Test Train</td><td>CT</td><td>MRI</td><td>X-Ray</td><td>Ultrasound</td><td>Dermoscopy</td><td>Fundus</td><td>OCT</td><td>Microscopy</td><td>Overall</td></tr><tr><td>CT</td><td>98.27 ± 0.45</td><td>74.77 ± 1.04</td><td>85.33 ± 1.70</td><td>37.61 ± 2.05</td><td>69.07 ± 2.53</td><td>78.87 ± 2.46</td><td>87.26 ± 2.30</td><td>67.75 ± 2.75</td><td>74.87 ± 0.64</td></tr><tr><td>MRI</td><td>69.70 ± 1.56</td><td>99.61 ± 0.15</td><td>81.73 ± 1.89</td><td>34.91 ± 2.03</td><td>75.19 ± 2.34</td><td>80.51 ± 2.32</td><td>89.98 ± 2.00</td><td>67.48 ± 2.75</td><td>74.89 ± 0.65</td></tr><tr><td>X-Ray</td><td>80.10 ± 1.36</td><td>69.78 ± 1.14</td><td>95.79 ± 0.99</td><td>34.33 ± 2.05</td><td>71.29 ± 2.41</td><td>81.79 ± 2.28</td><td>83.14 ± 2.54</td><td>68.65 ± 2.79</td><td>73.11 ± 0.63</td></tr><tr><td>Ultrasound</td><td>56.40 ± 1.70</td><td>64.93 ± 1.18</td><td>80.25 ± 1.92</td><td>100.0 ± 0.00</td><td>68.38 ± 2.53</td><td>75.32 ± 2.60</td><td>76.30 ± 2.89</td><td>71.71 ± 2.66</td><td>74.16 ± 0.66</td></tr><tr><td>Dermoscopy</td><td>55.45 ± 1.70</td><td>66.48 ± 1.15</td><td>80.37 ± 1.95</td><td>37.42 ± 2.12</td><td>92.88 ± 1.38</td><td>71.86 ± 2.69</td><td>76.53 ± 2.83</td><td>67.12 ± 2.70</td><td>68.51 ± 0.69</td></tr><tr><td>Fundus</td><td>58.65 ± 1.68</td><td>66.08 ± 1.16</td><td>82.79 ± 1.83</td><td>34.33 ± 2.10</td><td>68.68 ± 2.49</td><td>93.81 ± 1.41</td><td>79.72 ± 2.71</td><td>65.86 ± 2.79</td><td>68.74 ± 0.69</td></tr><tr><td>OCT</td><td>80.38 ± 1.36</td><td>75.02 ± 1.05</td><td>86.81 ± 1.67</td><td>36.11 ± 2.05</td><td>72.21 ± 2.41</td><td>81.60 ± 2.28</td><td>99.88 ± 0.18</td><td>65.32 ± 2.79</td><td>74.67 ± 0.64</td></tr><tr><td>Microscopy</td><td>60.17 ± 1.73</td><td>66.59 ± 1.16</td><td>83.10 ± 1.86</td><td>36.60 ± 2.10</td><td>71.29 ± 2.45</td><td>77.05 ± 2.46</td><td>80.66 ± 2.71</td><td>97.66 ± 0.86</td><td>71.64 ± 0.67</td></tr><tr><td>Overall</td><td>69.89 ± 1.59</td><td>72.91 ± 1.08</td><td>84.52 ± 1.73</td><td>43.91 ± 2.14</td><td>73.62 ± 2.37</td><td>80.10 ± 2.41</td><td>84.18 ± 2.47</td><td>71.44 ± 2.65</td><td>72.57 ± 0.66</td></tr></table>
TABLE III

CROSS-MODALITY GENERALIZATION PERFORMANCE OF MED-R1 WITH THINK-AFTER RL POST-TRAINING. ACCURACY $(\%)$ ACROSS EIGHT MEDICAL IMAGING MODALITIES, WHERE ROWS INDICATE TRAINING MODALITIES AND COLUMNS TEST MODALITIES. DARKER CELL SHADES INDICATE HIGHER ACCURACY FOR CORRESPONDING TRAINING-TEST PAIRS IN EACH COLUMN. THE BASE MODEL IS QWEN2.5-VL-3B.

Think-after RL fine-tuned Qwen2.5-VL-3B

<table><tr><td>Test Train</td><td>CT</td><td>MRI</td><td>X-Ray</td><td>Ultrasound</td><td>Dermoscopy</td><td>Fundus</td><td>OCT</td><td>Microscopy</td><td>Overall</td></tr><tr><td>CT</td><td>96.64 ± 0.62</td><td>72.53 ± 1.08</td><td>85.45 ± 1.67</td><td>36.31 ± 2.03</td><td>66.16 ± 2.53</td><td>79.78 ± 2.37</td><td>86.20 ± 2.30</td><td>68.20 ± 2.75</td><td>73.91 ± 0.65</td></tr><tr><td>MRI</td><td>63.65 ± 1.67</td><td>98.87 ± 0.26</td><td>81.11 ± 1.92</td><td>33.70 ± 2.03</td><td>71.82 ± 2.45</td><td>81.51 ± 2.32</td><td>86.91 ± 2.30</td><td>67.57 ± 2.70</td><td>73.14 ± 0.67</td></tr><tr><td>X-Ray</td><td>79.20 ± 1.42</td><td>72.70 ± 1.10</td><td>94.61 ± 1.08</td><td>31.58 ± 2.00</td><td>70.21 ± 2.45</td><td>80.60 ± 2.32</td><td>85.97 ± 2.30</td><td>68.65 ± 2.70</td><td>72.94 ± 0.66</td></tr><tr><td>Ultrasound</td><td>59.06 ± 1.70</td><td>66.23 ± 1.15</td><td>80.25 ± 1.95</td><td>99.37 ± 0.34</td><td>67.38 ± 2.53</td><td>74.95 ± 2.55</td><td>78.77 ± 2.77</td><td>67.93 ± 2.75</td><td>74.24 ± 0.64</td></tr><tr><td>Dermoscopy</td><td>56.77 ± 1.71</td><td>66.86 ± 1.15</td><td>80.37 ± 1.95</td><td>34.72 ± 2.07</td><td>89.51 ± 1.68</td><td>75.05 ± 2.55</td><td>78.66 ± 2.77</td><td>67.30 ± 2.75</td><td>68.65 ± 0.67</td></tr><tr><td>Fundus</td><td>58.01 ± 1.68</td><td>65.89 ± 1.16</td><td>81.30 ± 1.86</td><td>32.84 ± 2.03</td><td>68.68 ± 2.53</td><td>92.53 ± 1.55</td><td>79.36 ± 2.77</td><td>66.22 ± 2.79</td><td>68.10 ± 0.69</td></tr><tr><td>OCT</td><td>79.98 ± 1.37</td><td>75.45 ± 1.05</td><td>85.51 ± 1.73</td><td>34.14 ± 2.03</td><td>71.21 ± 2.45</td><td>81.69 ± 2.32</td><td>99.29 ± 0.53</td><td>66.94 ± 2.75</td><td>74.28 ± 0.65</td></tr><tr><td>Microscopy</td><td>59.27 ± 1.68</td><td>64.22 ± 1.19</td><td>81.11 ± 1.89</td><td>34.47 ± 2.05</td><td>63.86 ± 2.60</td><td>73.68 ± 2.64</td><td>78.42 ± 2.77</td><td>86.67 ± 2.03</td><td>67.71 ± 0.69</td></tr><tr><td>Overall</td><td>69.07 ± 1.62</td><td>72.84 ± 1.10</td><td>83.72 ± 1.76</td><td>42.14 ± 2.12</td><td>71.10 ± 2.53</td><td>79.97 ± 2.37</td><td>84.20 ± 2.47</td><td>69.93 ± 2.65</td><td>71.62 ± 0.66</td></tr></table>

CT - Computed Tomography; MRI - Magnetic Resonance Imaging; US - Ultrasound; Der - Dermoscopy; FP - Fundus Photography; OCT - Optical Coherence Tomography; Micro - Microscopy Images; X-Ray - X-Ray Imaging
# C. Think-after Med-R1

As noted in the previous section, removing both reasoning and format supervision during RL can sometimes improve performance. However, medical AI systems must not only achieve high accuracy but also provide reasoning that physicians can review and validate. To address this, we introduce a new reasoning protocol termed Think-After, in which the model first predicts the answer and then generates a post-hoc rationale explaining that decision. This design preserves interpretability while minimizing the instability introduced by lengthy reasoning chains. Concretely, the instruction prompt is revised as: "{Question}. Output the single-letter choice (A, B, C, D, ...) in <answer>...</answer> tags. Then provide the reasoning step by step to explain why you chose this answer.", where {Question} is replaced by each specific VQA item. By separating answer prediction from rationale generation, we reduce interference between reasoning and decision processes, achieving a balanced trade-off between accuracy and interpretability.
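A small sketch of the Think-After prompt and the answer/rationale parsing follows; the regexes are our own illustration, not code from the paper. The answer is read from the leading <answer> tag, and whatever follows is kept as the post-hoc rationale.

```python
# Think-After prompt and post-hoc rationale parsing (illustrative).
import re

def build_think_after_prompt(question: str) -> str:
    return (f"{question}. Output the single-letter choice (A, B, C, D, ...) "
            "in <answer>...</answer> tags. Then provide the reasoning step "
            "by step to explain why you chose this answer.")

def parse_think_after(response: str):
    """Return (answer_letter, rationale); the rationale follows the tag."""
    m = re.match(r"\s*<answer>\s*([A-Z])[^<]*</answer>\s*(.*)", response,
                 flags=re.DOTALL)
    return (m.group(1), m.group(2).strip()) if m else (None, "")
```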
TABLE IV

PERFORMANCE COMPARISON ACROSS EIGHT MEDICAL MODALITIES. OUR GRPO-FINETUNED MODEL OUTPERFORMS ZERO-SHOT GENERAL-PURPOSE VLMS, MEDICAL-DOMAIN VLMS, AND SUPERVISED FINE-TUNING BASELINES, WHILE MAINTAINING SCALABILITY. THE BEST AND SECOND-BEST PERFORMANCES PER COLUMN ARE HIGHLIGHTED IN RED AND BLUE. MODALITY ABBREVIATIONS: CT - COMPUTED TOMOGRAPHY; MRI - MAGNETIC RESONANCE IMAGING; US - ULTRASOUND; DER - DERMOSCOPY; FP - FUNDUS PHOTOGRAPHY; OCT - OPTICAL COHERENCE TOMOGRAPHY; MICRO - MICROSCOPY; X-RAY - X-RAY IMAGING.

<table><tr><td>Modality Methods</td><td>CT</td><td>MRI</td><td>X-Ray</td><td>Ultrasound</td><td>Dermoscopy</td><td>Fundus</td><td>OCT</td><td>Microscopy</td><td>Overall</td></tr><tr><td colspan="10">Zero-shot VLMs</td></tr><tr><td>BLIP-2† [15]</td><td>56.74</td><td>41.32</td><td>67.58</td><td>37.27</td><td>40.65</td><td>46.24</td><td>68.08</td><td>50.40</td><td>51.04</td></tr><tr><td>InstructBLIP† [27]</td><td>28.72</td><td>33.15</td><td>61.04</td><td>41.25</td><td>62.22</td><td>50.31</td><td>42.59</td><td>46.29</td><td>45.70</td></tr><tr><td>LLaVA† [28]</td><td>17.73</td><td>26.72</td><td>30.70</td><td>18.66</td><td>49.74</td><td>47.11</td><td>33.73</td><td>28.87</td><td>31.66</td></tr><tr><td>LLaMA Adapter v2† [29]</td><td>21.41</td><td>26.63</td><td>46.44</td><td>34.05</td><td>51.76</td><td>50.74</td><td>33.00</td><td>38.66</td><td>37.83</td></tr><tr><td>MiniGPT-4† [30]</td><td>22.81</td><td>27.48</td><td>38.30</td><td>25.50</td><td>40.25</td><td>38.33</td><td>31.40</td><td>36.23</td><td>32.54</td></tr><tr><td>InternVL2 [31]</td><td>40.20</td><td>58.10</td><td>57.90</td><td>49.10</td><td>51.90</td><td>53.20</td><td>59.10</td><td>64.00</td><td>54.19</td></tr><tr><td>Qwen2-VL-2B [3]</td><td>45.10</td><td>38.57</td><td>39.32</td><td>30.86</td><td>35.83</td><td>43.17</td><td>35.14</td><td>36.85</td><td>38.11</td></tr><tr><td>Qwen2-VL-7B [3]</td><td>61.46</td><td>45.77</td><td>64.27</td><td>36.01</td><td>49.08</td><td>59.84</td><td>59.32</td><td>61.08</td><td>54.60</td></tr><tr><td>Qwen2-VL-72B [32]</td><td>67.97</td><td>69.39</td><td>77.21</td><td>51.39</td><td>65.31</td><td>72.58</td><td>72.76</td><td>67.83</td><td>68.05</td></tr><tr><td>Qwen2.5-VL-3B [33]</td><td>53.87</td><td>54.23</td><td>61.84</td><td>32.69</td><td>52.94</td><td>62.47</td><td>56.23</td><td>59.64</td><td>54.24</td></tr><tr><td>Qwen2.5-VL-7B [33]</td><td>60.44</td><td>58.44</td><td>73.99</td><td>30.66</td><td>62.48</td><td>67.30</td><td>61.20</td><td>67.84</td><td>60.29</td></tr><tr><td>Qwen2.5-VL-72B [33]</td><td>66.18</td><td>68.74</td><td>77.59</td><td>49.81</td><td>69.75</td><td>71.04</td><td>69.22</td><td>69.37</td><td>67.71</td></tr><tr><td colspan="10">Zero-shot Medical VLMs</td></tr><tr><td>LLaVA-Med† [8]</td><td>18.69</td><td>27.47</td><td>30.68</td><td>29.88</td><td>44.95</td><td>39.03</td><td>34.61</td><td>33.29</td><td>32.33</td></tr><tr><td>RadFM† [34]</td><td>27.56</td><td>24.06</td><td>30.95</td><td>16.57</td><td>39.21</td><td>36.89</td><td>32.80</td><td>27.97</td><td>29.50</td></tr><tr><td>Med-Flamingo† [6]</td><td>38.47</td><td>40.56</td><td>30.34</td><td>24.64</td><td>32.43</td><td>30.12</td><td>26.51</td><td>19.93</td><td>30.38</td></tr><tr><td>MedVInT† [7]</td><td>40.74</td><td>43.10</td><td>55.10</td><td>41.26</td><td>29.11</td><td>31.84</td><td>23.26</td><td>32.00</td><td>37.05</td></tr><tr><td>HuatuoGPT-Vision [9]</td><td>35.30</td><td>40.40</td><td>41.50</td><td>60.10</td><td>53.10</td><td>51.40</td><td>59.30</td><td>62.30</td><td>50.43</td></tr><tr><td>HealthGPT [35]</td><td>35.50</td><td>78.50</td><td>81.90</td><td>51.40</td><td>64.90</td><td>54.60</td><td>89.30</td><td>88.20</td><td>68.04</td></tr><tr><td colspan="10">Fine-tuned VLMs</td></tr><tr><td>Qwen2-VL-2B (SFT)</td><td>51.74</td><td>52.83</td><td>65.57</td><td>47.65</td><td>51.91</td><td>52.26</td><td>53.99</td><td>56.58</td><td>54.07</td></tr><tr><td>Qwen2.5-VL-3B (SFT)</td><td>56.06</td><td>60.81</td><td>69.23</td><td>41.77</td><td>60.11</td><td>69.19</td><td>63.95</td><td>65.66</td><td>60.85</td></tr><tr><td>Qwen2-VL-2B (Think)</td><td>66.30</td><td>71.67</td><td>77.73</td><td>57.31</td><td>72.33</td><td>71.20</td><td>71.96</td><td>70.80</td><td>69.91</td></tr><tr><td>Qwen2-VL-2B (Nothink)</td><td>72.19</td><td>74.37</td><td>78.37</td><td>54.43</td><td>74.73</td><td>75.07</td><td>76.59</td><td>74.13</td><td>72.49</td></tr><tr><td>Qwen2.5-VL-3B (Think)</td><td>73.46</td><td>68.77</td><td>83.72</td><td>45.21</td><td>68.88</td><td>77.40</td><td>82.18</td><td>69.47</td><td>71.14</td></tr><tr><td>Qwen2.5-VL-3B (Think-after)</td><td>69.07</td><td>72.84</td><td>83.72</td><td>42.14</td><td>71.10</td><td>79.97</td><td>84.20</td><td>69.93</td><td>71.62</td></tr><tr><td>Qwen2.5-VL-3B (Nothink)</td><td>69.89</td><td>72.91</td><td>84.52</td><td>43.91</td><td>73.62</td><td>80.10</td><td>84.18</td><td>71.44</td><td>72.57</td></tr></table>
# IV. EXPERIMENT & RESULTS

# A. Setup

Datasets. We adopt the VQA data from the open-access part of the OmniMedVQA benchmark [36], which consists of a total of 82,059 images and 88,996 vision question answering pairs. OmniMedVQA includes VQA pairs from eight imaging modalities: CT (15,808), MRI (31,877), X-Ray (7,916), Ultrasound (10,991), Dermoscopy (6,679), Fundus (5,398), OCT (4,646), and Microscopy (5,680). It is also categorized into five VQA question types, including Anatomy Identification (16,448), Disease Diagnosis (55,387), Lesion Grading (2,098), Modality Recognition (11,565), and Other Biological Attributes (3,498). We split the dataset into training and test sets following an 80-20 ratio for each setting.
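As an illustration of this 80-20 partitioning, here is a minimal sketch that splits VQA items per modality; the item schema with a "modality" key is an assumption for illustration.

```python
# Illustrative 80-20 split of VQA items, grouped by modality.
import random

def split_by_modality(items, train_frac=0.8, seed=0):
    rng = random.Random(seed)
    by_mod = {}
    for item in items:                      # item: {"modality": ..., ...}
        by_mod.setdefault(item["modality"], []).append(item)
    splits = {}
    for mod, pool in by_mod.items():
        rng.shuffle(pool)                   # shuffle before cutting
        cut = int(len(pool) * train_frac)
        splits[mod] = {"train": pool[:cut], "test": pool[cut:]}
    return splits
```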
Implementation Details. Training is conducted on an HGX H100 [37] server with 2×H100 GPUs (80GB VRAM) using PyTorch [38] and FlashAttention-2 [39] for optimized efficiency. We initialize from Qwen2-VL-2B-Instruct [3] with full-parameter tuning, employing a per-GPU batch size of 1 (effective batch size 4 via 2-step gradient accumulation) and bfloat16 mixed precision. Input sequences combine visual embeddings from $328 \times 328$ resolution images (max 401k pixels) with textual prompts truncated to 1,024 tokens. The GRPO policy generates four candidate rationales per sample, with a sampling temperature of $\tau = 0.7$. Each training run lasts one epoch.
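The stated rollout setup ($G = 4$ candidates at $\tau = 0.7$) maps onto a standard Hugging Face generation config as sketched below; `max_new_tokens` is an assumption, not specified in the paper.

```python
# Illustrative GRPO rollout sampling settings.
from transformers import GenerationConfig

grpo_sampling = GenerationConfig(
    do_sample=True,
    temperature=0.7,           # tau = 0.7
    num_return_sequences=4,    # G = 4 candidate rationales per sample
    max_new_tokens=512,        # assumption: not specified in the paper
)
```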
Task setting. We evaluate our approach in two distinct generalization settings using the OmniMedVQA dataset [36]: cross-modality generalization and cross-task generalization.

- Cross-modality generalization: We train our model on a single modality at a time (out of 8 available modalities) and evaluate its performance on the other seven modalities.
- Cross-task generalization: We identify 5 distinct task types within the dataset and adopt the same train-test partitioning strategy as in the cross-modality setting, training on one task and evaluating on the other four.

We focus on VQA, which integrates core vision-language abilities such as classification, grounding, and reasoning into a unified framework, making it a representative precursor to detection or captioning tasks.
TABLE V

CROSS-TASK GENERALIZATION OF MED-R1: PERFORMANCE IS EVALUATED ACROSS FIVE CLINICAL REASONING TASK TYPES (ROWS: TRAINING TASKS, COLUMNS: TEST TASKS), WITH DARKER CELL SHADING INDICATING STRONGER GENERALIZATION. RESULTS DEMONSTRATE THAT DOMAIN-SPECIFIC TRAINING (E.G., DISEASE DIAGNOSIS) PRESERVES IN-TASK EXPERTISE WHILE MAINTAINING ADAPTABILITY TO UNSEEN TASKS, PARTICULARLY FOR MODALITY-AGNOSTIC SKILLS LIKE MODALITY RECOGNITION. THE BASE MODEL IS QWEN2.5-VL-3B.

<table><tr><td colspan="7">RL fine-tuned Qwen2.5-VL-3B</td></tr><tr><td>Test Train</td><td>Anatomy Identification</td><td>Disease Diagnosis</td><td>Lesion Grading</td><td>Modality Recognition</td><td>Other Attributes</td><td>Overall</td></tr><tr><td>Anatomy Identification</td><td>94.65 ± 0.75</td><td>61.87 ± 0.90</td><td>63.30 ± 4.59</td><td>96.66 ± 0.73</td><td>78.69 ± 3.05</td><td>79.04 ± 0.65</td></tr><tr><td>Disease Diagnosis</td><td>43.83 ± 1.67</td><td>97.74 ± 0.28</td><td>79.82 ± 3.67</td><td>96.66 ± 0.73</td><td>87.78 ± 2.41</td><td>81.16 ± 0.49</td></tr><tr><td>Lesion Grading</td><td>47.10 ± 1.71</td><td>59.33 ± 0.93</td><td>88.99 ± 2.98</td><td>96.03 ± 0.77</td><td>69.03 ± 3.41</td><td>72.10 ± 0.70</td></tr><tr><td>Modality Recognition</td><td>45.96 ± 1.67</td><td>58.63 ± 0.91</td><td>67.89 ± 4.36</td><td>99.08 ± 0.40</td><td>69.46 ± 3.41</td><td>68.20 ± 0.71</td></tr><tr><td>Other Attributes</td><td>44.04 ± 1.68</td><td>60.09 ± 0.92</td><td>60.09 ± 4.59</td><td>95.74 ± 0.82</td><td>94.74 ± 1.63</td><td>70.94 ± 0.70</td></tr><tr><td>Overall</td><td>55.12 ± 0.75</td><td>67.53 ± 0.39</td><td>72.02 ± 1.86</td><td>96.83 ± 0.31</td><td>79.94 ± 1.32</td><td>74.29 ± 0.65</td></tr></table>
TABLE VI

NO-THINK CROSS-TASK GENERALIZATION OF MED-R1: PERFORMANCE IS EVALUATED ACROSS FIVE CLINICAL REASONING TASK TYPES (ROWS: TRAINING TASKS, COLUMNS: TEST TASKS), WITH DARKER CELL SHADING INDICATING STRONGER GENERALIZATION.

<table><tr><td colspan="7">No-Think RL fine-tuned Qwen2.5-VL-3B</td></tr><tr><td>Test Train</td><td>Anatomy Identification</td><td>Disease Diagnosis</td><td>Lesion Grading</td><td>Modality Recognition</td><td>Other Attributes</td><td>Overall</td></tr><tr><td>Anatomy Identification</td><td>96.85 ± 0.57</td><td>63.86 ± 0.90</td><td>79.59 ± 3.78</td><td>96.82 ± 0.69</td><td>77.41 ± 3.12</td><td>82.91 ± 0.64</td></tr><tr><td>Disease Diagnosis</td><td>44.82 ± 1.73</td><td>98.78 ± 0.21</td><td>85.78 ± 3.33</td><td>95.82 ± 0.82</td><td>90.91 ± 2.13</td><td>83.22 ± 0.48</td></tr><tr><td>Lesion Grading</td><td>44.61 ± 1.70</td><td>60.27 ± 0.91</td><td>97.02 ± 1.61</td><td>95.90 ± 0.79</td><td>82.10 ± 2.84</td><td>75.98 ± 0.70</td></tr><tr><td>Modality Recognition</td><td>47.07 ± 1.71</td><td>59.58 ± 0.91</td><td>75.92 ± 3.90</td><td>99.75 ± 0.19</td><td>81.53 ± 2.84</td><td>72.77 ± 0.70</td></tr><tr><td>Other Attributes</td><td>43.98 ± 1.71</td><td>61.11 ± 0.90</td><td>61.70 ± 4.59</td><td>96.07 ± 0.77</td><td>95.60 ± 1.49</td><td>71.69 ± 0.71</td></tr><tr><td>Overall</td><td>55.46 ± 0.77</td><td>68.72 ± 0.39</td><td>80.00 ± 1.67</td><td>96.87 ± 0.31</td><td>85.51 ± 1.16</td><td>77.31 ± 0.62</td></tr></table>
# Baseline Methods & Evaluation Metric.

We report our results by separating the baselines into three groups. Zero-shot VLMs are models pre-trained for general-purpose VQA without medical adaptation. Medical VLMs are models pre-trained on medical data specifically for medical VQA. Fine-tuned VLMs are models trained on OmniMedVQA using supervised fine-tuning and RL fine-tuning. We evaluate model performance using VQA choice accuracy, the standard metric for medical VQA, where the model selects the correct answer from $K$ clinically validated options. Given an image $I$, a question $Q$, and candidate answers $\{A_k\}_{k=1}^K$, accuracy is defined as:
$$
\text{Accuracy} = \frac{1}{N} \sum_{i=1}^{N} \mathbb{I}\left(\hat{y}_i = y_i\right), \tag{2}
$$

where $N$ is the total number of test cases, $\hat{y}_i$ is the predicted answer index, $y_i$ the ground truth, and $\mathbb{I}$ the indicator function (1 if correct, 0 otherwise).
To assess result reliability, we computed $95\%$ bootstrap confidence intervals (CIs) for all model variants across modalities and tasks. Each experiment performed 10,000 bootstrap resamplings of binary accuracy labels to estimate the mean and confidence bounds, reporting mean $\pm$ half-width for each model.
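This CI computation reduces to resampling the binary correctness vector; a minimal NumPy sketch follows, with the percentile-interval convention as our assumption.

```python
# Illustrative 95% bootstrap CI over binary per-question correctness labels.
import numpy as np

def bootstrap_ci(correct, n_boot=10_000, seed=0):
    correct = np.asarray(correct, dtype=float)
    rng = np.random.default_rng(seed)
    # Resample indices with replacement, n_boot times.
    idx = rng.integers(0, len(correct), size=(n_boot, len(correct)))
    means = correct[idx].mean(axis=1)           # resampled accuracies
    lo, hi = np.percentile(means, [2.5, 97.5])  # 95% percentile interval
    return 100 * correct.mean(), 100 * (hi - lo) / 2  # mean %, half-width %
```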
All fine-tuned VLM results (SFT, GRPO+Think, GRPO+Think-After, and GRPO+No-Think) are computed under the identical averaging protocol—macro-averaged accuracy across all cross-modality training-testing pairs—to ensure fair and consistent comparison.
# B. Cross-Modality Generalization

We comprehensively evaluate Med-R1's adaptability across eight medical imaging modalities, including Computed Tomography, Magnetic Resonance Imaging, Ultrasound, Dermoscopy, Fundus Photography, Optical Coherence Tomography, Microscopy Images, and X-ray Imaging. Our experiments focus on two key aspects: (1) cross-modal generalization, where the model is trained on one modality and tested on another, and (2) comparative performance against other popular VLMs and medical-specific VLMs evaluated using zero-shot and SFT.
Results on generalization. To evaluate Med-R1's cross-modality generalization, we measure its accuracy across eight distinct medical imaging modalities (Table I). The Overall row and column summarize the model's average performance across training and test domains, providing insight into its generalization ability. Med-R1 achieves a strong overall accuracy of $69.91\%$, demonstrating its ability to generalize across diverse medical imaging modalities. Notably, models trained on CT, MRI, and X-Ray exhibit the highest generalization capability, with overall scores of $71.44\%$, $71.26\%$, and $72.35\%$, respectively. In contrast, models trained on Fundus Photography and Microscopy images show lower generalization, with $67.67\%$ and $67.54\%$ overall accuracy, indicating that certain modality-specific features (e.g., texture-based imaging in US and Micro) may not transfer as effectively to other domains. Importantly, the overall test accuracy of $69.91\%$ highlights Med-R1's ability to perform well across unseen imaging modalities, despite being trained on a single domain at a time. This result underscores the effectiveness of reinforcement learning in enhancing cross-modality transfer, allowing the model to maintain robust performance without requiring extensive retraining for each medical imaging modality.

Comparison to zero-shot and SFT evaluations with other VLMs. As shown in Table IV, Med-R1 demonstrates superiority across all eight medical imaging modalities while maintaining exceptional parameter efficiency. For zero-shot results, each cell denotes the zero-shot evaluation accuracy of the model on the particular modality. For all the fine-tuned VLM results, each cell represents the overall accuracy, reflecting the average generalization performance on the given modality when evaluated using models that were separately fine-tuned on each of the eight training modalities. Against general-purpose VLMs, our 2B-parameter model achieves $69.91\%$ overall accuracy, surpassing the 72B-parameter Qwen2-VL by $1.86\%$, a notable result given the $36\times$ parameter disparity. This advantage amplifies in critical diagnostic tasks: Med-R1 attains $71.67\%$ accuracy in MRI compared to Qwen2-VL-72B's $69.39\%$, and achieves $72.33\%$ versus $65.31\%$ in dermoscopy, challenging the prevailing scale-equals-performance paradigm. The limitations of specialized medical VLMs become evident through Med-Flamingo's $30.38\%$ average accuracy, which Med-R1 outperforms by $39.53\%$. This stark contrast underscores the ineffectiveness of narrow medical pretraining compared to our RL-driven adaptation strategy. When compared to supervised fine-tuning approaches, GRPO delivers $15.84\%$ accuracy gains over SFT-tuned Qwen2-VL-2B ($69.91\%$ vs. $54.07\%$), with particularly significant improvements in CT interpretation ($66.30\%$ vs. $51.74\%$) and OCT analysis ($71.96\%$ vs. $53.99\%$).
TABLE VII

THINK-AFTER CROSS-TASK GENERALIZATION OF MED-R1: PERFORMANCE IS EVALUATED ACROSS FIVE CLINICAL REASONING TASK TYPES (ROWS: TRAINING TASKS, COLUMNS: TEST TASKS), WITH DARKER CELL SHADING INDICATING STRONGER GENERALIZATION.

<table><tr><td colspan="7">Think-After fine-tuned Qwen2.5-VL-3B</td></tr><tr><td>Test Train</td><td>Anatomy Identification</td><td>Disease Diagnosis</td><td>Lesion Grading</td><td>Modality Recognition</td><td>Other Attributes</td><td>Overall</td></tr><tr><td>Anatomy Identification</td><td>95.22 ± 0.74</td><td>60.62 ± 0.90</td><td>65.83 ± 4.36</td><td>96.57 ± 0.73</td><td>81.53 ± 2.91</td><td>79.95 ± 0.65</td></tr><tr><td>Disease Diagnosis</td><td>43.11 ± 1.68</td><td>97.86 ± 0.27</td><td>81.65 ± 3.67</td><td>95.99 ± 0.77</td><td>92.33 ± 1.99</td><td>82.19 ± 0.49</td></tr><tr><td>Lesion Grading</td><td>43.56 ± 1.70</td><td>59.78 ± 0.92</td><td>85.55 ± 3.21</td><td>96.20 ± 0.77</td><td>78.55 ± 3.05</td><td>72.73 ± 0.71</td></tr><tr><td>Modality Recognition</td><td>44.73 ± 1.71</td><td>58.82 ± 0.93</td><td>68.12 ± 4.47</td><td>98.87 ± 0.44</td><td>78.12 ± 3.05</td><td>69.73 ± 0.71</td></tr><tr><td>Other Attributes</td><td>45.21 ± 1.67</td><td>59.65 ± 0.90</td><td>67.43 ± 4.36</td><td>95.74 ± 0.82</td><td>95.74 ± 1.49</td><td>72.75 ± 0.71</td></tr><tr><td>Overall</td><td>54.36 ± 0.75</td><td>67.34 ± 0.40</td><td>73.72 ± 1.86</td><td>96.67 ± 0.32</td><td>85.26 ± 1.18</td><td>75.47 ± 0.63</td></tr></table>
TABLE VIII

PERFORMANCE COMPARISON OF VLMS ON FIVE MEDICAL VQA TASKS: GRPO FINE-TUNING OUTPERFORMS ZERO-SHOT AND SFT BASELINES ACROSS DIVERSE REASONING TASKS. PERFORMANCE IS EVALUATED ON FIVE CLINICAL REASONING TYPES (COLUMNS) ACROSS THREE MODEL CATEGORIES: GENERAL-PURPOSE VLMS (ZERO-SHOT), MEDICAL VLMS (ZERO-SHOT), AND FINE-TUNED VLMS. THE BEST AND SECOND-BEST PERFORMANCES ARE MARKED IN RED AND BLUE.

<table><tr><td>Types Methods</td><td>Anatomy Identification</td><td>Disease Diagnosis</td><td>Lesion Grading</td><td>Modality Recognition</td><td>Other Attributes</td><td>Overall</td></tr><tr><td colspan="7">Zero-shot VLMs</td></tr><tr><td>BLIP-2† [15]</td><td>44.39</td><td>44.51</td><td>29.03</td><td>68.19</td><td>67.95</td><td>48.12</td></tr><tr><td>InstructBLIP† [27]</td><td>44.35</td><td>32.29</td><td>59.25</td><td>75.27</td><td>23.72</td><td>40.40</td></tr><tr><td>LLaVA† [28]</td><td>25.86</td><td>29.10</td><td>43.95</td><td>21.36</td><td>31.90</td><td>27.96</td></tr><tr><td>LLaMA Adapter v2† [29]</td><td>33.72</td><td>31.19</td><td>41.99</td><td>37.29</td><td>34.22</td><td>32.82</td></tr><tr><td>MiniGPT-4† [30]</td><td>28.88</td><td>30.47</td><td>34.56</td><td>26.43</td><td>30.36</td><td>29.74</td></tr><tr><td>Qwen2-VL-2B [3]</td><td>30.70</td><td>36.53</td><td>43.58</td><td>59.90</td><td>42.19</td><td>42.58</td></tr><tr><td>Qwen2-VL-7B [3]</td><td>42.57</td><td>48.83</td><td>52.06</td><td>84.74</td><td>59.66</td><td>57.57</td></tr><tr><td>Qwen2-VL-72B [32]</td><td>56.41</td><td>65.71</td><td>62.15</td><td>98.11</td><td>80.53</td><td>72.58</td></tr><tr><td>Qwen2.5-VL-3B [33]</td><td>35.23</td><td>50.45</td><td>52.79</td><td>85.23</td><td>54.77</td><td>55.69</td></tr><tr><td>Qwen2.5-VL-7B [33]</td><td>41.00</td><td>61.32</td><td>54.13</td><td>97.78</td><td>70.45</td><td>64.94</td></tr><tr><td>Qwen2.5-VL-72B [33]</td><td>57.22</td><td>62.55</td><td>60.32</td><td>98.20</td><td>77.41</td><td>71.14</td></tr><tr><td colspan="7">Zero-shot Medical VLMs</td></tr><tr><td>LLaVA-Med† [8]</td><td>29.53</td><td>29.22</td><td>34.18</td><td>26.93</td><td>33.08</td><td>29.25</td></tr><tr><td>RadFM† [34]</td><td>13.31</td><td>21.69</td><td>30.35</td><td>26.64</td><td>43.85</td><td>26.99</td></tr><tr><td>Med-Flamingo† [6]</td><td>24.93</td><td>38.90</td><td>30.74</td><td>30.19</td><td>14.18</td><td>34.03</td></tr><tr><td>MedVInT† [7]</td><td>40.26</td><td>35.78</td><td>12.77</td><td>68.10</td><td>30.30</td><td>40.04</td></tr><tr><td colspan="7">Fine-tuned VLMs</td></tr><tr><td>Qwen2-VL-2B (SFT)</td><td>53.97</td><td>51.62</td><td>60.71</td><td>86.77</td><td>63.91</td><td>63.39</td></tr><tr><td>Qwen2.5-VL-3B (SFT)</td><td>54.91</td><td>57.75</td><td>64.04</td><td>84.95</td><td>71.56</td><td>66.64</td></tr><tr><td>Qwen2-VL-2B (Think)</td><td>62.88</td><td>66.08</td><td>65.87</td><td>98.24</td><td>80.14</td><td>74.64</td></tr><tr><td>Qwen2-VL-2B (Nothink)</td><td>63.74</td><td>66.32</td><td>66.33</td><td>98.44</td><td>81.31</td><td>75.22</td></tr><tr><td>Qwen2.5-VL-3B (Think)</td><td>55.12</td><td>67.53</td><td>72.02</td><td>96.83</td><td>79.94</td><td>74.29</td></tr><tr><td>Qwen2.5-VL-3B (Think-after)</td><td>54.36</td><td>67.34</td><td>73.72</td><td>96.67</td><td>85.26</td><td>75.47</td></tr><tr><td>Qwen2.5-VL-3B (Nothink)</td><td>55.47</td><td>68.72</td><td>80.00</td><td>96.87</td><td>85.51</td><td>77.31</td></tr></table>
240
+ maintaining exceptional parameter efficiency. For zero-shot results, each cell denotes the zero-shot evaluation accuracy of the model on the particular modality. For all the fine-tuned VLM results, each cell represents the overall accuracy, reflecting the average generalization performance of the given modality when evaluated using models that were separately fine-tuned on each of the eight training modalities. Against general-purpose VLMs, our 2B-parameter model achieves $69.91\%$ overall accuracy, surpassing the 72B-parameter Qwen2-VL by $1.86\%$, a notable result given the $36\times$ parameter disparity. This advantage amplifies in critical diagnostic tasks: Med-R1 attains $71.67\%$ accuracy in MRI compared to Qwen2-VL-72B's $69.39\%$ and achieves $72.33\%$ versus $65.31\%$ in dermoscopy, challenging the prevailing scale-equals-performance paradigm. The limitations of specialized medical VLMs become evident through Med-Flamingo's $30.38\%$ average accuracy, which Med-R1 outperforms by $39.53\%$. This stark contrast underscores the ineffectiveness of narrow medical pretraining compared to our RL-driven adaptation strategy. When compared to supervised fine-tuning approaches, GRPO delivers $15.84\%$ accuracy gains over SFT-tuned Qwen2-VL-2B (69.91% vs. 54.07%), with particularly significant improvements in CT interpretation (66.30% vs. 51.74%) and OCT analysis (71.96% vs. 53.99%).
243
+
244
+ # C. Cross-Task Generalization
245
+
246
+ We also evaluate Med-R1's generalization across five important clinical tasks [36]: Anatomy Identification, Disease Diagnosis, Lesion Grading, Modality Recognition, and Other Attributes. Similar to subsection IV-B, we focus our evaluation on two aspects: cross-task generalization and comparison against zero-shot and SFT evaluations of other VLMs.
247
+
248
+ ![](images/8b7e996d31c55436cbe2572cef3ce2510e40baa10d74a32cbb697fcfeef6ea1a.jpg)
249
+ Fig. 3. Training accuracy-reward curves of the Think, No-Think, and Think-After models on CT and MRI datasets.
250
+
251
+ ![](images/5c19b24114a185413141accf26bb2110e546c00cdf7ea2d0b184a5b47c97c79f.jpg)
252
+
253
+ ![](images/a9fbe11f3609932ea9df90b68079cd1bdc769556cdec807f6e2e30e58aa055fa.jpg)
254
+ Fig. 4. Cross-modality accuracy difference between No-Thinking-Med-R1 and Med-R1. Each cell shows the performance gap (%) when trained on one modality (y-axis) and evaluated on others (x-axis). Red indicates improvement; blue indicates degradation.
255
+
256
+ ![](images/e531cfc87b3651b7f14e2dd6bb1d4fd968e84a2b8d9560ce487a2f138bd6d0d2.jpg)
257
+ Fig. 5. Cross-task accuracy difference between No-Thinking-Med-R1 and Med-R1. Each cell shows the performance gap (%) when trained on one task (y-axis) and tested on another (x-axis). Red indicates improvement; blue indicates degradation.
258
+
259
+ Results on generalization. As shown in Table V, models trained on "disease diagnosis" data achieve the best generalization, with $81.64\%$ overall accuracy. This suggests that disease diagnosis encompasses diverse feature representations that transfer well across tasks, likely due to its reliance on both anatomical and pathological cues. In contrast, models trained on "modality recognition" exhibit strong generalization in task-agnostic settings (98.24% in the "modality recognition" column), indicating that learning modality distinctions aids in extracting transferable image features. However, training on "lesion grading" leads to high in-task performance (86.24%) but relatively lower transferability, implying that this task captures more specialized features that do not generalize as effectively. These results highlight the trade-off between specialization and adaptability, emphasizing the importance of task selection when designing models for broad medical applications.
260
+
261
+ Comparison to zero-shot and SFT evaluations with other VLMs. Table VIII shows the comparison results against other popular VLMs evaluated with zero-shot and SFT. For zero-shot results, each cell denotes the zero-shot evaluation accuracy of the model on the particular task. For the fine-tuned VLM results (bottom block), each cell represents the overall accuracy, reflecting the average generalization performance on the given task when evaluated using models that were separately fine-tuned on each of the five training tasks. First of all, the results clearly show that Med-R1 outperforms the zero-shot generalization of all other popular VLMs. Remarkably, Med-R1 even outperforms Qwen2-VL-72B (74.64% vs. 72.58%), a model with 70 billion more parameters. More importantly, this suggests that RL can effectively elevate small models with moderate capacity, opening doors for many real-world applications where resources are constrained. In contrast, the average generalization of the identical base model trained with SFT is merely $63.39\%$, $11.25\%$ below Med-R1, further demonstrating the strong generalization capability of Med-R1.
264
+
265
+ # D. Analysis of No-Think
266
+
267
+ We present the results of No-Thinking-Med-R1 in this section. Cross-task performance is shown in Table VI, and cross-modality performance is shown in Table II. Overall, No-Thinking-Med-R1 achieves stronger in-domain performance across all settings compared to Med-R1, suggesting that removing explicit reasoning generation may lead to more effective task-specific learning. To assess generalization, we compare Med-R1 and No-Thinking-Med-R1 across modalities and tasks. As visualized in Figure 4 and Figure 5, No-Thinking-Med-R1 consistently outperforms Med-R1 in cross-modality settings. In cross-task scenarios, however, the results are more mixed—showing gains in some tasks and declines in others. These findings reinforce the practical motivation behind Med-R1. In clinical domains, acquiring high-quality CoT supervision is prohibitively costly and often impractical. While reasoning improves performance in general domains, our results show that this does not necessarily hold in medical settings, where unsupervised rationales may become unreliable under domain shift. Med-R1 demonstrates that even without reasoning supervision, RL can offer a robust and efficient path for adapting VLMs to medicine.
268
+
269
+ # E. Analysis of Think-after
270
+
271
+ We introduce the Think-After strategy primarily to address the practical need for achieving both high accuracy and interpretable reasoning in medical VLMs (Table VII, Table III). Rather than serving solely as a control experiment, Think-After is designed to meet the dual objective of preserving predictive performance while providing reasoning traces that clinicians can review and validate.
272
+
273
+ Beyond satisfying the need for both accuracy and interpretability, Think-After also provides insight into why the No-Thinking strategy sometimes outperforms the conventional Thinking approach. As shown in Figure 3, Think-After achieves faster convergence and higher accuracy rewards than the Thinking variant, suggesting that generating reasoning tokens before the answer may disrupt the autoregressive generation process and hinder optimization. However, Think-After still performs slightly below No-Thinking, indicating that additional mechanisms—such as residual contextual coupling or reasoning-token noise—may also contribute to the performance gap. While Think-After does not completely resolve the question of why No-Thinking surpasses both pre- and post-answer reasoning, it fulfills the essential requirement of combining strong accuracy with interpretable outputs, and simultaneously offers valuable clues toward understanding the dynamics between reasoning generation and performance in medical VLMs.
274
+
275
+ # F. Reader Study
276
+
277
+ To further assess interpretability and clinical relevance, we conducted a reader study with three researchers experienced in medical imaging and vision-language models.
278
+
279
+ TABLE IX READER STUDY RESULTS. THREE READERS INDEPENDENTLY EVALUATED 100 CORRECTLY ANSWERED VQA SAMPLES FOR FACTUAL CORRECTNESS (QUALITY, 1-5) AND REASONING-ANSWER CONSISTENCY (CONSISTENCY, %).
280
+
281
+ <table><tr><td>Reader</td><td>Quality</td><td>Consistency (%)</td></tr><tr><td>A</td><td>4.23</td><td>93.0</td></tr><tr><td>B</td><td>4.19</td><td>92.0</td></tr><tr><td>C</td><td>4.34</td><td>95.0</td></tr><tr><td>Mean</td><td>4.25</td><td>93.3</td></tr></table>
282
+
283
+ Quality reflects factual and clinical correctness of model answers; Consistency measures logical alignment between reasoning and answers.
284
+
285
+ Each reader independently evaluated 100 representative VQA samples generated by different model variants, covering diverse imaging modalities and reasoning types (Table IX). Evaluations focused on (1) the factual and clinical correctness of model answers and (2) the logical consistency between the reasoning ("Think") and the predicted answer ("Answer"). Across readers, the Think-After model achieved the highest agreement and was consistently judged to produce reasoning that was both coherent and clinically sound, demonstrating improved interpretability and reasoning reliability for medical decision-support applications.
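+
+ The "Mean" row of Table IX is the unweighted average over the three readers; a two-line check:
+
+ ```python
+ quality = [4.23, 4.19, 4.34]      # per-reader quality scores (1-5 scale)
+ consistency = [93.0, 92.0, 95.0]  # per-reader reasoning-answer consistency (%)
+ print(round(sum(quality) / 3, 2), round(sum(consistency) / 3, 1))  # 4.25 93.3
+ ```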
286
+
287
+ # G. Limitations and Future Work
288
+
289
+ This work marks an initial step in applying RL to medical vision-language models. We adopt a frame-level VQA setting for consistent evaluation across modalities, but this simplifies the real-world complexity of medical imaging. In practice, CT and MRI are volumetric, and ultrasound is dynamic, requiring reasoning across slices and time. Another limitation lies in the reasoning supervision and data scale. Our findings suggest that the "No-Thinking" model occasionally outperforms reasoning-enabled variants, which may stem from the limited availability of high-quality medical reasoning data.
290
+
291
+ In domains where clinically faithful chain-of-thought (CoT) annotations are scarce, the model may not learn to benefit from explicit reasoning, leading the reasoning process to introduce noise rather than insight. We believe this reflects a broader limitation of current medical datasets rather than of reasoning itself. Future work should explore scaling up medically grounded CoT data and aligning RL rewards with clinically validated reasoning quality to fully realize the benefits of explicit reasoning. Future directions also include extending Med-R1 to support multi-frame or volumetric inputs, incorporating patient context, and investigating more advanced reasoning frameworks for clinical deployment.
292
+
293
+ # V. CONCLUSION
294
+
295
+ We present Med-R1, a reinforcement learning-enhanced vision-language model for improving medical reasoning across diverse imaging modalities and clinical tasks. Leveraging GRPO-based post-training, Med-R1 achieves strong cross-modality and cross-task generalization, surpassing the limits of supervised fine-tuning. Despite its compact 3B scale, Med-R1 performs competitively with or better than larger medical and general VLMs, while remaining efficient for deployment.
296
+
297
+ Through systematic analysis, we find that removing explicit reasoning (No-Think) improves convergence and generalization, suggesting that reasoning quality, not quantity, drives medical performance. To balance interpretability and accuracy, we introduce the Think-After strategy, which decouples reasoning from answer generation and enhances clinical transparency without compromising accuracy. Overall, Med-R1 establishes a scalable framework for reinforcement learning in medical VLMs and offers new insights into how reasoning dynamics interact with domain generalization, paving the way toward reliable and interpretable medical AI systems.
298
+
299
+ # REFERENCES
300
+
301
+ [1] A. Hurst, A. Lerer, A. P. Goucher, A. Perelman, A. Ramesh, A. Clark, A. Ostrow, A. Welihinda, A. Hayes, A. Radford et al., “Gpt-4o system card,” arXiv preprint arXiv:2410.21276, 2024.
302
+ [2] G. Team, "Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context," 2024. [Online]. Available: https://arxiv.org/abs/2403.05530
303
+ [3] J. Bai, S. Bai, Y. Chu, Z. Cui, K. Deng, X. Deng, Y. Fan, W. Ge, Y. Han, F. Huang et al., “Qwen technical report,” arXiv preprint arXiv:2309.16609, 2023.
304
+ [4] W. Chen, X. Ma, X. Wang, and W. W. Cohen, "Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks," arXiv preprint arXiv:2211.12588, 2022.
305
+ [5] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., “Gpt-4 technical report,” arXiv preprint arXiv:2303.08774, 2023.
306
+ [6] M. Moor, Q. Huang, S. Wu, M. Yasunaga, Y. Dalmia, J. Leskovec, C. Zakka, E. P. Reis, and P. Rajpurkar, "Med-flamingo: a multimodal medical few-shot learner," in Machine Learning for Health (ML4H). PMLR, 2023, pp. 353-367.
307
+ [7] X. Zhang, C. Wu, Z. Zhao, W. Lin, Y. Zhang, Y. Wang, and W. Xie, “Pmc-vqa: Visual instruction tuning for medical visual question answering,” arXiv preprint arXiv:2305.10415, 2023.
308
+ [8] C. Li, C. Wong, S. Zhang, N. Usuyama, H. Liu, J. Yang, T. Naumann, H. Poon, and J. Gao, “Llava-med: Training a large language-and-vision assistant for biomedicine in one day,” Advances in Neural Information Processing Systems, vol. 36, pp. 28541-28564, 2023.
309
+ [9] J. Chen, C. Gui, R. Ouyang, A. Gao, S. Chen, G. H. Chen, X. Wang, R. Zhang, Z. Cai, K. Ji et al., "Huatuogpt-vision, towards injecting medical visual knowledge into multimodal llms at scale," arXiv preprint arXiv:2406.19280, 2024.
310
+ [10] Y. Li, Y. Lai, M. Thor, D. Marshall, Z. Buchwald, D. S. Yu, and X. Yang, "Towards universal text-driven ct image segmentation," arXiv preprint arXiv:2503.06030, 2025.
311
+ [11] T. Chu, Y. Zhai, J. Yang, S. Tong, S. Xie, D. Schuurmans, Q. V. Le, S. Levine, and Y. Ma, "Sft memorizes, rl generalizes: A comparative study of foundation model post-training," arXiv preprint arXiv:2501.17161, 2025.
312
+ [12] Z. Shao, P. Wang, Q. Zhu, R. Xu, J. Song, X. Bi, H. Zhang, M. Zhang, Y. Li, Y. Wu et al., "Deepseekmath: Pushing the limits of mathematical reasoning in open language models," arXiv preprint arXiv:2402.03300, 2024.
313
+ [13] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, "Proximal policy optimization algorithms," arXiv preprint arXiv:1707.06347, 2017.
314
+ [14] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., "Learning transferable visual models from natural language supervision," in International conference on machine learning. PMLR, 2021, pp. 8748-8763.
315
+ [15] J. Li, D. Li, S. Savarese, and S. Hoi, "Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models," in International conference on machine learning. PMLR, 2023, pp. 19730-19742.
316
+ [16] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., "Training language models to follow instructions with human feedback," Advances in neural information processing systems, vol. 35, pp. 27730-27744, 2022.
317
+ [17] Y. Bai, S. Kadavath, S. Kundu, A. Askell, J. Kernion, A. Jones, A. Chen, A. Goldie, A. Mirhoseini, C. McKinnon et al., "Constitutional ai: Harmlessness from ai feedback," arXiv preprint arXiv:2212.08073, 2022.
318
+
319
+ [18] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., "Chain-of-thought prompting elicits reasoning in large language models," Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022.
320
+ [19] W. Li, C. Qu, X. Chen, P. R. Bassi, Y. Shi, Y. Lai, Q. Yu, H. Xue, Y. Chen, X. Lin et al., "Abdomenatlas: A large-scale, detailed-annotated, & multi-center dataset for efficient transfer learning and open algorithmic benchmarking," Medical Image Analysis, vol. 97, p. 103285, 2024.
321
+ [20] J. Pan, C. Liu, J. Wu, F. Liu, J. Zhu, H. B. Li, C. Chen, C. Ouyang, and D. Rueckert, "Medvlm-r1: Incentivizing medical reasoning capability of vision-language models (vlms) via reinforcement learning," arXiv preprint arXiv:2502.19634, 2025.
322
+ [21] W. Huang, B. Jia, Z. Zhai, S. Cao, Z. Ye, F. Zhao, Z. Xu, Y. Hu, and S. Lin, "Vision-r1: Incentivizing reasoning capability in multimodal large language models," 2025. [Online]. Available: https://arxiv.org/abs/2503.06749
323
+ [22] Z. Shao, P. Wang, Q. Zhu, R. Xu, J. Song, X. Bi, H. Zhang, M. Zhang, Y. K. Li, Y. Wu, and D. Guo, "Deepseekmath: Pushing the limits of mathematical reasoning in open language models," 2024. [Online]. Available: https://arxiv.org/abs/2402.03300
324
+ [23] S. Bai, K. Chen, X. Liu, J. Wang, W. Ge, S. Song, K. Dang, P. Wang, S. Wang, J. Tang, H. Zhong, Y. Zhu, M. Yang, Z. Li, J. Wan, P. Wang, W. Ding, Z. Fu, Y. Xu, J. Ye, X. Zhang, T. Xie, Z. Cheng, H. Zhang, Z. Yang, H. Xu, and J. Lin, "Qwen2.5-vl technical report," 2025. [Online]. Available: https://arxiv.org/abs/2502.13923
325
+ [24] S. Kullback and R. A. Leibler, “On information and sufficiency,” Annals of Mathematical Statistics, vol. 22, no. 1, pp. 79-86, 1951.
326
+ [25] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi et al., "Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning," arXiv preprint arXiv:2501.12948, 2025.
327
+ [26] M. Li, S. Zhao, J. Zhong, Y. Lai, and K. Zhang, "Cls-rl: Image classification with rule-based reinforcement learning," arXiv preprint arXiv:2503.16188, 2025.
328
+ [27] W. Dai, J. Li, D. Li, A. M. H. Tiong, J. Zhao, W. Wang, B. Li, P. Fung, and S. Hoi, "Instructblip: Towards general-purpose vision-language models with instruction tuning," 2023. [Online]. Available: https://arxiv.org/abs/2305.06500
329
+ [28] H. Liu, C. Li, Q. Wu, and Y. J. Lee, "Visual instruction tuning," Advances in neural information processing systems, vol. 36, pp. 34892-34916, 2023.
330
+ [29] P. Gao, J. Han, R. Zhang, Z. Lin, S. Geng, A. Zhou, W. Zhang, P. Lu, C. He, X. Yue et al., "Llama-adapter v2: Parameter-efficient visual instruction model," arXiv preprint arXiv:2304.15010, 2023.
331
+ [30] D. Zhu, J. Chen, X. Shen, X. Li, and M. Elhoseiny, “Minigpt-4: Enhancing vision-language understanding with advanced large language models,” arXiv preprint arXiv:2304.10592, 2023.
332
+ [31] Z. Chen, W. Wang, Y. Cao, Y. Liu, Z. Gao, E. Cui, J. Zhu, S. Ye, H. Tian, Z. Liu et al., "Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling," arXiv preprint arXiv:2412.05271, 2024.
333
+ [32] P. Wang, S. Bai, S. Tan, S. Wang, Z. Fan, J. Bai, K. Chen, X. Liu, J. Wang, W. Ge et al., “Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution,” arXiv preprint arXiv:2409.12191, 2024.
334
+ [33] S. Bai, K. Chen, X. Liu, J. Wang, W. Ge, S. Song, K. Dang, P. Wang, S. Wang, J. Tang et al., "Qwen2.5-vl technical report," arXiv preprint arXiv:2502.13923, 2025.
335
+ [34] C. Wu, X. Zhang, Y. Zhang, Y. Wang, and W. Xie, “Towards generalist foundation model for radiology by leveraging web-scale 2d&3d medical data,” arXiv preprint arXiv:2308.02463, 2023.
336
+ [35] T. Lin, W. Zhang, S. Li, Y. Yuan, B. Yu, H. Li, W. He, H. Jiang, M. Li, X. Song et al., "Healthgpt: A medical large vision-language model for unifying comprehension and generation via heterogeneous knowledge adaptation," arXiv preprint arXiv:2502.09838, 2025.
337
+ [36] Y. Hu, T. Li, Q. Lu, W. Shao, J. He, Y. Qiao, and P. Luo, "Omnimedvqa: A new large-scale comprehensive evaluation benchmark for medical lvlm," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 22170-22183.
338
+ [37] J. Choquette, "Nvidia hopper h100 gpu: Scaling performance," IEEE Micro, vol. 43, no. 3, pp. 9-17, 2023.
339
+ [38] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga et al., "Pytorch: An imperative style, high-performance deep learning library," Advances in neural information processing systems, vol. 32, pp. 8026-8037, 2019.
340
+ [39] T. Dao, "Flashattention-2: Faster attention with better parallelism and work partitioning," arXiv preprint arXiv:2307.08691, 2023.
data/2025/2503_13xxx/2503.13939/images/036dbbc8e10a472ebd88f0260be12f30dd4d7b331d23503ef14be8a8ea7fc14b.jpg ADDED

Git LFS Details

  • SHA256: cc61cee747015b55514110059379df3417dee459f418dadf2c4346dbec59fd94
  • Pointer size: 130 Bytes
  • Size of remote file: 51.1 kB
data/2025/2503_13xxx/2503.13939/images/18e45ba1a2c44e5ac250b9ca66403bae6b5bffe1e1c5aa3bd86a169ec719b2fb.jpg ADDED

Git LFS Details

  • SHA256: bd1fdac32abcd3063b262df61e296529d821a645548301683b23371db58767f6
  • Pointer size: 130 Bytes
  • Size of remote file: 21.7 kB
data/2025/2503_13xxx/2503.13939/images/3ea719bc07cf656d52313d02342b2739f9eaa3bf1cd6fe9e230f960e2a263d7d.jpg ADDED

Git LFS Details

  • SHA256: 289675e2e7f9b45aee47c95f571dfce6f25e49de3864a9d68833b02de3a82e01
  • Pointer size: 131 Bytes
  • Size of remote file: 127 kB
data/2025/2503_13xxx/2503.13939/images/3fc59ddd0c4cab56129b011962e2b7f2376ccde8d72b6efda8f479849cdf7b76.jpg ADDED

Git LFS Details

  • SHA256: 0bb94791e49c5bee87b244ef37300c99bae4e334409da9dcfdb5203979ba2f22
  • Pointer size: 131 Bytes
  • Size of remote file: 224 kB
data/2025/2503_13xxx/2503.13939/images/5c19b24114a185413141accf26bb2110e546c00cdf7ea2d0b184a5b47c97c79f.jpg ADDED

Git LFS Details

  • SHA256: 20ed0ddf564c1388bd49034d83927cf2daf340c792b3c756a760ed24057d152a
  • Pointer size: 130 Bytes
  • Size of remote file: 29.7 kB
data/2025/2503_13xxx/2503.13939/images/6372a63453c4981b4e8d60008db2f8680108fa0511a31026c42f0512a8233e1e.jpg ADDED

Git LFS Details

  • SHA256: 98531d773c1d405fb10b48032ae78799545fa4f0c244b5311698c91dc5c12a62
  • Pointer size: 129 Bytes
  • Size of remote file: 5.6 kB
data/2025/2503_13xxx/2503.13939/images/6d17f5e39033555d7897b638223806d890c898d39fb22d406e37032593579ca3.jpg ADDED

Git LFS Details

  • SHA256: e506334dca4bc5b1b835a5def9acb8617e73aa6dfa50fe7bf90141c6bddce6d0
  • Pointer size: 129 Bytes
  • Size of remote file: 8.38 kB
data/2025/2503_13xxx/2503.13939/images/7cbb27596f9220b47bd10a54fcf49780df3b484df48d9a1db188d7c947e017ff.jpg ADDED

Git LFS Details

  • SHA256: 18f0c64bcad517046ccd4c4d93946240582146935dda9ac03799fe6c557f9273
  • Pointer size: 130 Bytes
  • Size of remote file: 80 kB
data/2025/2503_13xxx/2503.13939/images/831930b6814bd29bd180cce811827c75f5f6dcce738a7e34e742d11c03d96411.jpg ADDED

Git LFS Details

  • SHA256: fa6e0e153d0aa7a76b1250e0be6f78de9f758fe644646a4bd1371958fd8c4c74
  • Pointer size: 130 Bytes
  • Size of remote file: 89.8 kB
data/2025/2503_13xxx/2503.13939/images/8b7e996d31c55436cbe2572cef3ce2510e40baa10d74a32cbb697fcfeef6ea1a.jpg ADDED

Git LFS Details

  • SHA256: 2b6716bfc27b72d54b947b5a20e1bfe07554130f50e3db62af1604b3b5c4400b
  • Pointer size: 130 Bytes
  • Size of remote file: 32.1 kB
data/2025/2503_13xxx/2503.13939/images/a5eba32c917bc72a3b520819edd3c426960b311554b18317853ada95377fa1d6.jpg ADDED

Git LFS Details

  • SHA256: 074187ea1fa5d63029551ec3f7de98f8b589add070a03e1dc17e5591f792043f
  • Pointer size: 130 Bytes
  • Size of remote file: 86.1 kB
data/2025/2503_13xxx/2503.13939/images/a9fbe11f3609932ea9df90b68079cd1bdc769556cdec807f6e2e30e58aa055fa.jpg ADDED

Git LFS Details

  • SHA256: 29fdef954fe08f1de8856c5f9036a2423c999d8c21156b5effc18044ff35ba3b
  • Pointer size: 130 Bytes
  • Size of remote file: 48.6 kB
data/2025/2503_13xxx/2503.13939/images/b82effbb9b726f9312287f34c3abf5897ba152a5a07ed17488f2a75f7aee435c.jpg ADDED

Git LFS Details

  • SHA256: 20f1503ff6df797f34ba09c7b7800b39bfec00a80967db75d39f8a4c055aa6fa
  • Pointer size: 131 Bytes
  • Size of remote file: 129 kB
data/2025/2503_13xxx/2503.13939/images/c6a1cbfe551d72704f48e8c2194a01849188ccb0ff3355aeeee9f0cc8c3be341.jpg ADDED

Git LFS Details

  • SHA256: c7595194ae71a8b8239d1cc272cfd147d992292cf2a4d28165965752c5a67cb5
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/2025/2503_13xxx/2503.13939/images/cae1a1c69fc436bed9659cf12dbaccbd6101db506055d35dd62850b1405948a2.jpg ADDED

Git LFS Details

  • SHA256: 01e5476d2bcc196130d764437b1acb0f76e1dc40fb273460673f8014d3683215
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
data/2025/2503_13xxx/2503.13939/images/d99add9d2b0e92e41ac2a6489deeebcc0d91c7537c2459f870b34336be11f415.jpg ADDED

Git LFS Details

  • SHA256: b0ad8d8518bcf853c4202b3d3ae98931c12a19d2e7178dae1658028fe6fa3813
  • Pointer size: 130 Bytes
  • Size of remote file: 24.2 kB
data/2025/2503_13xxx/2503.13939/images/e531cfc87b3651b7f14e2dd6bb1d4fd968e84a2b8d9560ce487a2f138bd6d0d2.jpg ADDED

Git LFS Details

  • SHA256: a73444e74b6b093684ecaad2ffda6b7e7a31b08fc64aefe9bcdf5ff224d2fa00
  • Pointer size: 130 Bytes
  • Size of remote file: 45.8 kB
data/2025/2503_13xxx/2503.13939/images/e68b3ed71271c5ada12379607a0316ed743c4dbfa4fc68e2cddf0aba55fe3db4.jpg ADDED

Git LFS Details

  • SHA256: 88d5f61477b4fa5acf3aeb582614549074bda01c94a700cdaedd430ae2d1459a
  • Pointer size: 130 Bytes
  • Size of remote file: 85.9 kB
data/2025/2503_13xxx/2503.13939/images/e7ab5651b239aae2da14ff72175987bba5cfc015db19b604e83d31379ca2c7c3.jpg ADDED

Git LFS Details

  • SHA256: db64241c19787dc562eb67b1584cb334c4fdbbed430188ac39ca4b24752d5d88
  • Pointer size: 130 Bytes
  • Size of remote file: 14 kB
data/2025/2503_13xxx/2503.13939/images/eeab33d0e9f3175157bcbf5f4c343acf7158adca3522f510f1bde6d2d2b056bb.jpg ADDED

Git LFS Details

  • SHA256: 8cc2ef4536eb8cec402494afe49ace2e98cdfa6643b556868f971c3cc0db2c5b
  • Pointer size: 131 Bytes
  • Size of remote file: 127 kB
data/2025/2503_13xxx/2503.13939/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13964/78ea4382-37bc-4ec9-850c-29e0e03d0588_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bcd025b48d72958da262c895170f1a3669fffead005489a6af5a3d4f2061358
3
+ size 2577973
data/2025/2503_13xxx/2503.13964/full.md ADDED
@@ -0,0 +1,408 @@
1
+ # MDocAgent: A Multi-Modal Multi-Agent Framework for Document Understanding
2
+
3
+ Siwei Han $^{1}$ , Peng Xia $^{1}$ , Ruiyi Zhang $^{2}$ , Tong Sun $^{2}$ , Yun Li $^{1}$ , Hongtu Zhu $^{1}$ , Huaxiu Yao $^{1}$ $^{1}$ UNC-Chapel Hill, $^{2}$ Adobe Research
4
+
5
+ {siweih,huaxiu}@cs.unc.edu
6
+
7
+ # Abstract
8
+
9
+ Document Question Answering (DocQA) is a very common task. Existing methods using Large Language Models (LLMs) or Large Vision Language Models (LVLMs) and Retrieval Augmented Generation (RAG) often prioritize information from a single modality, failing to effectively integrate textual and visual cues. These approaches struggle with complex multi-modal reasoning, limiting their performance on real-world documents. We present MDocAgent (A Multi-Modal Multi-Agent Framework for Document Understanding), a novel RAG and multi-agent framework that leverages both text and images. Our system employs five specialized agents: a general agent, a critical agent, a text agent, an image agent, and a summarizing agent. These agents engage in multi-modal context retrieval, combining their individual insights to achieve a more comprehensive understanding of the document's content. This collaborative approach enables the system to synthesize information from both textual and visual components, leading to improved accuracy in question answering. Preliminary experiments on five benchmarks such as MMLongBench and LongDocURL demonstrate the effectiveness of our MDocAgent, achieving an average improvement of $12.1\%$ over the current state-of-the-art method. This work contributes to the development of more robust and comprehensive DocQA systems capable of handling the complexities of real-world documents containing rich textual and visual information. Our data and code are available at https://github.com/aiming-lab/MDocAgent.
10
+
11
+ # 1. Introduction
12
+
13
+ Answering questions based on reference documents (DocQA) is a critical task in many applications [5, 8, 25, 28, 34, 35, 45], ranging from information retrieval to automated document analysis. A key challenge in DocQA lies in the diverse nature of questions and the information needed to answer them [7, 26]. Questions can refer to textual content, to visual elements within the document (e.g., charts, diagrams, images), or even require the integration of information from both modalities.
14
+
15
+ ![](images/1f00b2158f4baa2e6c8177096d270b27b92ab0d3605d80704357732f68428092.jpg)
16
+
17
+ ![](images/5917de7b382bac0920c45bddea247ed059c33945008b92bec6a0a1a037bd24c0.jpg)
18
+
19
+ ![](images/433cce92c4fa9b0ed6396c768f7c88a7d517383e10709be57e826f4b55732810.jpg)
20
+
21
+ ![](images/d91ccdfc0d2ffe61a73e1a10c2178fe6dbc5f6967910e5ec9ea341203cc40cf6.jpg)
22
+ Figure 1. Comparison of different approaches for DocQA. LVLMs often struggle with long documents and lack granular attention to detail, while also exhibiting limitations in cross-modal understanding. Single-modal context retrieval can handle long documents but still suffers from issues with detailed analysis or integrating information across modalities. Our MDocAgent addresses these challenges by combining text and image-based RAG with specialized agents for refined processing within each modality and a critical information extraction mechanism, showcasing improved DocQA performance.
23
+
24
+ Since Large Language Models (LLMs) can only handle textual information [29], Large Vision Language Models (LVLMs) are often used in DocQA [4, 13, 24]. As illustrated in Figure 1, while LVLMs have shown promise in handling visual content, they often struggle in scenarios where key information is primarily textual, or where a nuanced understanding of the interplay between text and visual elements is required [5, 25, 34]. Another challenge in DocQA lies in the huge volume of information often present in documents. Processing entire documents directly can overwhelm computational resources and make it difficult for models to identify the most pertinent information [7, 26].
27
+
28
+ To overcome this challenge, Retrieval Augmented Generation (RAG) is used as an auxiliary tool to extract the critical information from a long document [11]. While RAG methods like ColBERT [16] and ColPali [9] have proven effective for retrieving textual or visual information respectively, they often fall short when a question requires integrating insights from both modalities. Existing RAG implementations typically operate in isolation, either retrieving text or images [18, 42], but lack the ability to synthesize information across these modalities. Consider a document containing a crucial diagram and accompanying textual explanations. If a question focuses on the diagram's content, a purely text-based RAG system would struggle to pinpoint the relevant information. Conversely, if the question pertains to a nuanced detail within the textual description, an image-based RAG would be unable to isolate the necessary textual segment. This inability to effectively combine multi-modal information restricts the performance of current RAG-based approaches in complex DocQA tasks. Moreover, the diverse and nuanced nature of these multimodal relationships requires not just retrieval, but also a mechanism for reasoning and drawing inferences across different modalities.
29
+
30
+ To further address these limitations, we present a novel framework, a Multi-Modal Multi-Agent Framework for Document Understanding (MDocAgent), which leverages the power of both RAG and a collaborative multi-agent system where specialized agents collaborate to process and integrate text and image information. MDocAgent employs two parallel RAG pipelines: a text-based RAG and an image-based RAG. These retrievers provide targeted textual and visual context for our multi-agent system. MDocAgent comprises five specialized agents: a general agent for initial multi-modal processing, a critical agent for identifying key information, a text agent, an image agent for focused analysis within their respective modalities, and a summarizing agent to synthesize the final answer. This collaborative approach enables our system to effectively tackle questions that require synthesizing information from both textual and visual elements, going beyond the capabilities of traditional RAG methods.
31
+
32
+ Specifically, MDocAgent operates in five stages: (1) Document Pre-processing: Text is extracted via OCR and pages are preserved as images. (2) Multi-modal Context Retrieval: text-based and image-based RAG tools retrieve the top-k relevant text segments and image pages, respectively. (3) Initial Analysis and Key Extraction: The general agent generates an initial response, and the critical agent extracts key information, providing it to the specialized agents. (4) Specialized Agent Processing: Text and image agents analyze the retrieved context within their respective modalities, guided by the critical information. (5) Answer Synthesis: The summarizing agent integrates all agent responses to produce the final answer.
35
+
36
+ The primary contribution of this paper is a novel multi-agent framework for DocQA that effectively integrates specialized agents, each dedicated to a specific modality or aspect of reasoning, including text and image understanding, critical information extraction, and answer synthesis. We demonstrate the efficacy of our approach through experiments on five benchmarks: MMLongBench [26], LongDocURL [7], PaperTab [14], PaperText [14], and FetaTab [14], showing significant improvements in DocQA performance, with an average improvement of $12.1\%$ over the current SOTA method. The empirical improvements demonstrate the effectiveness of our collaborative multi-agent architecture in handling long, complex documents and questions. Furthermore, ablation studies validate the contribution of each agent and the importance of integrating multi-modalities.
37
+
38
+ # 2. Related Work
39
+
40
+ LVLMs in DocQA Tasks. Document Visual Question Answering (DocVQA) has evolved from focusing on short documents to handling complex, long, and multi-document tasks [8, 28, 35, 36], often involving visually rich content such as charts and tables. This shift requires models capable of integrating both textual and visual information. Large Vision Language Models (LVLMs) have emerged to address these challenges by combining the deep semantic understanding of Large Language Models (LLMs) with the ability to process document images [6, 22, 23, 37, 40, 41, 46-51]. LVLMs convert text in images into visual representations, preserving layout and visual context. However, they face challenges like input size limitations and potential loss of fine-grained textual details [13, 24], making effective integration of text and visual information crucial for accurate DocVQA performance [31].
41
+
42
+ Retrieval-Augmented Generation. Retrieval Augmented Generation (RAG) enhances LLMs by supplying them with external text-based context, thereby improving their performance in tasks such as DocQA [11, 18]. Recently, with the increasing prevalence of visually rich documents, image RAG approaches have been developed to retrieve relevant visual content for Large Vision Language Models (LVLMs) [4, 5, 42-44]. However, existing methods struggle to effectively integrate and reason over both text and image information, as retrieval often occurs independently.
43
+
44
+ ![](images/4e1dc163721351e4b1723c336237a6658cf99d52867c870146f5e16e57995ace.jpg)
45
+ Figure 2. Overview of MDocAgent: A multi-modal multi-agent framework operating in five stages: (1) Documents are processed using PDF tools to extract text and images. (2) Text-based and image-based RAG retrieves the top-k relevant segments and image pages. (3) The general agent provides a preliminary answer, and the critical agent extracts critical information from both modalities. (4) Specialized agents process the retrieved information and critical information within their respective modalities and generate refined answers. (5) The summarizing agent integrates all previous outputs to generate the final answer.
46
+
47
+ This lack of integrated reasoning limits the effectiveness of current RAG techniques, especially for complex DocQA tasks that require a nuanced understanding of both modalities.
48
+
49
+ Multi-Agent Systems. Multi-agent systems have shown promise in complex domains like medicine [17, 21, 39]. These systems use specialized agents to focus on different task aspects [3, 15, 20, 33], collaborating to achieve goals that a single model may struggle with. However, their application to DocQA introduces unique challenges stemming from the need to integrate diverse modalities. Simply combining the outputs of independent text and image agents often fails to capture the nuanced interplay between these modalities, which is crucial for accurate document understanding. Our framework addresses this by introducing a general agent for information integration alongside specialized text and image agents, enabling collaborative reasoning and a more comprehensive understanding of document content, ultimately improving DocVQA performance.
50
+
51
+ # 3. Multi-Modal Multi-Agent Framework for Document Understanding
52
+
53
+ This section details our proposed framework, MDocAgent, for tackling the complex challenges of DocQA. MDocAgent employs a novel five-stage multi-modal, multi-agent approach as shown in Figure 2, utilizing specialized agents for targeted information extraction and cross-modal synthesis to achieve a more comprehensive understanding of document content. Subsequently, Section 3.1 through Section 3.5 provide a comprehensive description of MDocAgent's architecture. This detailed exposition will elucidate the mechanisms by which MDocAgent effectively integrates and leverages textual and visual information to achieve improved accuracy in DocQA.
56
+
57
+ Preliminary: Document Question Answering. Given a question $q$ expressed in natural language and the corresponding document $\mathcal{D}$, the goal is to generate an answer $a$ that accurately and comprehensively addresses $q$ using the information provided within $\mathcal{D}$.
58
+
59
+ # 3.1. Document Pre-Processing
60
+
61
+ This initial stage prepares the document corpus for subsequent processing by transforming it into a format suitable for both textual and visual analysis. $\mathcal{D}$ consists of a set of pages $\mathcal{D} = \{p_1, p_2, \ldots, p_N\}$. For each page $p_i$, textual content is extracted using a combination of Optical Character Recognition (OCR) and PDF parsing techniques. OCR is employed to recognize text within image-based PDFs, while PDF parsing extracts text directly from digitally encoded text within the PDF. This dual approach ensures robust text extraction across various document formats and structures. The extracted text for each page $p_i$ is represented as a sequence of textual segments or paragraphs $t_i = \{t_{i1}, t_{i2}, \ldots, t_{iM}\}$, where $M$ represents the number of text segments on that page. Concurrently, each page $p_i$ is also preserved as an image, retaining its original visual layout and features. This allows the framework to leverage both textual and visual cues for comprehensive understanding. This pre-processing results in two parallel representations of the document corpus: a textual representation consisting of extracted text segments and a visual representation consisting of the original page images. This dual representation forms the foundation for the multi-modal analysis performed by the framework.
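+
+ The paper does not name its exact OCR/PDF toolchain; as a concrete illustration only, a minimal sketch of this dual text/image representation, assuming PyMuPDF (fitz) as the backend:
+
+ ```python
+ import fitz  # PyMuPDF -- an assumed backend; the paper's exact toolchain is unspecified
+
+ def preprocess(pdf_path: str, dpi: int = 144):
+     """Return per-page text segments t_i and rendered page images p_i."""
+     doc = fitz.open(pdf_path)
+     texts, images = [], []
+     for page in doc:
+         # Parse digitally encoded text; image-only PDFs would additionally
+         # need an OCR pass (e.g., Tesseract), which is omitted here.
+         blocks = page.get_text("blocks")  # tuples: (x0, y0, x1, y1, text, ...)
+         texts.append([b[4].strip() for b in blocks if b[4].strip()])
+         # Keep the full page as an image to preserve layout and visual cues.
+         images.append(page.get_pixmap(dpi=dpi).tobytes("png"))
+     return texts, images
+ ```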
64
+
65
+ # 3.2. Multi-modal Context Retrieval
66
+
67
+ The second stage focuses on efficiently retrieving the most relevant information from the document corpus, considering both text and image modalities. Algorithm 1 illustrates the whole procedure of retrieval. For the textual retrieval, extracted text segments $t_i$ of each page $p_i$ are indexed using ColBERT [16]. Given the user question $q$, ColBERT retrieves the top-$k$ most relevant text segments, denoted as $T_q = \{t_1, t_2, \dots, t_k\}$. This provides the textual context for subsequent agent processing. Parallel to textual retrieval, visual context is extracted using ColPali [9]. Each page image $p_i$ is processed by ColPali to generate a dense visual embedding $E^{p_i} \in \mathbb{R}^{n^v \times d}$, where $n^v$ represents the number of visual tokens per page and $d$ represents the embedding dimension. Using these embeddings and the question $q$, ColPali retrieves the top-$k$ most visually relevant pages, denoted as $I_q = \{i_1, i_2, \dots, i_k\}$. The use of ColPali allows the model to capture the visual information present in the document, including layout, figures, and other visual cues.
68
+
69
+ Algorithm 1 Multi-modal Context Retrieval
70
+ Require: Question $q$, Document $D$, Text Scores $S_{t}$, Image Scores $S_{i}$, Text Relevance Scorer $R_{t}$, Image Relevance Scorer $R_{i}$
+ Ensure: Top-$k$ text segments $T_{q}$, Top-$k$ image pages $I_{q}$
+ 1: $S_{t} \gets \{\}$
+ 2: $S_{i} \gets \{\}$ $\triangleright$ Iterate through each page in the corpus
+ 3: for each $p$ in $D$ do
+ 4: for each text segment $t$ in $p$ do
+ 5: $S_{t}[t] \gets R_{t}(q, t)$ $\triangleright$ Calculate text relevance score
+ 6: end for
+ 7: $S_{i}[p] \gets R_{i}(q, p)$ $\triangleright$ Calculate image relevance score
+ 8: end for
+ 9: $T_{q} \gets \mathrm{Top\_K}(S_{t}, k)$ $\triangleright$ Select top-$k$ text segments
+ 10: $I_{q} \gets \mathrm{Top\_K}(S_{i}, k)$ $\triangleright$ Select top-$k$ image pages
+ 11: return $T_{q}$, $I_{q}$
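+
+ A direct Python transcription of Algorithm 1, with the relevance scorers left abstract (in MDocAgent they are ColBERT and ColPali similarity scores); the data layout is an assumption for illustration:
+
+ ```python
+ import heapq
+ from typing import Callable, List, Tuple
+
+ def retrieve(q: str,
+              pages: List[dict],                    # each page: {"texts": [...], "image": ...}
+              R_t: Callable[[str, str], float],     # text relevance scorer (ColBERT-style)
+              R_i: Callable[[str, object], float],  # image relevance scorer (ColPali-style)
+              k: int = 4) -> Tuple[List[str], List[object]]:
+     scored_t, scored_i = [], []
+     for p in pages:                                # iterate through each page in the corpus
+         for t in p["texts"]:
+             scored_t.append((R_t(q, t), t))        # calculate text relevance score
+         scored_i.append((R_i(q, p["image"]), p["image"]))  # calculate image relevance score
+     T_q = [t for _, t in heapq.nlargest(k, scored_t, key=lambda x: x[0])]
+     I_q = [im for _, im in heapq.nlargest(k, scored_i, key=lambda x: x[0])]
+     return T_q, I_q                                # top-k text segments and page images
+ ```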
72
+
73
+ # 3.3. Initial Analysis and Key Extraction
74
+
75
+ The third stage aims to provide an initial interpretation of the question and pinpoint the most salient information within the retrieved context. The general agent $A_{G}$, functioning as a preliminary multi-modal integrator, receives both the retrieved textual context $T_{q}$ and the visual context $I_{q}$. It processes these multimodal inputs by effectively combining the information embedded within both modalities. This comprehensive understanding of the combined context allows $A_G$ to generate a preliminary answer $a_G$, which serves as a crucial starting point for more specialized analysis in the next stage.
78
+
79
+ $$
+ a_{G} = A_{G}\left(q, T_{q}, I_{q}\right). \tag{1}
+ $$
82
+
83
+ Subsequently, the critical agent $A_{C}$ plays a vital role in refining the retrieved information. It takes as input the question $q$, the retrieved contexts $T_{q}$ and $I_{q}$, and the preliminary answer $a_{G}$ generated by the general agent. The primary function of $A_{C}$ is to meticulously analyze these inputs and identify the most crucial pieces of information that are essential to accurately answer the question. This critical information acts as a guide for the specialized agents in the next stage, focusing their attention on the most relevant aspects of the retrieved context.
84
+
85
+ $$
+ T_{c} = A_{C}\left(q, T_{q}, a_{G}\right), \quad I_{c} = A_{C}\left(q, I_{q}, a_{G}\right). \tag{2}
+ $$
88
+
89
+ The output of this stage consists of $T_{c} \subset T_{q}$, the critical textual information extracted from the retrieved text segments, and $I_{c}$, a detailed textual description of the critical visual information extracted from the retrieved images $I_{q}$, capturing the essence of the important visual elements.
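+
+ A schematic rendering of Eqs. (1)-(2). Here `vlm_generate` is a hypothetical wrapper around the underlying LVLM call (Qwen2-VL-7B-Instruct in the experiments), and the prompt wording is illustrative rather than the paper's exact templates:
+
+ ```python
+ def general_agent(vlm_generate, q, T_q, I_q):
+     # Eq. (1): a_G = A_G(q, T_q, I_q) -- preliminary multi-modal answer.
+     prompt = f"Question: {q}\nText context:\n" + "\n".join(T_q)
+     return vlm_generate(prompt, images=I_q)
+
+ def critical_agent(vlm_generate, q, T_q, I_q, a_G):
+     # Eq. (2): extract critical text T_c and a textual description I_c of the
+     # critical visual evidence, conditioned on the preliminary answer a_G.
+     T_c = vlm_generate(
+         f"Question: {q}\nDraft answer: {a_G}\n"
+         "Quote the text segments most essential to the answer:\n" + "\n".join(T_q))
+     I_c = vlm_generate(
+         f"Question: {q}\nDraft answer: {a_G}\n"
+         "Describe the visual evidence most essential to the answer.", images=I_q)
+     return T_c, I_c
+ ```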
90
+
91
+ # 3.4. Specialized Agent Processing
92
+
93
+ The fourth stage delves deeper into the textual and visual modalities, leveraging specialized agents guided by the critical information extracted in the previous stage. The text agent $A_{T}$ receives the retrieved text segments $T_{q}$ and the critical textual information $T_{c}$ as input. It operates exclusively within the textual domain, leveraging its specialized knowledge and analytical capabilities to thoroughly examine the provided text segments. By focusing specifically on the critical textual information $T_{c}$, $A_{T}$ can pinpoint the most relevant evidence within the broader textual context $T_{q}$ and perform a more focused analysis. This focused approach allows for a deeper understanding of the textual nuances related to the question and culminates in the generation of a detailed, text-based answer $a_{T}$.
94
+
95
+ $$
+ a_{T} = A_{T}\left(q, T_{q}, T_{c}\right). \tag{3}
+ $$
98
+
99
+ Concurrently, the image agent $A_{I}$ receives the retrieved images $I_{q}$ and the critical visual information $I_{c}$. This agent specializes in visual analysis and interpretation. It processes the images in $I_{q}$, paying particular attention to the regions or features highlighted by the critical visual information $I_{c}$. This targeted analysis allows the agent to extract valuable insights from the visual content, focusing its processing on the most relevant aspects of the images. The image agent's analysis results in a visually-grounded answer $a_{I}$, which provides a response based on the interpretation of the images.
100
+
101
+ $$
+ a_{I} = A_{I}\left(q, I_{q}, I_{c}\right). \tag{4}
+ $$
104
+
105
+ # 3.5. Answer Synthesis
106
+
107
+ The final stage integrates the diverse outputs from the preceding stages, combining the initial multi-modal understanding with the specialized agent analyses to produce a comprehensive and accurate answer. The summarizing agent $A_{S}$ receives the answers $a_{G}$, $a_{T}$, and $a_{I}$ generated by the general agent, text agent, and image agent, respectively. This comprehensive set of information provides a multifaceted perspective on the question and allows the summarizing agent to perform a thorough synthesis. The summarizing agent analyzes the individual agent answers, identifying commonalities, discrepancies, and complementary insights. It considers the supporting evidence provided by each agent. By resolving potential conflicts or disagreements between the agents and integrating their individual strengths, the summarizing agent constructs a final answer $a_{S}$ that leverages the collective intelligence of the multi-agent system. This final answer is not merely a combination of individual answers but a synthesized response that reflects a deeper and more nuanced understanding of the information extracted from both textual and visual modalities. The whole procedure of this multi-agent collaboration is illustrated in Algorithm 2.
108
+
109
+ Algorithm 2 Multi-agent Collaboration
110
+ Require: Question $q$ , Top-k text segments $T_{q}$ , Top-k image segments $I_{q}$ , General Agent $A_{G}$ , Critical Agent $A_{C}$ , Text Agent $A_{T}$ , Image Agent $A_{I}$ , Summarizing Agent $A_{S}$
111
+ Ensure: Final answer $a_{S}$
112
+ 1: $a_{G} \gets A_{G}(q, T_{q}, I_{q})$ ▷ General agent answer
113
+ 2: $(T_{c}, I_{c}) \gets A_{C}(q, T_{q}, I_{q}, a_{G})$ ▷ Extract critical info
114
+ 3: $a_{T} \gets A_{T}(q, T_{q}, T_{c})$ ▷ Text agent answer
115
+ 4: $a_{I} \gets A_{I}(q, I_{q}, I_{c})$ ▷ Image agent answer
116
+ 5: $a_{S} \gets A_{S}(q, a_{G}, a_{T}, a_{I})$ ▷ Final answer synthesis
117
+ 6: return $a_{S}$
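+
+ With the agents wrapped as callables, Algorithm 2 reduces to a short driver. This sketch reuses the hypothetical `general_agent`/`critical_agent` above and keeps the specialized and summarizing agents equally schematic; `llm_generate` stands in for the text-only model (Llama-3.1-8B-Instruct in the experiments):
+
+ ```python
+ def answer(q, T_q, I_q, llm_generate, vlm_generate):
+     a_G = general_agent(vlm_generate, q, T_q, I_q)             # line 1
+     T_c, I_c = critical_agent(vlm_generate, q, T_q, I_q, a_G)  # line 2
+     # Line 3, Eq. (3): text agent operates purely in the textual domain.
+     a_T = llm_generate(f"Question: {q}\nContext:\n" + "\n".join(T_q)
+                        + f"\nFocus on this critical information: {T_c}")
+     # Line 4, Eq. (4): image agent attends to the critical visual cues.
+     a_I = vlm_generate(f"Question: {q}\nFocus on: {I_c}", images=I_q)
+     # Line 5: summarizing agent reconciles the three candidate answers.
+     a_S = vlm_generate(
+         "Synthesize one final answer, resolving any disagreements.\n"
+         f"Question: {q}\nGeneral: {a_G}\nText: {a_T}\nImage: {a_I}")
+     return a_S                                                 # line 6
+ ```
+
+ Note that, per the implementation details below, all agents except the text agent share the same LVLM backbone in the paper's experiments.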
118
+
119
+ # 4. Experiments
120
+
121
+ We evaluate MDocAgent on five document understanding benchmarks covering multiple scenarios to answer the following questions: (1) Does MDocAgent effectively improve document understanding accuracy compared to existing RAG-based approaches? (2) Does each agent in our framework play a meaningful role? (3) How does our approach enhance the model's understanding of documents?
122
+
123
+ # 4.1. Experiment Setup
124
+
125
+ Implementation Details. There are five agents in MDocAgent: general agent, critical agent, text agent, image agent and summarizing agent. We adopt Llama-3.1-8B-Instruct [12] as the base model for the text agent, Qwen2-VL-7B-Instruct [38] for the other four agents, and select ColBERTv2 [32] and ColPali [10] as the text and image retrievers, respectively. In our RAG settings, we retrieve the 1 or 4 highest-scored segments as input context for each example. All experiments are conducted on 4 NVIDIA H100 GPUs. Details of models and settings are shown in Appendix A.
128
+
129
+ Datasets. The benchmarks include MMLongBench [26], LongDocURL [7], PaperTab [14], PaperText [14], and FetaTab [14]. These evaluation datasets cover a variety of scenarios, including both open- and closed-domain, textual and visual, and long and short documents, ensuring fairness and completeness in the evaluation. Details of the dataset descriptions are in Appendix A.2.
130
+
131
+ Metrics. For all benchmarks, following Deng et al. [7], Ma et al. [26], we leverage GPT-4o [30] as the evaluation model to assess the consistency between the model's output and the reference answer, producing a binary decision (correct/incorrect). We provide the average accuracy rate for each benchmark.
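+
+ A minimal sketch of this binary LLM-as-judge protocol using the OpenAI Python client; the judging prompt below is an assumption for illustration, not the exact template of Deng et al. [7] or Ma et al. [26]:
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
+
+ def judge(question: str, prediction: str, reference: str) -> bool:
+     """Ask GPT-4o for a binary correct/incorrect decision (illustrative prompt)."""
+     msg = (f"Question: {question}\nReference answer: {reference}\n"
+            f"Model answer: {prediction}\n"
+            "Is the model answer consistent with the reference? "
+            "Reply with exactly one word: correct or incorrect.")
+     resp = client.chat.completions.create(
+         model="gpt-4o", messages=[{"role": "user", "content": msg}], temperature=0)
+     return resp.choices[0].message.content.strip().lower().startswith("correct")
+
+ # Benchmark accuracy = mean of judge(...) over all evaluation samples.
+ ```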
132
+
133
+ # 4.2. Main Results
134
+
135
+ In this section, we provide a comprehensive comparison of MDocAgent on multiple benchmarks against existing state-of-the-art LVLMs and RAG-based methods built on them. Our findings can be summarized as follows:
136
+
137
+ MDocAgent Outperforms All the Comparison Methods and Other LVLMs. We compare our method with baseline approaches on document understanding tasks, with the results presented in Table 1. Overall, our method outperforms all baselines across all benchmarks.
138
+
139
+ Top-1 Retrieval Performance. With top-1 retrieval, MDocAgent demonstrates a significant performance improvement. On PaperText, MDocAgent achieves a score of 0.399, surpassing the second-best method, M3DocRAG, by $16.7\%$. Similarly, on FetaTab, MDocAgent attains a score of 0.600, exceeding the second-best method by an impressive $21.0\%$. Compared to the best LVLM (Qwen2.5-VL-7B) and text-RAG-based (ColBERTv2+Llama-3.1-8B) baselines, our approach demonstrates remarkable average improvements of $51.9\%$ and $23.7\%$, respectively, across all benchmarks. This improvement highlights the benefits of incorporating visual information and the collaborative multi-agent architecture in our framework. Furthermore, the recent state-of-the-art image-RAG-based method M3DocRAG [5] shows promising results, yet our approach still outperforms it by $12.1\%$ on average. This suggests that our multi-agent framework, with its specialized agents and critical information extraction mechanism, addresses the core challenges of information overload, granular attention to detail, and cross-modality understanding more effectively than existing methods.
140
+
141
+ Top-4 Retrieval Performance. When using top-4 retrieval, the advantages of our method are further demonstrated. MDocAgent consistently achieves the highest scores across all benchmarks. On average, MDocAgent outperforms Qwen2.5-VL-7B by a remarkable $73.5\%$.
142
+
143
+ Table 1. Performance comparison across MDocAgent and existing state-of-the-art LVLMs and RAG-based methods.
144
+
145
+ <table><tr><td>Method</td><td>MMLongBench</td><td>LongDocUrl</td><td>PaperTab</td><td>PaperText</td><td>FetaTab</td><td>Avg</td></tr><tr><td colspan="7">LVLMs</td></tr><tr><td>Qwen2-VL-7B-Instruct [38]</td><td>0.165</td><td>0.296</td><td>0.087</td><td>0.166</td><td>0.324</td><td>0.208</td></tr><tr><td>Qwen2.5-VL-7B-Instruct [2]</td><td>0.224</td><td>0.389</td><td>0.127</td><td>0.271</td><td>0.329</td><td>0.268</td></tr><tr><td>LLaVA-v1.6-Mistral-7B [22]</td><td>0.099</td><td>0.074</td><td>0.033</td><td>0.033</td><td>0.110</td><td>0.070</td></tr><tr><td>Phi-3.5-Vision-Instruct [1]</td><td>0.144</td><td>0.280</td><td>0.071</td><td>0.165</td><td>0.237</td><td>0.179</td></tr><tr><td>LLaVA-One-Vision-7B [19]</td><td>0.053</td><td>0.126</td><td>0.056</td><td>0.108</td><td>0.077</td><td>0.084</td></tr><tr><td>SmolVLM-Instruct [27]</td><td>0.081</td><td>0.163</td><td>0.066</td><td>0.137</td><td>0.142</td><td>0.118</td></tr><tr><td colspan="7">RAG methods (top 1)</td></tr><tr><td>ColBERTv2 [32]+LLaMA-3.1-8B [12]</td><td>0.241</td><td>0.429</td><td>0.155</td><td>0.332</td><td>0.490</td><td>0.329</td></tr><tr><td>M3DocRAG [5] (ColPali [9]+Qwen2-VL-7B [38])</td><td>0.276</td><td>0.506</td><td>0.196</td><td>0.342</td><td>0.497</td><td>0.363</td></tr><tr><td>MDocAgent (Ours)</td><td>0.299</td><td>0.517</td><td>0.219</td><td>0.399</td><td>0.600</td><td>0.407</td></tr><tr><td colspan="7">RAG methods (top 4)</td></tr><tr><td>ColBERTv2 [32]+LLaMA-3.1-8B [12]</td><td>0.273</td><td>0.491</td><td>0.277</td><td>0.460</td><td>0.673</td><td>0.435</td></tr><tr><td>M3DocRAG [5] (ColPali [9]+Qwen2-VL-7B [38])</td><td>0.296</td><td>0.554</td><td>0.237</td><td>0.430</td><td>0.578</td><td>0.419</td></tr><tr><td>MDocAgent (Ours)</td><td>0.315</td><td>0.578</td><td>0.278</td><td>0.487</td><td>0.675</td><td>0.465</td></tr></table>
146
+
147
+ Table 2. Performance comparison across different MDocAgent's variants.
148
+
149
+ <table><tr><td rowspan="2">Variants</td><td colspan="3">Agent Configuration</td><td colspan="6">Evaluation Benchmarks</td></tr><tr><td>General &amp; Critical Agent</td><td>Text Agent</td><td>Image Agent</td><td>MMLongBench</td><td>LongDocUrl</td><td>PaperTab</td><td>PaperText</td><td>FetaTab</td><td>Avg</td></tr><tr><td>MDocAgent$_i$</td><td>✓</td><td>✘</td><td>✓</td><td>0.287</td><td>0.508</td><td>0.196</td><td>0.376</td><td>0.552</td><td>0.384</td></tr><tr><td>MDocAgent$_t$</td><td>✓</td><td>✓</td><td>✘</td><td>0.288</td><td>0.484</td><td>0.201</td><td>0.391</td><td>0.596</td><td>0.392</td></tr><tr><td>MDocAgent$_s$</td><td>✘</td><td>✓</td><td>✓</td><td>0.285</td><td>0.479</td><td>0.188</td><td>0.365</td><td>0.592</td><td>0.382</td></tr><tr><td>MDocAgent</td><td>✓</td><td>✓</td><td>✓</td><td>0.299</td><td>0.517</td><td>0.219</td><td>0.399</td><td>0.600</td><td>0.407</td></tr></table>
150
+
151
+ Interestingly, with top-4 retrieval, M3DocRAG performs slightly worse than ColBERTv2+Llama-3.1-8B, reversing the ordering observed with top-1 retrieval. This may suggest limitations in M3DocRAG's capacity to selectively integrate information across multiple retrieved pages when handling larger amounts of retrieved information. On average, MDocAgent exceeds M3DocRAG by $10.9\%$. Meanwhile, compared to ColBERTv2+Llama-3.1-8B, MDocAgent demonstrates a $6.9\%$ improvement. This consistent improvement suggests that our method effectively harnesses the additional contextual information provided by the top-4 retrieved items, offering a greater benefit with more retrieval results.
152
+
153
+ # 4.3. Quantitative Analysis
154
+
155
+ In this section, we conduct three quantitative analyses to understand the effectiveness and contribution of different components within our proposed framework. First, we perform ablation studies to assess the impact of removing individual agents or groups of agents. Second, we present a fine-grained performance analysis, examining MDocAgent's performance across different evidence modalities on MMLongBench to pinpoint the source of its improvements.
156
+
157
+ Third, a compatibility analysis explores the framework's performance with different image-based RAG backbones to demonstrate its robustness and generalizability. Additionally, we present experimental results showcasing its performance with different model backbones in Appendix B.2.
158
+
159
+ # 4.3.1. Ablation Studies
160
+
161
+ Table 2 presents a comparison of our full method (MDocAgent) against its variants: MDocAgent $_i$ (without the text agent) and MDocAgent $_t$ (without the image agent). Across all benchmarks, the full MDocAgent method consistently achieves the highest performance. Removing either specialized agent, text or image, results in a noticeable performance drop. This underscores the importance of incorporating both text and image modalities through specialized agents within our framework. The performance difference is most pronounced on benchmarks like LongDocURL and PaperText, which likely contain richer visual or textual information respectively, further highlighting the value of specialized processing. This ablation study clearly demonstrates the synergistic effect of combining specialized agents dedicated to each modality.
162
+
163
+ Table 3. Performance comparison across different evidence sources on MMLongBench.
164
+
165
+ <table><tr><td>Method</td><td>Chart</td><td>Table</td><td>Pure-text</td><td>Generalized-text</td><td>Figure</td><td>Avg</td></tr><tr><td colspan="7">LVLMs (up to 32 pages)</td></tr><tr><td>Qwen2-VL-7B-Instruct</td><td>0.182</td><td>0.097</td><td>0.209</td><td>0.185</td><td>0.197</td><td>0.165</td></tr><tr><td>Qwen2.5-VL-7B-Instruct</td><td>0.188</td><td>0.124</td><td>0.265</td><td>0.210</td><td>0.254</td><td>0.224</td></tr><tr><td>LLaVA-v1.6-Mistral-7B</td><td>0.011</td><td>0.023</td><td>0.033</td><td>0.000</td><td>0.057</td><td>0.074</td></tr><tr><td>LLaVA-One-Vision-7B</td><td>0.045</td><td>0.051</td><td>0.076</td><td>0.017</td><td>0.084</td><td>0.053</td></tr><tr><td>Phi-3.5-Vision-Instruct</td><td>0.159</td><td>0.101</td><td>0.156</td><td>0.160</td><td>0.164</td><td>0.144</td></tr><tr><td>SmolVLM-Instruct</td><td>0.062</td><td>0.065</td><td>0.123</td><td>0.118</td><td>0.094</td><td>0.081</td></tr><tr><td colspan="7">RAG methods (top 1)</td></tr><tr><td>ColBERTv2+LLaMA-3.1-8B</td><td>0.148</td><td>0.203</td><td>0.265</td><td>0.143</td><td>0.074</td><td>0.241</td></tr><tr><td>M3DocRAG (ColPali+Qwen2-VL-7B)</td><td>0.268</td><td>0.263</td><td>0.334</td><td>0.250</td><td>0.303</td><td>0.276</td></tr><tr><td>MDocAgent (Ours)</td><td>0.269</td><td>0.300</td><td>0.348</td><td>0.252</td><td>0.298</td><td>0.299</td></tr><tr><td colspan="7">RAG methods (top 4)</td></tr><tr><td>ColBERTv2+LLaMA-3.1-8B</td><td>0.182</td><td>0.267</td><td>0.311</td><td>0.168</td><td>0.120</td><td>0.273</td></tr><tr><td>M3DocRAG (ColPali+Qwen2-VL-7B)</td><td>0.290</td><td>0.318</td><td>0.371</td><td>0.277</td><td>0.321</td><td>0.296</td></tr><tr><td>MDocAgent (Ours)</td><td>0.347</td><td>0.323</td><td>0.401</td><td>0.294</td><td>0.321</td><td>0.315</td></tr></table>
166
+
167
+ Table 4. Performance comparison between using ColPali and ColQwen2-v1.0 as MDocAgent's image-based RAG model.
168
+
169
+ <table><tr><td></td><td>MMLongBench</td><td>LongDocUrl</td><td>PaperTab</td><td>PaperText</td><td>FetaTab</td><td>Avg</td></tr><tr><td>+ColPali</td><td>0.299</td><td>0.517</td><td>0.219</td><td>0.399</td><td>0.600</td><td>0.407</td></tr><tr><td>+ColQwen2-v1.0</td><td>0.303</td><td>0.520</td><td>0.216</td><td>0.391</td><td>0.603</td><td>0.407</td></tr></table>
170
+
171
+ Table 2 also compares MDocAgent with MDocAgent $_s$ , where both the general agent and the critical agent are removed, to evaluate their contribution. The consistent improvement of the full method over MDocAgent $_s$ across all datasets clearly underscores the importance of these two agents. The general agent establishes a crucial foundation by initially integrating both text and image modalities, providing a holistic understanding of the context. Removing this integration step noticeably reduces the subsequent agents' capacity to focus their analysis on critical information and answer effectively. On top of general modality integration, removing the critical agent limits the framework's ability to identify and leverage crucial information. This highlights the essential role of the critical agent in focusing the specialized agents' attention and facilitating more targeted and efficient information extraction.
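+
+ To make the roles in this ablation concrete, the sketch below shows one plausible way to wire the agents together. It is our own reconstruction of the flow described above, not code from the paper; `call_agent` is a hypothetical stand-in for the underlying LVLM/LLM calls.
+
+ ```python
+ def answer_question(question, text_chunks, page_images, call_agent):
+     # 1. General agent: preliminary answer over both retrieved modalities.
+     general = call_agent("general", question, text=text_chunks, images=page_images)
+     # 2. Critical agent: distill per-modality critical information,
+     #    e.g. {"text": "...", "image": "..."}.
+     critical = call_agent("critical", question, context=general)
+     # 3. Specialized agents, each guided by its modality's critical hint.
+     text_ans = call_agent("text", question, text=text_chunks, hint=critical["text"])
+     image_ans = call_agent("image", question, images=page_images, hint=critical["image"])
+     # 4. Summarizing agent: reconcile all candidate answers into a final one.
+     return call_agent("summarize", question, answers=[general, text_ans, image_ans])
+ ```
+
+ Dropping one branch of this sketch yields the corresponding variant in Table 2; for example, skipping the text-agent call corresponds to MDocAgent $_i$ .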
172
+
173
+ # 4.3.2. Fine-Grained Performance Analysis
174
+
175
+ We present an in-depth analysis of performance across different types of evidence modalities by further examining the scores on MMLongBench in Table 3, to better understand the performance improvements achieved by MDocAgent. We also report results by evidence modality for LongDocURL in Appendix B.1. According
176
+
177
+ to the results, MDocAgent outperforms all LVLM baselines across all types of evidence modalities. When comparing RAG methods under the top-1 retrieval setting, though M3DocRAG performs slightly better on the Figure category, MDocAgent shows strong performance in the Chart, Table, and Text categories, reflecting its enhanced capability to process textual and visual information. With the top-4 retrieval strategy, MDocAgent improves its performance in all categories, notably Figure, where it catches up to M3DocRAG, highlighting its effective handling of large and varied information sources.
178
+
179
+ # 4.3.3. Compatibility Analysis
180
+
181
+ We further analyze the compatibility of MDocAgent with different RAG backbones. Table 4 presents results using two image-based RAG models, ColPali and ColQwen2-v1.0, within our proposed framework. Both models achieve comparable overall performance, with an identical average score of 0.407 across all benchmarks. While ColQwen2-v1.0 shows a slight advantage on MMLongBench, LongDocUrl, and FetaTab, ColPali performs marginally better on PaperTab and PaperText. This suggests that the choice of image-based RAG model has minimal impact on the framework's overall effectiveness, underscoring the robustness of our multi-agent architecture. Moreover, the consistency in performance
182
+
183
+ ![](images/2bd7c09de03471bde09aeab739f64575a2199a4ba0a5b0e6b991aabd27b2fe58.jpg)
184
+ Figure 3. A case study of MDocAgent compared with two other RAG-based baselines (ColBERT + Llama-3.1-8B and M3DocRAG). Given a question comparing two population sizes, both baseline methods fail to arrive at the correct answer. Our framework, through the collaborative efforts of its specialized agents, successfully identifies the relevant information from both the text and a table within the image, ultimately synthesizing the correct answer. This highlights the importance of granular, multi-modal analysis and the ability to accurately process information within the context.
185
+
186
+ across different RAG models highlights that the core strength of our approach lies in the multi-agent architecture itself rather than in any specific retrieval model. This further reinforces the compatibility of our proposed method.
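+
+ One way to read this result is that the agents depend only on a narrow retrieval interface, so the backbone can be swapped without touching anything downstream. A minimal sketch of such an interface, under our own naming rather than the paper's:
+
+ ```python
+ from typing import List, Protocol
+
+ class PageRetriever(Protocol):
+     """The only surface the agents rely on: a question in, page indices out."""
+     def retrieve(self, question: str, k: int) -> List[int]: ...
+
+ def retrieve_evidence(retriever: PageRetriever, question: str, k: int = 1) -> List[int]:
+     # ColPali or ColQwen2-v1.0 sits behind this one call; the agents,
+     # prompts, and summarization downstream stay identical either way.
+     return retriever.retrieve(question, k)
+ ```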
187
+
188
+ # 4.4. Case Study
189
+
190
+ We perform a case study to better understand MDocAgent. Figure 3 illustrates an example. The question requires extracting and comparing numerical information related to two distinct Latino populations from both textual and tabular data within a document. While both ColBERT and ColPali successfully retrieve the relevant page containing the necessary information, both baseline methods fail to synthesize the correct answer. The ColBERT + Llama-3.1-8B baseline, relying solely on text, incorrectly concludes that the foreign-born Latino population is greater, demonstrating a failure to accurately interpret the numerical data presented within the document's textual content. Similarly, M3DocRAG fails to answer the question correctly because it captures the wrong information. In contrast, our multi-agent framework successfully navigates this complexity and produces the correct answer.
191
+
192
+ Specifically, the general agent provides a correct but vague answer, making the critical agent essential for identifying key phrases like "Foreign born (excl. PR)" and the "cellphone sampling frame" table. This guides specialized agents
193
+
194
+ to precise locations for efficient data extraction. Both the text agent and the image agent correctly extract 795 for foreign-born Latinos and 1,051 for cellphone-interviewed Latinos. The summarizing agent then integrates these insights for an accurate comparison and a comprehensive final answer. This case study demonstrates how our structured, multi-agent framework outperforms methods that struggle with integrated text and image analysis (see more case studies in Appendix B.3).
195
+
196
+ # 5. Conclusion
197
+
198
+ This paper presents MDocAgent, a multi-agent framework for DocQA that integrates text and visual information through specialized agents and a dual RAG approach. Our framework addresses the limitations of existing methods by employing agents dedicated to text processing, image analysis, and critical information extraction, culminating in a synthesizing agent for final answer generation. Experimental results demonstrate significant improvements over LVLMs and multi-modal RAG methods, highlighting the efficacy of our collaborative multi-agent architecture. Our framework effectively handles information overload and promotes detailed cross-modal understanding, leading to more accurate and comprehensive answers in complex DocQA tasks. Future work will explore more advanced inter-agent communication and the integration of external knowledge sources.
199
+
200
+ # Acknowledgement
201
+
202
+ This research was partially supported by NIH 1R01AG085581 and Cisco Faculty Research Award.
203
+
204
+ # References
205
+
206
+ [1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219, 2024. 6, 12
207
+ [2] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 6, 12
208
+ [3] Chi-Min Chan, Weize Chen, Yusheng Su, Jianxuan Yu, Wei Xue, Shanghang Zhang, Jie Fu, and Zhiyuan Liu. Chateval: Towards better llm-based evaluators through multi-agent debate. arXiv preprint arXiv:2308.07201, 2023. 3
209
+ [4] Zhanpeng Chen, Chengjin Xu, Yiyan Qi, and Jian Guo. Mllm is a strong reranker: Advancing multimodal retrieval-augmented generation via knowledge-enhanced reranking and noise-injected training. arXiv preprint arXiv:2407.21439, 2024. 2
210
+ [5] Jaemin Cho, Debanjan Mahata, Ozan Irsoy, Yujie He, and Mohit Bansal. M3docrag: Multi-modal retrieval is what you need for multi-page multi-document understanding. arXiv preprint arXiv:2411.04952, 2024. 1, 2, 5, 6, 12
211
+ [6] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500, 2023. 2
212
+ [7] Chao Deng, Jiale Yuan, Pi Bu, Peijie Wang, Zhong-Zhi Li, Jian Xu, Xiao-Hui Li, Yuan Gao, Jun Song, Bo Zheng, et al. Longdocurl: a comprehensive multimodal long document benchmark integrating understanding, reasoning, and locating. arXiv preprint arXiv:2412.18424, 2024. 1, 2, 5, 12
213
+ [8] Yihao Ding, Zhe Huang, Runlin Wang, YanHang Zhang, Xianru Chen, Yuzhong Ma, Hyunsuk Chung, and Soyeon Caren Han. V-doc: Visual questions answering with documents. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 21492-21498, 2022. 1, 2
214
+ [9] Manuel Faysse, Hugues Sibille, Tony Wu, Bilel Omrani, Gautier Viaud, Céline Hudelot, and Pierre Colombo. Colpali: Efficient document retrieval with vision language models. In The Thirteenth International Conference on Learning Representations, 2024. 2, 4, 6, 12
215
+ [10] Manuel Faysse, Hugues Sibille, Tony Wu, Bilel Omrani, Gautier Viaud, Céline Hudelot, and Pierre Colombo. Colpali: Efficient document retrieval with vision language models, 2024. 5
216
+
217
+ [11] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Meng Wang, and Haofen Wang. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997, 2, 2023. 2
218
+ [12] Aaron Grattafiori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. 5, 6, 12
219
+ [13] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. mPLUG-DocOwl 1.5: Unified structure learning for OCR-free document understanding. arXiv preprint arXiv:2403.12895, 2024. 2
220
+ [14] Yulong Hui, Yao Lu, and Huanchen Zhang. Uda: A benchmark suite for retrieval augmented generation in real-world document analysis. arXiv preprint arXiv:2406.15187, 2024. 2, 5, 12
221
+ [15] Shyam Sundar Kannan, Vishnunandan LN Venkatesh, and Byung-Cheol Min. Smart-llm: Smart multi-agent robot task planning using large language models. In 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 12140–12147. IEEE, 2024. 3
222
+ [16] Omar Khattab and Matei Zaharia. Colbert: Efficient and effective passage search via contextualized late interaction over bert. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 39-48, 2020. 2, 4
223
+ [17] Yubin Kim, Chanwoo Park, Hyewon Jeong, Yik Siu Chan, Xuhai Xu, Daniel McDuff, Hyeonhoon Lee, Marzyeh Ghassemi, Cynthia Breazeal, Hae Won Park, et al. Mdagents: An adaptive collaboration of llms for medical decision-making. Advances in Neural Information Processing Systems, 37:79410-79452, 2024. 3
224
+ [18] Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. Advances in neural information processing systems, 33:9459-9474, 2020. 2
225
+ [19] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 6, 12
226
+ [20] Bingxuan Li, Yiwei Wang, Jiuxiang Gu, Kai-Wei Chang, and Nanyun Peng. Metal: A multi-agent framework for chart generation with test-time scaling. arXiv preprint arXiv:2502.17651, 2025. 3
227
+ [21] Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. Camel: Communicative agents for "mind" exploration of large language model society. Advances in Neural Information Processing Systems, 36:51991-52008, 2023. 3
228
+ [22] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024. 2, 6, 12
229
+
230
+ [23] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024. 2
231
+ [24] Chuwei Luo, Yufan Shen, Zhaoqing Zhu, Qi Zheng, Zhi Yu, and Cong Yao. Layoutllm: Layout instruction tuning with large language models for document understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15630-15640, 2024. 2
232
+ [25] Xueguang Ma, Shengyao Zhuang, Bevan Koopman, Guido Zuccon, Wenhu Chen, and Jimmy Lin. Visa: Retrieval augmented generation with visual source attribution. arXiv preprint arXiv:2412.14457, 2024. 1, 2
233
+ [26] Yubo Ma, Yuhang Zang, Liangyu Chen, Meiqi Chen, Yizhu Jiao, Xinze Li, Xinyuan Lu, Ziyu Liu, Yan Ma, Xiaoyi Dong, Pan Zhang, Liangming Pan, Yu-Gang Jiang, Jiaqi Wang, Yixin Cao, and Aixin Sun. Mmlongbench-doc: Benchmarking long-context document understanding with visualizations, 2024. 1, 2, 5, 12
234
+ [27] Andres Marafioti, Orr Zohar, Miquel Farre, Merve Noyan, Elie Bakouch, Pedro Cuenca, Cyril Zakka, Loubna Ben Allal, Anton Lozhkov, Nouamane Tazi, Vaibhav Srivastav, Joshua Lochner, Hugo Larcher, Mathieu Morlon, Lewis Tunstall, Leandro von Werra, and Thomas Wolf. Smolvlm: Redefining small and efficient multimodal models. 2025. 6, 12
235
+ [28] Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. Ocr-vqa: Visual question answering by reading text in images. In 2019 international conference on document analysis and recognition (ICDAR), pages 947-952. IEEE, 2019. 1, 2
236
+ [29] Humza Naveed, Asad Ullah Khan, Shi Qiu, Muhammad Saqib, Saeed Anwar, Muhammad Usman, Naveed Akhtar, Nick Barnes, and Ajmal Mian. A comprehensive overview of large language models. arXiv preprint arXiv:2307.06435, 2023. 2
237
+ [30] OpenAI. Gpt-4 technical report, 2023. https://arxiv.org/abs/2303.08774. 5, 15
238
+ [31] Jaeyoo Park, Jin Young Choi, Jeonghyung Park, and Bohyung Han. Hierarchical visual feature aggregation for OCR-free document understanding. Advances in Neural Information Processing Systems, 37:105972-105996, 2024. 2
239
+ [32] Keshav Santhanam, Omar Khattab, Jon Saad-Falcon, Christopher Potts, and Matei Zaharia. Colbertv2: Effective and efficient retrieval via lightweight late interaction. arXiv preprint arXiv:2112.01488, 2021. 5, 6, 12
240
+ [33] Peng Su, Kun Wang, Xingyu Zeng, Shixiang Tang, Dapeng Chen, Di Qiu, and Xiaogang Wang. Adapting object detectors with conditional domain normalization. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XI 16, pages 403-419. Springer, 2020. 3
241
+ [34] Manan Suri, Puneet Mathur, Franck Dernoncourt, Kanika Goswami, Ryan A Rossi, and Dinesh Manocha. Visdom: Multi-document qa with visually rich elements using multimodal retrieval-augmented generation. arXiv preprint arXiv:2412.10704, 2024. 1, 2
242
+ [35] Ryota Tanaka, Kyosuke Nishida, Kosuke Nishida, Taku Hasegawa, Itsumi Saito, and Kuniko Saito. Slidevqa: A
243
+
244
+ dataset for document visual question answering on multiple images. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 13636-13645, 2023. 1, 2
245
+ [36] Rubén Tito, Dimosthenis Karatzas, and Ernest Valveny. Hierarchical multimodal transformers for multipage docvqa. Pattern Recognition, 144:109834, 2023. 2
246
+ [37] Haibo Tong, Zhaoyang Wang, Zhaorun Chen, Haonian Ji, Shi Qiu, Siwei Han, Kexin Geng, Zhongkai Xue, Yiyang Zhou, Peng Xia, et al. Mj-video: Fine-grained benchmarking and rewarding video preferences in video generation. arXiv preprint arXiv:2502.01719, 2025. 2
247
+ [38] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 5, 6, 12
248
+ [39] Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, et al. Autogen: Enabling next-gen llm applications via multi-agent conversation. arXiv preprint arXiv:2308.08155, 2023. 3
249
+ [40] Peng Xia, Ze Chen, Juanxi Tian, Yangrui Gong, Ruibo Hou, Yue Xu, Zhenbang Wu, Zhiyuan Fan, Yiyang Zhou, Kangyu Zhu, et al. Cares: A comprehensive benchmark of trustworthiness in medical vision language models. Advances in Neural Information Processing Systems, 37:140334-140365, 2024. 2
250
+ [41] Peng Xia, Siwei Han, Shi Qiu, Yiyang Zhou, Zhaoyang Wang, Wenhao Zheng, Zhaorun Chen, Chenhang Cui, Mingyu Ding, Linjie Li, et al. Mmie: Massive multimodal interleaved comprehension benchmark for large vision-language models. arXiv preprint arXiv:2410.10139, 2024. 2
251
+ [42] Peng Xia, Kangyu Zhu, Haoran Li, Tianze Wang, Weijia Shi, Sheng Wang, Linjun Zhang, James Zou, and Huaxiu Yao. Mmed-rag: Versatile multimodal rag system for medical vision language models. arXiv preprint arXiv:2410.13085, 2024. 2
252
+ [43] Peng Xia, Kangyu Zhu, Haoran Li, Hongtu Zhu, Yun Li, Gang Li, Linjun Zhang, and Huaxiu Yao. Rule: Reliable multimodal rag for factuality in medical vision language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1081-1093, 2024.
253
+ [44] Shuo Xing, Yuping Wang, Peiran Li, Ruizheng Bai, Yueqi Wang, Chengxuan Qian, Huaxiu Yao, and Zhengzhong Tu. Re-align: Aligning vision language models via retrieval-augmented direct preference optimization. arXiv preprint arXiv:2502.13146, 2025. 2
254
+ [45] Junyuan Zhang, Qintong Zhang, Bin Wang, Linke Ouyang, Zichen Wen, Ying Li, Ka-Ho Chow, Conghui He, and Wentao Zhang. Ocr hinders rag: Evaluating the cascading impact of OCR on retrieval-augmented generation. arXiv preprint arXiv:2412.02592, 2024. 1
255
+ [46] Yaqi Zhang, Di Huang, Bin Liu, Shixiang Tang, Yan Lu, Lu Chen, Lei Bai, Qi Chu, Nenghai Yu, and Wanli Ouyang. Motiongpt: Finetuned llms are general-purpose motion generators. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 7368-7376, 2024. 2
256
+
257
+ [47] Yiyang Zhou, Chenhang Cui, Jaehong Yoon, Linjun Zhang, Zhun Deng, Chelsea Finn, Mohit Bansal, and Huaxiu Yao. Analyzing and mitigating object hallucination in large vision-language models. arXiv preprint arXiv:2310.00754, 2023.
258
+ [48] Yiyang Zhou, Chenhang Cui, Rafael Rafailov, Chelsea Finn, and Huaxiu Yao. Aligning modalities in vision large language models via preference fine-tuning. arXiv preprint arXiv:2402.11411, 2024.
259
+ [49] Yiyang Zhou, Zhiyuan Fan, Dongjie Cheng, Sihan Yang, Zhaorun Chen, Chenhang Cui, Xiyao Wang, Yun Li, Linjun Zhang, and Huaxiu Yao. Calibrated self-rewarding vision language models. arXiv preprint arXiv:2405.14622, 2024.
260
+ [50] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023.
261
+ [51] Kangyu Zhu, Peng Xia, Yun Li, Hongtu Zhu, Sheng Wang, and Huaxiu Yao. Mmedpo: Aligning medical vision-language models with clinical-aware multimodal preference optimization. arXiv preprint arXiv:2412.06141, 2024. 2
262
+
263
+ # A. Experimental Setup
264
+
265
+ # A.1. Baseline Models
266
+
267
+ - Qwen2-VL-7B-Instruct [38]: A large vision-language model developed by Alibaba, designed to handle multiple images as input.
268
+ - Qwen2.5-VL-7B-Instruct [2]: An enhanced version of Qwen2-VL-7B-Instruct, offering improved performance in processing multiple images.
269
+ - llava-v1.6-mistral-7b [22]: Also called LLaVA-NeXT, a vision-language model that improves upon LLaVA-1.5, capable of interpreting and generating content from multiple images.
270
+ - Phi-3.5-vision-instruct [1]: A model developed by Microsoft that integrates vision and language understanding, designed to process and generate responses based on multiple images.
271
+ - llava-one-vision-7B [19]: A model from the LLaVA-OneVision family, built on the Qwen2-7B language model with a context window of 32K tokens.
272
+ - SmolVLM-Instruct [27]: A compact vision-language model developed by HuggingFace, optimized for handling image inputs efficiently.
273
+ - ColBERTv2+Llama-3.1-8B-Instruct [12, 32]: A text-based RAG pipeline that utilizes ColBERTv2 [32] for retrieving text segments and Llama-3.1-8B-Instruct as the LLM to generate responses.
274
+ - M3DocRAG [5]: An image-based RAG pipeline that employs ColPali [9] for retrieving image segments and Qwen2-VL-7B-Instruct [38] as the LVLM for answer generation.
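+
+ For concreteness, the text-based RAG baseline above follows the usual retrieve-then-generate pattern. The sketch below shows that flow with hypothetical `searcher` and `llm` objects standing in for ColBERTv2 and Llama-3.1-8B-Instruct; the glue code and names are ours, not the original pipeline.
+
+ ```python
+ def text_rag_answer(question: str, searcher, llm, k: int = 4) -> str:
+     # 1. Retrieve the k most relevant pre-indexed text segments (top-1 or top-4).
+     segments = searcher.search(question, k=k)
+     # 2. Stuff the retrieved segments into the generator's prompt.
+     context = "\n\n".join(segments)
+     prompt = f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"
+     # 3. Generate the final answer with the LLM.
+     return llm.generate(prompt, max_new_tokens=256)
+ ```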
275
+
276
+ # A.2. Evaluation Benchmarks
277
+
278
+ - MMLongBench [26]: Evaluates models' ability to understand long documents with rich layouts and multi-modal components, comprising 1091 questions and 135 documents averaging 47.5 pages each.
279
+ - LongDocURL [7]: Provides a comprehensive multi-modal long document benchmark integrating understanding, reasoning, and locating tasks, covering over 33,000 pages of documents and 2,325 question-answer pairs.
280
+ - PaperTab [14]: Focuses on evaluating models' ability to comprehend and extract information from tables within NLP research papers, covering 393 questions among 307 documents.
281
+ - PaperText [14]: Assesses models' proficiency in understanding the textual content of NLP research papers, covering 2804 questions among 1087 documents.
282
+ - FetaTab [14]: A question-answering dataset for tables from Wikipedia pages, challenging models to generate free-form text answers, comprising 1023 questions and 878 documents.
283
+
284
+ # A.3. Hyperparameter Settings
285
+
286
+ - Temperature: All models use their default temperature setting.
287
+ - Max New Tokens: 256.
288
+ - Max Tokens per Image (Qwen2-VL-7B-Instruct):
289
+
290
+ - Top-1 retrieval: 16,384 (by default).
291
+
292
+ - Top-4 retrieval: 2,048.
293
+
294
+ - Image Resolution: 144 (for all benchmarks).
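+
+ As a hedged illustration of how these settings could be applied with the Hugging Face transformers stack: Qwen2-VL spends roughly one visual token per 28x28-pixel patch, so a per-image token budget can be enforced through the processor's `max_pixels` argument. The mapping below reflects our assumption about how the budget is realized, not code from the paper.
+
+ ```python
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
+
+ def load_qwen2vl(top_k: int):
+     # Per-image visual token budget taken from the settings above.
+     max_img_tokens = 16384 if top_k == 1 else 2048
+     processor = AutoProcessor.from_pretrained(
+         "Qwen/Qwen2-VL-7B-Instruct",
+         max_pixels=max_img_tokens * 28 * 28,  # one visual token per 28x28 patch (assumed)
+     )
+     model = Qwen2VLForConditionalGeneration.from_pretrained(
+         "Qwen/Qwen2-VL-7B-Instruct", device_map="auto"
+     )
+     return processor, model  # generate with max_new_tokens=256 at call time
+ ```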
295
+
296
+ # A.4. Prompt Settings
297
+
298
+ # General Agent
299
+
300
+ You are an advanced agent capable of analyzing both text and images. Your task is to use both the textual and visual information provided to answer the user's question accurately.
301
+
302
+ Extract Text from Both Sources: If the image contains text, extract it and consider both the text in the image and the provided textual content.
303
+
304
+ Analyze Visual and Textual Information: Combine details from both the image (e.g., objects, scenes, or patterns) and the text to build a comprehensive understanding of the content.
305
+
306
+ Provide a Combined Answer: Use the relevant details from both the image and the text to provide a clear, accurate, and context-aware response to the user's question.
307
+
308
+ # When responding:
309
+
310
+ - If both the image and text contain similar or overlapping information, cross-check and use both to ensure consistency.
311
+ - If the image contains information not present in the text, include it in your response if it is relevant to the question.
312
+ - If the text and image offer conflicting details, explain the discrepancies and clarify the most reliable source.
313
+
314
+ # Critical Agent
315
+
316
+ Provide a Python dictionary of critical information based on all given information—one for text and one for image.
317
+
318
+ Respond exclusively in a valid dictionary format without any additional text. The format should be: {"text": "critical information for text", "image": "critical information for image"}
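+
+ Because downstream agents consume this dictionary programmatically, the reply has to be parsed defensively. The paper does not specify its parser; the following is a minimal sketch of one reasonable approach.
+
+ ```python
+ import json
+ import re
+
+ def parse_critical(reply: str) -> dict:
+     """Extract the {"text": ..., "image": ...} dictionary from the critical
+     agent's reply, tolerating stray prose around the JSON object."""
+     match = re.search(r"\{.*\}", reply, flags=re.DOTALL)
+     if match:
+         try:
+             info = json.loads(match.group(0))
+             return {"text": str(info.get("text", "")),
+                     "image": str(info.get("image", ""))}
+         except json.JSONDecodeError:
+             pass
+     return {"text": "", "image": ""}  # fall back to empty hints (our choice)
+ ```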
319
+
320
+ <table><tr><td>Method</td><td>Layout</td><td>Text</td><td>Figure</td><td>Table</td><td>Others</td><td>Avg</td></tr><tr><td colspan="7">LVLMs</td></tr><tr><td>Qwen2-VL-7B-Instruct</td><td>0.264</td><td>0.386</td><td>0.308</td><td>0.207</td><td>0.500</td><td>0.296</td></tr><tr><td>Qwen2.5-VL-7B-Instruct</td><td>0.357</td><td>0.479</td><td>0.442</td><td>0.299</td><td>0.375</td><td>0.389</td></tr><tr><td>llava-v1.6-mistral-7b</td><td>0.067</td><td>0.165</td><td>0.088</td><td>0.051</td><td>0.250</td><td>0.099</td></tr><tr><td>llava-one-vision-7B</td><td>0.098</td><td>0.200</td><td>0.144</td><td>0.057</td><td>0.125</td><td>0.126</td></tr><tr><td>Phi-3.5-vision-instruct</td><td>0.245</td><td>0.375</td><td>0.291</td><td>0.187</td><td>0.375</td><td>0.280</td></tr><tr><td>SmolVLM-Instruct</td><td>0.128</td><td>0.224</td><td>0.164</td><td>0.100</td><td>0.250</td><td>0.163</td></tr><tr><td colspan="7">RAG methods (top 1)</td></tr><tr><td>ColBERTv2+Llama-3.1-8B</td><td>0.257</td><td>0.529</td><td>0.471</td><td>0.428</td><td>0.775</td><td>0.429</td></tr><tr><td>M3DocRAG (ColPali+Qwen2-VL-7B)</td><td>0.340</td><td>0.605</td><td>0.546</td><td>0.520</td><td>0.625</td><td>0.506</td></tr><tr><td>MDocAgent (Ours)</td><td>0.341</td><td>0.612</td><td>0.540</td><td>0.527</td><td>0.750</td><td>0.517</td></tr><tr><td colspan="7">RAG methods (top 4)</td></tr><tr><td>ColBERTv2+Llama-3.1-8B</td><td>0.349</td><td>0.599</td><td>0.491</td><td>0.485</td><td>0.875</td><td>0.491</td></tr><tr><td>M3DocRAG (ColPali+Qwen2-VL-7B)</td><td>0.426</td><td>0.660</td><td>0.595</td><td>0.542</td><td>0.625</td><td>0.554</td></tr><tr><td>MDocAgent (Ours)</td><td>0.438</td><td>0.675</td><td>0.592</td><td>0.581</td><td>0.875</td><td>0.578</td></tr></table>
321
+
322
+ Table 5. Performance comparison across different evidence sources on LongDocURL.
323
+
324
+ # Text Agent
325
+
326
+ You are a text analysis agent. Your job is to extract key information from the text and use it to answer the user's question accurately.
327
+
328
+ # Your tasks:
329
+
330
+ - Extract key details. Focus on the most important facts, data, or ideas related to the question.
331
+ - Understand the context and pay attention to the meaning and details.
332
+ - Use the extracted information to give a concise and relevant response to the user's question. Provide a clear answer.
333
+
334
+ # Image Agent
335
+
336
+ You are an advanced image processing agent specialized in analyzing and extracting information from images. The images may include document screenshots, illustrations, or photographs.
337
+
338
+ # Your tasks:
339
+
340
+ - Extract textual information from images using Optical Character Recognition (OCR).
341
+ - Analyze visual content to identify relevant details (e.g., objects, patterns, scenes).
342
+ - Combine textual and visual information to provide an accurate and context-aware answer to the user's question.
343
+
344
+ # Summarizing Agent
345
+
346
+ You are tasked with summarizing and evaluating the collective responses provided by multiple agents. You have access to the following information:
347
+
348
+ - Answers: The individual answers from all agents.
+
+ # Your tasks:
349
+ - Analyze: Evaluate the quality, consistency, and relevance of each answer. Identify commonalities, discrepancies, or gaps in reasoning.
350
+ - Synthesize: Summarize the most accurate and reliable information based on the evidence provided by the agents and their discussions.
351
+ - Conclude: Provide a final, well-reasoned answer to the question or task. Your conclusion should reflect the consensus (if one exists) or the most credible and well-supported answer.
352
+
353
+ Return the final answer in the following dictionary format:
354
+
355
+ {"Answer": Your final answer here}
356
+
357
+ # Evaluation
358
+
359
+ Question: {question}
360
+
361
+ Predicted Answer: {answer}
362
+
363
+ Ground Truth Answer: {gt}
364
+
365
+ Please evaluate whether the predicted answer is correct.
366
+
367
+ - If the answer is correct, return 1.
368
+ - If the answer is incorrect, return 0.
369
+
370
+ Return only a string formatted as a valid JSON dictionary that can be parsed using json.loads, for example: {"correctness": 1}
371
+
372
+ # A.5. Evaluation Metrics
373
+
374
+ The metric for all benchmarks is average binary correctness as judged by GPT-4o. The evaluation prompt is given in Section A.4. We use a Python script to extract the result provided by GPT-4o.
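+
+ A minimal sketch of that extraction step, assuming the judge replies in the {"correctness": 1} format requested above (the helper names are ours):
+
+ ```python
+ import json
+
+ def extract_correctness(judge_reply: str) -> int:
+     """Parse the {"correctness": 0|1} string the GPT-4o judge is asked to return."""
+     try:
+         return int(json.loads(judge_reply.strip()).get("correctness", 0))
+     except (json.JSONDecodeError, AttributeError, TypeError, ValueError):
+         return 0  # treat unparsable judgments as incorrect (our assumption)
+
+ def benchmark_accuracy(judge_replies) -> float:
+     """Average binary correctness over one benchmark, as reported in the tables."""
+     marks = [extract_correctness(r) for r in judge_replies]
+     return sum(marks) / len(marks) if marks else 0.0
+ ```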
375
+
376
+ # B. Additional Results
377
+
378
+ # B.1. Fine-grained Performance of LongDocURL
379
+
380
+ We present the fine-grained performance on LongDocURL, as illustrated in Table 5. Similar to MMLongBench, MDocAgent outperforms all LVLM baselines. Under the top-1 retrieval setting, though M3DocRAG performs slightly better on Figure and ColBERTv2+Llama-3.1-8B performs slightly better on the Others category, MDocAgent shows strong performance on Layout, Text, and Table and achieves the highest average accuracy. With the top-4 retrieval strategy, MDocAgent improves further and reaches the highest score in all categories except Figure, where it remains within 0.003 of M3DocRAG.
381
+
382
+ # B.2. Experiments on different model backbones in MDocAgent
383
+
384
+ Table 6 presents an ablation study evaluating the impact of different LVLMs on the performance of our framework. Three LVLMs (Qwen2-VL-7B-Instruct, Qwen2.5-VL-7B-Instruct, and GPT-4o) were each integrated as the backbone model for all agents except the text agent.
385
+
386
+ Qwen2.5-VL-7B-Instruct performs worse than Qwen2-VL-7B-Instruct on PaperTab, PaperText, and FetaTab, with both top-1 and top-4 retrieval. However, Qwen2.5-VL shows a marked improvement over Qwen2-VL on MMLongBench, resulting in higher average scores. MMLongBench's greater reliance on image-based questions might explain Qwen2.5-VL's superior performance on this benchmark, possibly indicating that Qwen2.5-VL is better at visual question-answering tasks but weaker on textual ones.
387
+
388
+ Importantly, GPT-4o significantly outperforms both Qwen2-VL and Qwen2.5-VL across all benchmarks. Remarkably, GPT-4o's top-1 performance surpasses even the top-4 results of both Qwen models in almost all cases. This substantial performance increase strongly suggests that our framework effectively leverages more powerful backbone models, showcasing its adaptability and capacity to benefit from improvements in the underlying LVLMs.
389
+
390
+ # B.3. Additional case studies
391
+
392
+ In Figure 4, the question requires identifying a reason from a list that lacks explicit numbering and is accompanied by images. ColBERT fails to retrieve the correct evidence page, resulting in ColBERT + Llama's inability to answer the question. Although ColPali correctly locates the evidence page, M3DocRAG still fails to produce the correct answer. However, our framework successfully identifies the correct answer ("Most Beautiful Campus") through the concerted efforts of all agents. The general agent arrives at a preliminary answer, and the critical agent identifies critical textual clues ("Most Beautiful Campus") and corresponding visual elements (images of the NTU campus). The image agent then refines the answer, leveraging the critical information to correctly pinpoint the description lacking people. Though the text agent cannot find the related information in the given context, the information provided by the critical agent helps it infer that the answer is "Most Beautiful Campus". The summarizing agent combines these insights to arrive at the correct final answer.
393
+
394
+ In Figure 5, the question asks for Professor Lebour's degree. ColPali fails to retrieve the relevant page, rendering M3DocRAG ineffective. While ColBERT correctly retrieves the page, ColBERT + Llama still produces an incorrect answer because it incorrectly adds "F.G.S." to the answer, which is not a degree. MDocAgent, on the other hand, correctly identifies the "M.A. degree". The general agent provides an initial answer, and the critical agent identifies
395
+
396
+ <table><tr><td></td><td>MMLongBench</td><td>LongDocUrl</td><td>PaperTab</td><td>PaperText</td><td>FetaTab</td><td>Avg</td></tr><tr><td colspan="7">With top 1 retrieval</td></tr><tr><td>+Qwen2-VL-7B-Instruct</td><td>0.299</td><td>0.517</td><td>0.219</td><td>0.399</td><td>0.600</td><td>0.407</td></tr><tr><td>+Qwen2.5-VL-7B-Instruct</td><td>0.351</td><td>0.519</td><td>0.211</td><td>0.382</td><td>0.589</td><td>0.410</td></tr><tr><td>+GPT-4o [30]</td><td>0.420</td><td>0.595</td><td>0.293</td><td>0.474</td><td>0.716</td><td>0.500</td></tr><tr><td colspan="7">With top 4 retrieval</td></tr><tr><td>+Qwen2-VL-7B-Instruct</td><td>0.315</td><td>0.578</td><td>0.278</td><td>0.487</td><td>0.675</td><td>0.467</td></tr><tr><td>+Qwen2.5-VL-7B-Instruct</td><td>0.389</td><td>0.566</td><td>0.277</td><td>0.454</td><td>0.671</td><td>0.471</td></tr></table>
397
+
398
+ Table 6. Performance comparison of using different backbone LVLMs in MDocAgent.
399
+
400
+ ![](images/889dc2b074d81ad1d1cd17f9ed0c0bd65ba90c1ee94d250f3794b6698ce56777.jpg)
401
+ Figure 4. A case study of MDocAgent compared with two other baselines. While only ColPali correctly retrieves the evidence page, neither baseline method identifies the correct answer. Our method, through critical information sharing and specialized agent collaboration, correctly pinpoints "Most Beautiful Campus" as the only reason without a corresponding image containing people.
402
+
403
+ the "M.A." designation in both text and image. Based on this clue, the text agent adds a more detailed explanation, and the image agent directly uses the clue as its answer. Finally, the summarizing agent synthesizes the results to provide the verified answer.
404
+
405
+ These two cases highlight MDocAgent's resilience to imperfect retrieval, demonstrating the effectiveness of collaborative multi-modal information processing and the importance of the general-critical agent's guidance in achieving high accuracy even with potentially insufficient or ambiguous information.
406
+
407
+ ![](images/e3302d47576efedcf4711a805d0e69760f3609ad80ae292bb265d550bded9952.jpg)
408
+ Figure 5. A Case study of MDocAgent compared with other two RAG-method baselines. In this case, ColPali fails to retrieve the correct evidence page, hindering M3DocRAG. While ColBERT succeeds in retrieval, the ColBERT + Llama baseline still provides an incorrect answer. Only our multi-agent framework, through precise critical information extraction and agent collaboration, correctly identifies the M.A. degree.
data/2025/2503_13xxx/2503.13964/images/1f00b2158f4baa2e6c8177096d270b27b92ab0d3605d80704357732f68428092.jpg ADDED

Git LFS Details

  • SHA256: 65b75bf8df4dedf400d92ef67bab685c69eed3988fff8a8d75c5ee76e3f2230f
  • Pointer size: 130 Bytes
  • Size of remote file: 15.6 kB
data/2025/2503_13xxx/2503.13964/images/2bd7c09de03471bde09aeab739f64575a2199a4ba0a5b0e6b991aabd27b2fe58.jpg ADDED

Git LFS Details

  • SHA256: d7701f78b6ae360f4ab981eba888a4828ec219c4713298bfa10aecdf0c835ed3
  • Pointer size: 131 Bytes
  • Size of remote file: 160 kB
data/2025/2503_13xxx/2503.13964/images/433cce92c4fa9b0ed6396c768f7c88a7d517383e10709be57e826f4b55732810.jpg ADDED

Git LFS Details

  • SHA256: 98dfd720f6a14dc186d05ccb8b718508cbd114e9801e808c49ae855754d85ad2
  • Pointer size: 130 Bytes
  • Size of remote file: 32.5 kB
data/2025/2503_13xxx/2503.13964/images/48dd2c6b9561c177c32582203772f073323eb12cd85b4bb5af966477db5e2ccf.jpg ADDED

Git LFS Details

  • SHA256: 24c06bac27f0a050b6334cb8f8a3e685e38b2311435b3a373ac7d14d0d4d2698
  • Pointer size: 131 Bytes
  • Size of remote file: 121 kB
data/2025/2503_13xxx/2503.13964/images/4e1dc163721351e4b1723c336237a6658cf99d52867c870146f5e16e57995ace.jpg ADDED

Git LFS Details

  • SHA256: 509dc38859aaa7ff758cc01b79f4aea20bb775bb02759017045f29a3211b5448
  • Pointer size: 131 Bytes
  • Size of remote file: 110 kB
data/2025/2503_13xxx/2503.13964/images/54193b1ec25a174ac9a5be0796af15788bb43ab4e2f179cd9684b69bf1429505.jpg ADDED

Git LFS Details

  • SHA256: ea0c76f74e2cff9bec399d7e230928946b1d549f39e655d6290819cdb0fe6188
  • Pointer size: 129 Bytes
  • Size of remote file: 3.29 kB
data/2025/2503_13xxx/2503.13964/images/5917de7b382bac0920c45bddea247ed059c33945008b92bec6a0a1a037bd24c0.jpg ADDED

Git LFS Details

  • SHA256: 142722231b821b0a3bff0cd80af456043466331bfca8808715b36e088a64d82e
  • Pointer size: 129 Bytes
  • Size of remote file: 8.93 kB
data/2025/2503_13xxx/2503.13964/images/65f09d9d9bf8a5d98d1a88b5078f75dbbadfdb206dedf12a188259313b7f6cbc.jpg ADDED

Git LFS Details

  • SHA256: e206b6f20fec417a8da4adaac0e739998986ef8de9bed77910fc6c99226cba44
  • Pointer size: 129 Bytes
  • Size of remote file: 3.05 kB
data/2025/2503_13xxx/2503.13964/images/7f99184156c64d7981aec59cb5ccae09a33552db937f003b0b4c448c4c943b4f.jpg ADDED

Git LFS Details

  • SHA256: 0da882bdc9f2c162590ad7911e17e91060fd04e4d1b611c3dbde48cc64a813b4
  • Pointer size: 131 Bytes
  • Size of remote file: 122 kB
data/2025/2503_13xxx/2503.13964/images/82db518b3d61ee7469d02fdb602f817f79c7da8d397039599091ab230cc5649f.jpg ADDED

Git LFS Details

  • SHA256: ae2681adacf9fb6d6dc4993f9799c5b5c13faa24a5e880eb0352160606fa8405
  • Pointer size: 130 Bytes
  • Size of remote file: 25.9 kB
data/2025/2503_13xxx/2503.13964/images/889dc2b074d81ad1d1cd17f9ed0c0bd65ba90c1ee94d250f3794b6698ce56777.jpg ADDED

Git LFS Details

  • SHA256: b7b72104631278965662c9c397eedd5cc377132240715fa08b53d1a1bc75482c
  • Pointer size: 131 Bytes
  • Size of remote file: 181 kB
data/2025/2503_13xxx/2503.13964/images/9b5b3aee3c02295fb484e50c85b0a9b598cbc5a7e5c4664da6351e57fd7c0c06.jpg ADDED

Git LFS Details

  • SHA256: 0ddc24822eb816e27b4be0e92d6a6c485daf494242bf95b5d12e182fa63c31ed
  • Pointer size: 131 Bytes
  • Size of remote file: 127 kB
data/2025/2503_13xxx/2503.13964/images/bbaef9c5613382d31f0f6cb67083f12e6430ea3dbf2c820eed48659ce97c7c81.jpg ADDED

Git LFS Details

  • SHA256: 80122d27fee19825b36bd889146ac3f72975760ee154cf82c4e2b37577be0648
  • Pointer size: 130 Bytes
  • Size of remote file: 48.4 kB
data/2025/2503_13xxx/2503.13964/images/bc82f417c5756372f5029d6b6ff6aed9a580609b40de53909fd949e594a23756.jpg ADDED

Git LFS Details

  • SHA256: fbd351b1ca5405a6a544e21501a58da283bb23c7add134b5a2f615a04fcc9ea6
  • Pointer size: 129 Bytes
  • Size of remote file: 3.24 kB
data/2025/2503_13xxx/2503.13964/images/bcf89c5d74f4c278c908dec983bf5ab2a4f1ed2bb9dd128735f8fc782e0d2401.jpg ADDED

Git LFS Details

  • SHA256: c000c51138bd0a129d70f2dfd3367788c1bd7630d97073ac695a29c797160393
  • Pointer size: 130 Bytes
  • Size of remote file: 61.9 kB
data/2025/2503_13xxx/2503.13964/images/c2f25214c60f23849de616ca343020e1890b570d8b13257ca0d07ab92a7b4341.jpg ADDED

Git LFS Details

  • SHA256: 1600a816510c573fae9e3ba310aaebbe68cd5deb29e9eb8bba8556da80aa09a5
  • Pointer size: 129 Bytes
  • Size of remote file: 5.29 kB
data/2025/2503_13xxx/2503.13964/images/d91ccdfc0d2ffe61a73e1a10c2178fe6dbc5f6967910e5ec9ea341203cc40cf6.jpg ADDED

Git LFS Details

  • SHA256: ff093a71826104a1f9ac1b944a81dce721c45ffeabe6cc9cb36a9e8b479ced4a
  • Pointer size: 130 Bytes
  • Size of remote file: 51.1 kB
data/2025/2503_13xxx/2503.13964/images/e3302d47576efedcf4711a805d0e69760f3609ad80ae292bb265d550bded9952.jpg ADDED

Git LFS Details

  • SHA256: 6ddde4c1cd3580d3da72c493b718138d8cd9e8ec1fd8ac7f68aa2307b9b0a42f
  • Pointer size: 131 Bytes
  • Size of remote file: 155 kB
data/2025/2503_13xxx/2503.13964/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_13xxx/2503.13975/339fa74d-2282-48ca-80ce-795c4c3bfb08_content_list.json ADDED
The diff for this file is too large to render. See raw diff