Chelsea707 committed (verified)
Commit 7bd50aa
1 Parent(s): fadcbf8

MinerU Batch b6dc05ec-e25c-4945-af67-398b60c0f004 (Part 2/8)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_content_list.json +1385 -0
  3. data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_model.json +1646 -0
  4. data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_origin.pdf +3 -0
  5. data/2025/2503_06xxx/2503.06034/full.md +245 -0
  6. data/2025/2503_06xxx/2503.06034/images/19bd12ea78b936391312be3542c89a51b650df6a46d4d0aae1b61c8758274193.jpg +3 -0
  7. data/2025/2503_06xxx/2503.06034/images/201276d89336b192c93edab42d7fd1f3756ef49b8fe971ce2da2ea08cf452b45.jpg +3 -0
  8. data/2025/2503_06xxx/2503.06034/images/33f6b988f7f82d10a201c8e7946b04b7b4e90b5c40ca8fdca7c7d3c2952cd20d.jpg +3 -0
  9. data/2025/2503_06xxx/2503.06034/images/5526761c70a68372ea70cb8a85788dcf7940cef10806a774f6a4b4ff6cff2088.jpg +3 -0
  10. data/2025/2503_06xxx/2503.06034/images/7129538c3cce7d017491c1eb6e639b82fe2d3ce74339f8151667ec7ad63d8f2f.jpg +3 -0
  11. data/2025/2503_06xxx/2503.06034/images/841c8ba914964229a06742463ad1a26b8534c8a57b907702c417e76170048961.jpg +3 -0
  12. data/2025/2503_06xxx/2503.06034/images/8ec781e53444e1410a1f7eed8b074c0242f2a33deab5c7c2e018480cfe44d841.jpg +3 -0
  13. data/2025/2503_06xxx/2503.06034/images/9ea4efa287dabb14400778b8c69544876c275e2979203a7c8a83ec4a6573c34b.jpg +3 -0
  14. data/2025/2503_06xxx/2503.06034/images/b61c0ad51072c6e5fb85db75333e280138e16595dfcf5a63de76802a6ebbf36d.jpg +3 -0
  15. data/2025/2503_06xxx/2503.06034/images/c1476da0554e945ca909e8455bd43585b6e7854ec087c1ca7fadec2fbf1ae44c.jpg +3 -0
  16. data/2025/2503_06xxx/2503.06034/layout.json +0 -0
  17. data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_content_list.json +0 -0
  18. data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_model.json +0 -0
  19. data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_origin.pdf +3 -0
  20. data/2025/2503_06xxx/2503.06072/full.md +0 -0
  21. data/2025/2503_06xxx/2503.06072/images/0189318ba1f014900b935a12a8a899913bb183a223da671c7ff7c8182bd98b07.jpg +3 -0
  22. data/2025/2503_06xxx/2503.06072/images/0203984daef82ac56f8e726f63158f0e8ae803834f3c08bef25d8ea99306b545.jpg +3 -0
  23. data/2025/2503_06xxx/2503.06072/images/08d7291a0233cbff66fedcc73c37117b8964ed7b6c23469843605665e99020be.jpg +3 -0
  24. data/2025/2503_06xxx/2503.06072/images/0c8fe2f79dc4a652c0633aa5a01560234ebbba1b0a05dc7717028cfc46d2171c.jpg +3 -0
  25. data/2025/2503_06xxx/2503.06072/images/0e0ea3161ef3fa6246f85462de2e851c6834b67ef42eaf4eda6b0e93ee38020b.jpg +3 -0
  26. data/2025/2503_06xxx/2503.06072/images/0e1b8367982f98fb83896a36de0896732e87edf014aceff48385292f06f3f2c6.jpg +3 -0
  27. data/2025/2503_06xxx/2503.06072/images/10a4b3c0c7e978ebb888e89286bc4d170fab570434c1cd29338a15ada828e765.jpg +3 -0
  28. data/2025/2503_06xxx/2503.06072/images/1307a3e1a3f64b78100b2aa1f683beae0d4ead4d1415f8fc8875bf98f75a4b60.jpg +3 -0
  29. data/2025/2503_06xxx/2503.06072/images/15fd582f6e71b50f45d63f0917378c25b08d547dc161b8ce4ecd34d00f1a4994.jpg +3 -0
  30. data/2025/2503_06xxx/2503.06072/images/17c4fdfd8a06ef096d0d51f9701f9caa167a2d979a4d240fbdc26f1d6da84e92.jpg +3 -0
  31. data/2025/2503_06xxx/2503.06072/images/182497f6d38bc1c4e889271e45996f2cf73b13809a7a085e35d48d14dd4f5ac7.jpg +3 -0
  32. data/2025/2503_06xxx/2503.06072/images/1bf98390f938c7a7467a23dafc76ae0d83d73cc263774a37047f8e6a67780fc3.jpg +3 -0
  33. data/2025/2503_06xxx/2503.06072/images/1f580d3caa54a8c1ad14eeeb0b9524ea6a25be9c120f9ea3be6868f2c589dddb.jpg +3 -0
  34. data/2025/2503_06xxx/2503.06072/images/2019b1dd85bc1c3e4845446e15902307a368b391de1c78ed4e83e4e89106ee70.jpg +3 -0
  35. data/2025/2503_06xxx/2503.06072/images/2173ae04cebe2c3863c2c0cf9123d98bf615ef6051e6c4191bd55e5ce8a0efc3.jpg +3 -0
  36. data/2025/2503_06xxx/2503.06072/images/223eb80a4314fa372e3c47c429314ba7ee1f9d9762237046ce7a5de74ef3485f.jpg +3 -0
  37. data/2025/2503_06xxx/2503.06072/images/27643558b74f8b2d7609f8b89f64b85023d6cd4592e7cce758abdb14c4ea0e83.jpg +3 -0
  38. data/2025/2503_06xxx/2503.06072/images/27c46c383e853427ef82465e2281e439bf21aca6f673dfc01e3d6f5784f23286.jpg +3 -0
  39. data/2025/2503_06xxx/2503.06072/images/35c7ba3fad913e20399bd9397d748298f1fcbcf0d604361c3137a8dc43a77ffc.jpg +3 -0
  40. data/2025/2503_06xxx/2503.06072/images/374ef5377b9dbc5e164405b18882c5dbe9bca35a19bc2d9fe189f202361f23f1.jpg +3 -0
  41. data/2025/2503_06xxx/2503.06072/images/37e469b3c82780ef32d32ff106bd4f045eea6b9ff314edea6cb99e1afb1bdb45.jpg +3 -0
  42. data/2025/2503_06xxx/2503.06072/images/3cd34579598d7287fbdf429f2b1bbf106b7fdf1d2083e7b62c81397797f94baf.jpg +3 -0
  43. data/2025/2503_06xxx/2503.06072/images/424cd433d41e56ffc3a768b9a071e34032aaee6c4e7f2914229f2ea9be0fd921.jpg +3 -0
  44. data/2025/2503_06xxx/2503.06072/images/425d11a682cb1e770ff86eee388c25a4e602a1f0334ec8e07b9639b3f75048d3.jpg +3 -0
  45. data/2025/2503_06xxx/2503.06072/images/44e7d69795c791fd1119416a2ee726d19789ea60d167a36338a21b7a80552f1c.jpg +3 -0
  46. data/2025/2503_06xxx/2503.06072/images/4505beccc08a752eb0429b0d8551eb748ab3429b6a65a8d188898611bfe59041.jpg +3 -0
  47. data/2025/2503_06xxx/2503.06072/images/49f52cbadc78923e815b72e1986b3bd486d400ab3358db927b07ad349721325b.jpg +3 -0
  48. data/2025/2503_06xxx/2503.06072/images/4e5adc43cc428a4495ef911b14c8d6fbb5da3c24fb516dca29ffabf37a12c1ba.jpg +3 -0
  49. data/2025/2503_06xxx/2503.06072/images/4f5590c99bea44f972908c6d46f573881e9049485d1de1a876f23547c228e2cd.jpg +3 -0
  50. data/2025/2503_06xxx/2503.06072/images/50940339ea4cfe17073ed58d9e475f9e60810e2637fcfd510950681c9ef92d60.jpg +3 -0
.gitattributes CHANGED
@@ -1755,3 +1755,11 @@ data/2025/2503_06xxx/2503.06287/9d21b81c-23d4-4928-b1f9-45ae9c277987_origin.pdf
1755
  data/2025/2503_06xxx/2503.06345/3ef38785-ad43-40bc-b404-5a874443c8b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1756
  data/2025/2503_06xxx/2503.06424/d6df37ef-e168-463c-9b87-fcfe8060cc6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
1757
data/2025/2503_06xxx/2503.06456/8d2f16db-733b-44a3-97e9-c0e8582df35a_origin.pdf filter=lfs diff=lfs merge=lfs -text
1758
+ data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1759
+ data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
1760
+ data/2025/2503_06xxx/2503.06073/971972e1-d144-4258-92a2-f94b01ae1f42_origin.pdf filter=lfs diff=lfs merge=lfs -text
1761
+ data/2025/2503_06xxx/2503.06132/05ca3479-3c0c-419e-8c0d-47f55b926370_origin.pdf filter=lfs diff=lfs merge=lfs -text
1762
+ data/2025/2503_06xxx/2503.06142/26e56f5e-5590-4669-889c-812f3669dc23_origin.pdf filter=lfs diff=lfs merge=lfs -text
1763
+ data/2025/2503_06xxx/2503.06320/236b0124-3aee-409e-b7ae-9afcd614c55b_origin.pdf filter=lfs diff=lfs merge=lfs -text
1764
+ data/2025/2503_10xxx/2503.10657/4ee135fa-463d-4bff-9d2d-fb223e3be9f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
1765
+ data/2025/2503_10xxx/2503.10658/f3f36660-d6f4-4702-bb4c-9a5d4cbb4f7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_content_list.json ADDED
@@ -0,0 +1,1385 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Rank-R1: Enhancing Reasoning in LLM-based Document Rerankers via Reinforcement Learning",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 122,
8
+ 89,
9
+ 875,
10
+ 130
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Shengyao Zhuang\\*,1, Xueguang Ma\\*,2, Bevan Koopman\\*,3, Jimmy Lin\\*, Guido Zuccon\\* 1CSIRO,",
17
+ "bbox": [
18
+ 121,
19
+ 149,
20
+ 875,
21
+ 184
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "2 University of Waterloo,",
28
+ "bbox": [
29
+ 394,
30
+ 185,
31
+ 601,
32
+ 200
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "<sup>3</sup> The University of Queensland",
39
+ "bbox": [
40
+ 368,
41
+ 202,
42
+ 628,
43
+ 219
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 260,
53
+ 260,
54
+ 339,
55
+ 275
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "In this paper, we introduce Rank-R1, a novel LLM-based reranker that performs reasoning over both the user query and candidate documents before performing the ranking task.",
62
+ "bbox": [
63
+ 141,
64
+ 292,
65
+ 460,
66
+ 350
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Existing document reranking methods based on large language models (LLMs) typically rely on prompting or fine-tuning LLMs to order or label candidate documents according to their relevance to a query. For Rank-R1, we use a reinforcement learning algorithm along with only a small set of relevance labels (without any reasoning supervision) to enhance the reasoning ability of LLM-based rerankers. Our hypothesis is that adding reasoning capabilities to the rerankers can improve their relevance assessment and ranking capabilities.",
73
+ "bbox": [
74
+ 141,
75
+ 356,
76
+ 460,
77
+ 527
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Our experiments on the TREC DL and BRIGHT datasets show that Rank-R1 is highly effective, especially for complex queries. In particular, we find that Rank-R1 achieves effectiveness on in-domain datasets at par with that of supervised fine-tuning methods, but utilizing only $18\\%$ of the training data used by the fine-tuning methods. We also find that the model largely outperforms zero-shot and supervised fine-tuning when applied to out-of-domain datasets featuring complex queries, especially when a 14B-size model is used. Finally, we qualitatively observe that Rank-R1's reasoning process improves the explainability of the ranking results, opening new opportunities for search engine results presentation and fruition.",
84
+ "bbox": [
85
+ 141,
86
+ 533,
87
+ 460,
88
+ 774
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "1 Introduction",
95
+ "text_level": 1,
96
+ "bbox": [
97
+ 114,
98
+ 791,
99
+ 260,
100
+ 806
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "Large language models (LLMs) have shown strong performance in document ranking tasks (Ma et al., 2023; Sun et al., 2023; Zhuang et al., 2023, 2024b,a; Sun et al., 2024; Xu, 2024). Zero-shot",
107
+ "bbox": [
108
+ 112,
109
+ 819,
110
+ 489,
111
+ 883
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "prompting methods such as RankGPT rerank documents by directly prompting LLMs to generate reordered document lists (Sun et al., 2023). However, these methods rely on the assumption that the LLM follows instructions well. Moreover, being zero-shot, they do not leverage available human-annotated relevance data for further improvement.",
118
+ "bbox": [
119
+ 507,
120
+ 261,
121
+ 884,
122
+ 373
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "In contrast, methods like RankLlama fine-tune LLMs using human relevance judgments, assigning scores to each query-document pair (Ma et al., 2024). While effective, these approaches do not explicitly model reasoning processes. This is mainly due to the lack of high-quality reasoning data for supervised fine-tuning. In practice, user relevance judgments often come in the form of selecting the most relevant document from a set, but achieving high-quality rankings requires strong reasoning capabilities to interpret complex relevance relationships.",
129
+ "bbox": [
130
+ 507,
131
+ 374,
132
+ 884,
133
+ 567
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "Recent advancements in reinforcement learning (RL) for LLMs, such as DeepSeek-R1 (Guo et al., 2025) and Simple RL (Zeng et al., 2025), have demonstrated that reward-based training can enhance reasoning abilities, particularly in tasks like mathematical question-answering (Shao et al., 2024). These recent innovations suggest that rule-based reward scoring alone can improve an LLM's ability to reason and explain.",
140
+ "bbox": [
141
+ 507,
142
+ 568,
143
+ 885,
144
+ 711
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Building on this insight, we ask whether reinforcement learning in the style of Deepseek-R1 can improve reasoning in document reranking. Specifically, we apply Group Relative Policy Optimization (GRPO) (Shao et al., 2024) to train an LLM-based reranker. Given a user query and a list of retrieved candidate documents, the reranker generates reasoning steps before selecting the most relevant document. The reward signal is determined only by whether the model eventually identifies the most relevant document among the candidates correctly.",
151
+ "bbox": [
152
+ 507,
153
+ 712,
154
+ 885,
155
+ 889
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "We train our model, Rank-R1, on the MS MARCO passage ranking dataset and evaluate it",
162
+ "bbox": [
163
+ 507,
164
+ 889,
165
+ 882,
166
+ 921
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "aside_text",
172
+ "text": "arXiv:2503.06034v1 [cs.IR] 8 Mar 2025",
173
+ "bbox": [
174
+ 21,
175
+ 317,
176
+ 60,
177
+ 717
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "page_footnote",
183
+ "text": "*These authors contributed equally to this work. Work in progress.",
184
+ "bbox": [
185
+ 115,
186
+ 894,
187
+ 487,
188
+ 921
189
+ ],
190
+ "page_idx": 0
191
+ },
192
+ {
193
+ "type": "page_number",
194
+ "text": "1",
195
+ "bbox": [
196
+ 492,
197
+ 942,
198
+ 504,
199
+ 954
200
+ ],
201
+ "page_idx": 0
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "on TREC DL19 and DL20 (in-domain datasets). Our results show that RL-based training is at par with supervised fine-tuning on in-domain data.",
206
+ "bbox": [
207
+ 112,
208
+ 84,
209
+ 487,
210
+ 131
211
+ ],
212
+ "page_idx": 1
213
+ },
214
+ {
215
+ "type": "text",
216
+ "text": "Additionally, we evaluate Rank-R1 on the BRIGHT dataset (SU et al., 2025), which requires complex query reasoning and relevance understanding, and is out-of-domain with respect to the data used to train the model. Rank-R1, trained with retrieval reinforcement learning, outperforms both zero-shot prompting and supervised fine-tuning on this out-of-domain dataset. Notably, our 14B model surpasses the much larger (zeroshot) GPT-4 in reranking performance on the BRIGHT dataset.",
217
+ "bbox": [
218
+ 112,
219
+ 134,
220
+ 487,
221
+ 294
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "2 Method",
228
+ "text_level": 1,
229
+ "bbox": [
230
+ 114,
231
+ 307,
232
+ 218,
233
+ 322
234
+ ],
235
+ "page_idx": 1
236
+ },
237
+ {
238
+ "type": "text",
239
+ "text": "To train Rank-R1, we adapt the RL training framework proposed by DeepSeek (Shao et al., 2024; Guo et al., 2025) to enhance the reasoning ability of LLM-based document rerankers. In this section, we discuss the details of each component in our method.",
240
+ "bbox": [
241
+ 112,
242
+ 335,
243
+ 487,
244
+ 430
245
+ ],
246
+ "page_idx": 1
247
+ },
248
+ {
249
+ "type": "text",
250
+ "text": "2.1 LLM Reranking",
251
+ "text_level": 1,
252
+ "bbox": [
253
+ 112,
254
+ 445,
255
+ 292,
256
+ 460
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "Our LLM reranker follows the Setwise prompting approach proposed by Zhuang et al. (2024b). This method takes a query and a set of candidate documents as input to the LLM and prompts the LLM to select the most relevant document among the candidates based on relevance to the query. Then, the heapsort algorithm is used to build a heap tree over all the candidate documents from the first-stage retriever, and the documents are reranked via the \"heapify\" operations with the Setwise prompt.",
263
+ "bbox": [
264
+ 112,
265
+ 466,
266
+ 487,
267
+ 627
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "text",
273
+ "text": "However, the original Setwise ranking approach does not encourage the LLM to reason about the relevance between the query and the documents. Instead, it directly asks for the most relevant one. To unlock the reranker's reasoning ability, we modify the original Setwise prompt by adding a reasoning instruction, as shown in Figure 1. Specifically, we adapt the system prompt from the DeepSeek-R1-Zero method to the Setwise prompt. This modification encourages LLMs to reason first before providing an answer—in our case, predicting the label of the most relevant candidate document to the query. We refer to the Setwise method using this modified prompt as Rank-R1.",
274
+ "bbox": [
275
+ 112,
276
+ 629,
277
+ 489,
278
+ 853
279
+ ],
280
+ "page_idx": 1
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "2.2 Reinforcement Learning",
285
+ "text_level": 1,
286
+ "bbox": [
287
+ 112,
288
+ 866,
289
+ 352,
290
+ 883
291
+ ],
292
+ "page_idx": 1
293
+ },
294
+ {
295
+ "type": "text",
296
+ "text": "Although any modern instruction-tuned LLM, when coupled with our Rank-R1 prompt, may ex",
297
+ "bbox": [
298
+ 112,
299
+ 889,
300
+ 489,
301
+ 921
302
+ ],
303
+ "page_idx": 1
304
+ },
305
+ {
306
+ "type": "text",
307
+ "text": "SYSTEM:",
308
+ "text_level": 1,
309
+ "bbox": [
310
+ 534,
311
+ 92,
312
+ 603,
313
+ 103
314
+ ],
315
+ "page_idx": 1
316
+ },
317
+ {
318
+ "type": "text",
319
+ "text": "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>.",
320
+ "bbox": [
321
+ 534,
322
+ 104,
323
+ 855,
324
+ 199
325
+ ],
326
+ "page_idx": 1
327
+ },
328
+ {
329
+ "type": "text",
330
+ "text": "USER:",
331
+ "text_level": 1,
332
+ "bbox": [
333
+ 536,
334
+ 210,
335
+ 581,
336
+ 222
337
+ ],
338
+ "page_idx": 1
339
+ },
340
+ {
341
+ "type": "text",
342
+ "text": "Given the query: \"{query}\", which of the following documents is most relevant?",
343
+ "bbox": [
344
+ 536,
345
+ 223,
346
+ 853,
347
+ 244
348
+ ],
349
+ "page_idx": 1
350
+ },
351
+ {
352
+ "type": "list",
353
+ "sub_type": "text",
354
+ "list_items": [
355
+ "[1] {document1}",
356
+ "[2] {document2}"
357
+ ],
358
+ "bbox": [
359
+ 537,
360
+ 247,
361
+ 640,
362
+ 271
363
+ ],
364
+ "page_idx": 1
365
+ },
366
+ {
367
+ "type": "text",
368
+ "text": "[20] {document20}",
369
+ "bbox": [
370
+ 537,
371
+ 277,
372
+ 655,
373
+ 294
374
+ ],
375
+ "page_idx": 1
376
+ },
377
+ {
378
+ "type": "text",
379
+ "text": "After completing the reasoning process, please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <think> reasoning process here </think> <answer>[3]</answer>.",
380
+ "bbox": [
381
+ 536,
382
+ 294,
383
+ 857,
384
+ 366
385
+ ],
386
+ "page_idx": 1
387
+ },
388
+ {
389
+ "type": "text",
390
+ "text": "Figure 1: Prompt used for Rank-R1.",
391
+ "bbox": [
392
+ 571,
393
+ 395,
394
+ 818,
395
+ 409
396
+ ],
397
+ "page_idx": 1
398
+ },
399
+ {
400
+ "type": "text",
401
+ "text": "hibit strong zeroshot reasoning ability, their reasoning process could still be suboptimal for the Setwise ranking method. This is because the LLMs might not have been fine-tuned on similar instructional data. On the other hand, gathering human-annotated reasoning data for large-scale supervised fine-tuning of Rank-R1 could be both costly and infeasible. To address these challenges, we employ the GRPO RL algorithm (Shao et al., 2024) to enhance the reasoning process of the Rank-R1 reranker. The GRPO algorithm optimizes the following objective:",
402
+ "bbox": [
403
+ 507,
404
+ 436,
405
+ 884,
406
+ 629
407
+ ],
408
+ "page_idx": 1
409
+ },
410
+ {
411
+ "type": "equation",
412
+ "text": "\n$$\n\\begin{array}{l} \\mathcal{J}_{GRPO}(\\theta) = \\mathbb{E}\\left[ q \\sim P(Q), \\{o_{i}\\}_{i=1}^{|G|} \\sim \\pi_{\\theta_{old}}(O \\mid q) \\right] \\\\ \\frac{1}{|G|} \\sum_{i=1}^{|G|} \\Big( \\min \\Big( \\frac{\\pi_{\\theta}(o_{i} \\mid q)}{\\pi_{\\theta_{old}}(o_{i} \\mid q)} A_{i}, \\\\ \\operatorname{clip}\\Big( \\frac{\\pi_{\\theta}(o_{i} \\mid q)}{\\pi_{\\theta_{old}}(o_{i} \\mid q)}, 1 - \\epsilon, 1 + \\epsilon \\Big) A_{i} \\Big) \\\\ - \\beta \\mathbb{D}_{KL}\\left(\\pi_{\\theta} \\mid\\mid \\pi_{ref}\\right) \\Big) \\tag{1} \\end{array}\n$$\n",
413
+ "text_format": "latex",
414
+ "bbox": [
415
+ 519,
416
+ 649,
417
+ 880,
418
+ 765
419
+ ],
420
+ "page_idx": 1
421
+ },
422
+ {
423
+ "type": "text",
424
+ "text": "where $Q$ is the Setwise ranking prompt in the training data, $G$ is a group of generated data points sampled from an old policy $\\pi_{\\theta_{old}}$ (in our case, the initial LLM) given a sampled prompt, and $\\pi_{\\theta}$ is the LLM ranker we are optimizing. The clip operation is used to cap the ratio of the new and old policies. In our experiment, we only use the data samples from $\\pi_{\\theta_{old}}$ to update $\\pi_{\\theta}$ once, meaning $\\pi_{\\theta_{old}} = \\pi_{\\theta}$ , so Eq 1 reduces to:",
425
+ "bbox": [
426
+ 507,
427
+ 776,
428
+ 882,
429
+ 921
430
+ ],
431
+ "page_idx": 1
432
+ },
433
+ {
434
+ "type": "page_number",
435
+ "text": "2",
436
+ "bbox": [
437
+ 492,
438
+ 942,
439
+ 504,
440
+ 954
441
+ ],
442
+ "page_idx": 1
443
+ },
444
+ {
445
+ "type": "equation",
446
+ "text": "\n$$\n\\mathcal{J}_{GRPO}(\\theta) =\n$$\n",
447
+ "text_format": "latex",
448
+ "bbox": [
449
+ 156,
450
+ 112,
451
+ 240,
452
+ 124
453
+ ],
454
+ "page_idx": 2
455
+ },
456
+ {
457
+ "type": "equation",
458
+ "text": "\n$$\n\\frac{1}{|G|} \\sum_{i=1}^{|G|} \\left( \\frac{\\pi_{\\theta}(o_{i} \\mid q)}{\\pi_{\\theta_{old}}(o_{i} \\mid q)} A_{i} - \\beta \\mathbb{D}_{KL}\\left(\\pi_{\\theta} \\mid\\mid \\pi_{ref}\\right) \\right) \\tag{2}\n$$\n",
459
+ "text_format": "latex",
460
+ "bbox": [
461
+ 157,
462
+ 129,
463
+ 485,
464
+ 162
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "where $\\mathbb{D}_{KL}$ is the KL loss that penalizes how far the new policy differs from a reference policy $\\pi_{ref}$ , which in our case is the original instruction-tuned LLM.",
471
+ "bbox": [
472
+ 112,
473
+ 174,
474
+ 487,
475
+ 236
476
+ ],
477
+ "page_idx": 2
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "The generated data points, in our case, represent the reasoning process. We use the generated relevant document label to calculate the advantage $A$ as follows:",
482
+ "bbox": [
483
+ 112,
484
+ 239,
485
+ 489,
486
+ 302
487
+ ],
488
+ "page_idx": 2
489
+ },
490
+ {
491
+ "type": "equation",
492
+ "text": "\n$$\nA_{i} = \\frac{r_{i} - \\operatorname{mean}\\left(\\{r_{1}, r_{2}, \\cdots, r_{G}\\}\\right)}{\\operatorname{std}\\left(\\{r_{1}, r_{2}, \\cdots, r_{G}\\}\\right)} \\tag{3}\n$$\n",
493
+ "text_format": "latex",
494
+ "bbox": [
495
+ 168,
496
+ 312,
497
+ 487,
498
+ 344
499
+ ],
500
+ "page_idx": 2
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "where $r$ is the reward given by the training data, which we describe in detail later. Overall, the GRPO training optimizes the LLM to generate tokens that maximize the rewards.",
505
+ "bbox": [
506
+ 112,
507
+ 355,
508
+ 487,
509
+ 418
510
+ ],
511
+ "page_idx": 2
512
+ },
513
+ {
514
+ "type": "text",
515
+ "text": "The Setwise prompts and rewards for our GRPO training come from training data. For constructing such RL training data, we use the MS-MARCO training data provided by the Tevatron IR toolkit (Gao et al., 2023), which includes training queries, human-labeled relevant documents, and BM25-retrieved top-100 documents. For each training query, we sample 19 documents from the set retrieved by BM25, along with one labeled relevant document, to form the Rank-R1 prompt.",
516
+ "bbox": [
517
+ 112,
518
+ 419,
519
+ 487,
520
+ 580
521
+ ],
522
+ "page_idx": 2
523
+ },
524
+ {
525
+ "type": "text",
526
+ "text": "We design the reward function for RL as follows: a reward of one is granted if and only if the LLM generations match the reasoning and answering format (i.e., the generated tokens fill in the <think> </think> <answer> </answer> spans) and the answer correctly matches the label of the ground-truth relevant document. Otherwise, a reward of zero is provided. Our straightforward rule-based reward mechanism does not impose constraints on the reasoning process but instead encourages the model to generate the correct format and answer following the reasoning. Most importantly, this RL training does not require human-annotated reasoning data.",
527
+ "bbox": [
528
+ 112,
529
+ 581,
530
+ 487,
531
+ 789
532
+ ],
533
+ "page_idx": 2
534
+ },
535
+ {
536
+ "type": "text",
537
+ "text": "There are two main reasons why we selected the Setwise ranking approach as the backend of our Rank-R1: First, the nature of Setwise prompting allows us to use a simple rule-based reward function, as only the most relevant document label needs to be predicted, enabling a straightforward match with",
538
+ "bbox": [
539
+ 112,
540
+ 790,
541
+ 487,
542
+ 885
543
+ ],
544
+ "page_idx": 2
545
+ },
546
+ {
547
+ "type": "text",
548
+ "text": "the answer span. Secondly, it is convenient for us to fairly compare the RL-trained Rank-R1 with the supervise fine-tuned Setwise reranker, which simply removes the reasoning process from the prompt and is directly trained to predict the ground-truth label using the same training data. Other LLM-based ranking methods, such as Listwise prompting, are harder to fairly compare in this setting, as a ground-truth ranking is usually not available in the training data (MSMARCO only has one judged relevant document per query on average).",
549
+ "bbox": [
550
+ 507,
551
+ 84,
552
+ 884,
553
+ 262
554
+ ],
555
+ "page_idx": 2
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "3 Experimental Settings",
560
+ "text_level": 1,
561
+ "bbox": [
562
+ 507,
563
+ 275,
564
+ 734,
565
+ 292
566
+ ],
567
+ "page_idx": 2
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "Datasets. We explore two dataset settings to evaluate the effectiveness of Rank-R1: an in-domain setting, where we use the TREC-DL19 and DL20 datasets (Craswell et al., 2020, 2021), and an out-of-domain setting, where we use the BRIGHT benchmark datasets (SU et al., 2025). The DL19 and DL20 are in-domain because they are based on the MSMARCO passage ranking dataset – the same dataset used in the training of our rerankers; unlike MSMARCO though they contain deep assessments for each query (around 210 assessments per query on average). The BRIGHT benchmark datasets encompass domains, including biology, code, and math, and require intensive reasoning to rank relevant documents.",
572
+ "bbox": [
573
+ 507,
574
+ 303,
575
+ 884,
576
+ 544
577
+ ],
578
+ "page_idx": 2
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "Initial Retrieval. For all methods we consider in our experiments, the initial retrieval is performed using the pyserini implementation of BM25 (Lin et al., 2021). Reranking approaches are then provided the top 100 documents retrieved by BM25 to rerank.",
583
+ "bbox": [
584
+ 507,
585
+ 555,
586
+ 882,
587
+ 651
588
+ ],
589
+ "page_idx": 2
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "Rank-R1 Settings. We compare two settings of Rank-R1: (i) a Zeroshot setting, where only the Setwise-based prompt, improved by the presence of the reasoning instruction is used, with no training performed, and (ii) a GRPO setting, where we employ the same prompt as in the Zeroshot setting, but we also train the model according to the GRPO RL method. For GRPO, we set $|G| = 8$ , that is, for each training query we generate 8 answers with the old policy $\\pi_{\\theta_{old}}$ ; as training dataset we use the training part of MS MARCO. For all Rank-R1 experiments, due to the limited computational resources, we only train on approximately $18\\%$ of the full MS-MARCO dataset, requiring roughly three (3B and 7B models) to five (14B model) days of training on four H100 GPUs. The details of GRPO training",
594
+ "bbox": [
595
+ 507,
596
+ 663,
597
+ 884,
598
+ 921
599
+ ],
600
+ "page_idx": 2
601
+ },
602
+ {
603
+ "type": "page_footnote",
604
+ "text": "<sup>1</sup>https://huggingface.co/datasets/Tevatron/msmarco-passage",
605
+ "bbox": [
606
+ 115,
607
+ 894,
608
+ 433,
609
+ 920
610
+ ],
611
+ "page_idx": 2
612
+ },
613
+ {
614
+ "type": "page_number",
615
+ "text": "3",
616
+ "bbox": [
617
+ 492,
618
+ 942,
619
+ 504,
620
+ 954
621
+ ],
622
+ "page_idx": 2
623
+ },
624
+ {
625
+ "type": "text",
626
+ "text": "hyperparameters are provided in Appendix B.",
627
+ "bbox": [
628
+ 112,
629
+ 84,
630
+ 455,
631
+ 99
632
+ ],
633
+ "page_idx": 3
634
+ },
635
+ {
636
+ "type": "text",
637
+ "text": "Comparison Methods. To understand the effect of the reasoning prompt and the GRPO training on the Setwise approach, we compare Rank-R1 against the original Setwise method (using prompt as listed in Appendix A), either used in a Zeroshot manner, or trained with the standard supervised fine-tuning (SFT) on MS MARCO data (400k training datapoints circa). The details of SFT training hyperparameters are provided in Appendix B.",
638
+ "bbox": [
639
+ 112,
640
+ 110,
641
+ 487,
642
+ 254
643
+ ],
644
+ "page_idx": 3
645
+ },
646
+ {
647
+ "type": "text",
648
+ "text": "To further contextualise the effectiveness of Rank-R1, we also consider the effectiveness of the current state-of-the-art Listwise reranker, RankZephyr (Pradeep et al., 2023), which was trained with data generated from GPT-4. We use the model checkpoint $^{2}$ released by the author and run it ourselves to ensure the exact same settings. We also report the effectiveness of RankGPT, a zero-shot listwise reranked where GPT-4 is used as backbone. Due to budget constraints, we were unable to execute the experiments with RankGPT ourselves: we could only report the results of RankGPT obtained by Sun et al. (2023) for TREC DL19 and DL20, and by SU et al. (2025) for BRIGHT. Note that for the BRIGHT dataset, RankGPT was applied to a different implementation of BM25 from the one we used: the one used by RankGPT has a higher average nDCG@10 than our BM25.",
649
+ "bbox": [
650
+ 115,
651
+ 255,
652
+ 489,
653
+ 558
654
+ ],
655
+ "page_idx": 3
656
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "Backbone LLMs. For Setwise and Rank-R1, we explore base LLMs using instruction-tuned Qwen2.5 series models (Qwen et al., 2025) ranging from 3 billion to 14 billion parameters. RankZephyr is based on the Zephyr 7B backbone (Tunstall et al., 2023); RankGPT is based on OpenAI's GPT-4 model (OpenAI et al., 2024).",
660
+ "bbox": [
661
+ 112,
662
+ 570,
663
+ 489,
664
+ 683
665
+ ],
666
+ "page_idx": 3
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "4 Results",
671
+ "text_level": 1,
672
+ "bbox": [
673
+ 112,
674
+ 696,
675
+ 213,
676
+ 709
677
+ ],
678
+ "page_idx": 3
679
+ },
680
+ {
681
+ "type": "text",
682
+ "text": "4.1 In-domain effectiveness",
683
+ "text_level": 1,
684
+ "bbox": [
685
+ 112,
686
+ 722,
687
+ 346,
688
+ 736
689
+ ],
690
+ "page_idx": 3
691
+ },
692
+ {
693
+ "type": "text",
694
+ "text": "In Table 1, we present the effectiveness of Rank-R1 variants on the TREC-DL19 and DL20 passage ranking datasets.",
695
+ "bbox": [
696
+ 112,
697
+ 743,
698
+ 487,
699
+ 790
700
+ ],
701
+ "page_idx": 3
702
+ },
703
+ {
704
+ "type": "text",
705
+ "text": "We start by comparing Setwise and Rank-R1 under the zero-shot setting. The results suggest that incorporating the reasoning process into the Setwise method improves zero-shot ranking effectiveness. The improvements are particularly large on the 3B size model.",
706
+ "bbox": [
707
+ 112,
708
+ 791,
709
+ 487,
710
+ 885
711
+ ],
712
+ "page_idx": 3
713
+ },
714
+ {
715
+ "type": "table",
716
+ "img_path": "images/9ea4efa287dabb14400778b8c69544876c275e2979203a7c8a83ec4a6573c34b.jpg",
717
+ "table_caption": [],
718
+ "table_footnote": [],
719
+ "table_body": "<table><tr><td>Model</td><td>Training</td><td>DL19</td><td>DL20</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.506</td><td>.480</td></tr><tr><td>RankZephyr-7B</td><td>GPT4-distil.</td><td>.739</td><td>.706</td></tr><tr><td>RankGPT</td><td>Zeroshot</td><td>.756</td><td>.706</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.371</td><td>.317</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.734</td><td>.672</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.605</td><td>.538</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.713</td><td>.668</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.675</td><td>.636</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.738</td><td>.692</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.712</td><td>.662</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.727</td><td>.685</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.677</td><td>.648</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.729</td><td>.689</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.679</td><td>.652</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.714</td><td>.691</td></tr></table>",
720
+ "bbox": [
721
+ 510,
722
+ 80,
723
+ 892,
724
+ 344
725
+ ],
726
+ "page_idx": 3
727
+ },
728
+ {
729
+ "type": "text",
730
+ "text": "Table 1: TREC DL19 and DL20 nDCG@10 results. SFT=supervised fine tuned. GRPO trained on only $18\\%$ of 400k data used to train SFT.",
731
+ "bbox": [
732
+ 507,
733
+ 354,
734
+ 884,
735
+ 395
736
+ ],
737
+ "page_idx": 3
738
+ },
739
+ {
740
+ "type": "image",
741
+ "img_path": "images/c1476da0554e945ca909e8455bd43585b6e7854ec087c1ca7fadec2fbf1ae44c.jpg",
742
+ "image_caption": [
743
+ "Figure 2: Data efficiency comparison between Setwise SFT and Rank-R1."
744
+ ],
745
+ "image_footnote": [],
746
+ "bbox": [
747
+ 515,
748
+ 416,
749
+ 890,
750
+ 574
751
+ ],
752
+ "page_idx": 3
753
+ },
754
+ {
755
+ "type": "text",
756
+ "text": "Next we consider the effects of training with GRPO. With GRPO training, Rank-R1 effectiveness increases, indicating that reasoning and answer generation (i.e. ranking) are enhanced by RL training. This improvement makes Rank-R1 comparable to the Setwise SFT trained on the full dataset and brings it closer to the effectiveness of the state-of-the-art RankZephyr.",
757
+ "bbox": [
758
+ 507,
759
+ 643,
760
+ 884,
761
+ 772
762
+ ],
763
+ "page_idx": 3
764
+ },
765
+ {
766
+ "type": "text",
767
+ "text": "4.2 Effect of quantity of training data",
768
+ "text_level": 1,
769
+ "bbox": [
770
+ 507,
771
+ 785,
772
+ 821,
773
+ 802
774
+ ],
775
+ "page_idx": 3
776
+ },
777
+ {
778
+ "type": "text",
779
+ "text": "The results in Table 1 for Rank-R1 trained with GRPO are obtained when using only $18\\%$ of the MSMARCO training data (while SFT used all available training data). To explore whether longer training could further improve effectiveness, we continued training the 3B and 7B Rank-R1 models for an additional two days and evaluated check",
780
+ "bbox": [
781
+ 505,
782
+ 808,
783
+ 882,
784
+ 921
785
+ ],
786
+ "page_idx": 3
787
+ },
788
+ {
789
+ "type": "page_footnote",
790
+ "text": "$^{2}$ https://huggingface.co/castorini/rank_zephyr_7b_v1_full",
791
+ "bbox": [
792
+ 112,
793
+ 894,
794
+ 487,
795
+ 919
796
+ ],
797
+ "page_idx": 3
798
+ },
799
+ {
800
+ "type": "page_number",
801
+ "text": "4",
802
+ "bbox": [
803
+ 492,
804
+ 942,
805
+ 504,
806
+ 954
807
+ ],
808
+ "page_idx": 3
809
+ },
810
+ {
811
+ "type": "table",
812
+ "img_path": "images/b61c0ad51072c6e5fb85db75333e280138e16595dfcf5a63de76802a6ebbf36d.jpg",
813
+ "table_caption": [],
814
+ "table_footnote": [],
815
+ "table_body": "<table><tr><td>Model</td><td>Training</td><td>Bio.</td><td>Earth.</td><td>Econ.</td><td>Psy.</td><td>Rob.</td><td>Stack.</td><td>Sus.</td><td>Pony</td><td>Leet.</td><td>AoPS</td><td>TheoT.</td><td>TheoQ.</td><td>Avg.</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.182</td><td>.279</td><td>.164</td><td>.134</td><td>.109</td><td>.163</td><td>.161</td><td>.043</td><td>.247</td><td>.065</td><td>.021</td><td>.073</td><td>.137</td></tr><tr><td>RankZephyr-7b</td><td>GPT4-distill</td><td>.219</td><td>.237</td><td>.144</td><td>.103</td><td>.076</td><td>.137</td><td>.166</td><td>.065</td><td>.247</td><td>.068</td><td>.020</td><td>.073</td><td>.130</td></tr><tr><td>RankGPT4*</td><td>Zeroshot</td><td>.338</td><td>.342</td><td>.167</td><td>.270</td><td>.223</td><td>.277</td><td>.111</td><td>.156</td><td>.034</td><td>.012</td><td>.086</td><td>.002</td><td>.170</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.143</td><td>.175</td><td>.120</td><td>.102</td><td>.077</td><td>.079</td><td>.154</td><td>.053</td><td>.154</td><td>.017</td><td>.042</td><td>.021</td><td>.095</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.220</td><td>.188</td><td>.104</td><td>.115</td><td>.091</td><td>.058</td><td>.167</td><td>.057</td><td>.099</td><td>.040</td><td>.034</td><td>.038</td><td>.101</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.137</td><td>.173</td><td>.119</td><td>.152</td><td>.100</td><td>.066</td><td>.178</td><td>.037</td><td>.077</td><td>.040</td><td>.060</td><td>.025</td><td>.097</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.184</td><td>.171</td><td>.137</td><td>.169</td><td>.090</td><td>.100</td><td>.165</td><td>.047</td><td>.111</td><td>.035</td><td>.059</td><td>.032</td><td>.108</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.236</td><td>.223</td><td>.161</td><td>.171</td><td>.149</td><td>.092</td><td>.183</td><td>.063</td><td>.149</td><td>.041</td><td>.104</td><td>.056</td><td>.136</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.287</td><td>.301</td><td>.141</td><td>.239</td><td>.189</td><td>.137</td><td>.196</td><td>.071</td><td>.207</td><td>.070</td><td>.082</td><td>.082</td><td>.167</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.268</td><td>.248</td><td>.179</td><td>.221</td><td>.174</td><td>.103</td><td>.211</td><td>.044</td><td>.156</td><td>.033</td><td>.104</td><td>.059</td><td>.150</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.260</td><td>.285</td><td>.172</td><td>.242</td><td>.191</td><td>.104</td><td>.242</td><td>.043</td><td>.198</td><td>.043</td><td>.109</td><td>.083</td><td>.164</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.295</td><td>.322</td><td>.205</td><td>.248</td><td>.189</td><td>.147</td><td>.236</td><td>.087</td><td>.187</td><td>.080</td><td>.093</td><td>.076</td><td>.180</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.220</td><td>.293</td><td>.154</td><td>.230</td><td>.201</td><td>.157</td><td>.203</td><td>.062</td><td>.194</td><td>.095</td><td>.099</td><td>.097</td><td>.167</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.301</td><td>.366</td><td>.221</td><td>.246</td><td>.217</td><td>.154</td><td>.250</td><td>.090</td><td>.170</td><td>.091</td><td>.116</td><td>.092</td><td>.193</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.312</td><td>.385</td><td>.212</td><td>.264</td><td>.226</td><td>.189</td><td>.275</td><td>.092</td><td>.202</td><td>.097</td><td>.119</td><td>.092</td><td>.205</td></tr></table>",
816
+ "bbox": [
817
+ 119,
818
+ 80,
819
+ 877,
820
+ 271
821
+ ],
822
+ "page_idx": 4
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "Table 2: BRIGHT nDCG@10 results. All methods rerank BM25 top-100 documents (First line). $\\ast$ : Results directly copied from the paper which uses a different BM25 ranking system (has a higher average nDCG@10 than our BM25).",
827
+ "bbox": [
828
+ 112,
829
+ 280,
830
+ 882,
831
+ 324
832
+ ],
833
+ "page_idx": 4
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "points saved during training. We report the results in Figure 2. In the figure, we also include results obtained when using SFT on incremental parts of the training data.",
838
+ "bbox": [
839
+ 112,
840
+ 349,
841
+ 487,
842
+ 413
843
+ ],
844
+ "page_idx": 4
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "From the figure, we observe that Rank-R1 requires significantly less data than Setwise SFT to achieve the same level of performance at the early training stage – however this data efficiency effect vanishes early on during the training phase. Past $5 - 7\\%$ of training data, in fact, the two training approaches tend to track each other. SFT has a clear advantage over GRPO in that it is far less computationally expensive. On the other hand, GRPO adds new features to the reranker, introducing the ability to perform reasoning.",
849
+ "bbox": [
850
+ 112,
851
+ 414,
852
+ 489,
853
+ 592
854
+ ],
855
+ "page_idx": 4
856
+ },
857
+ {
858
+ "type": "text",
859
+ "text": "4.3 Reasoning intensive out-of-domain effectiveness",
860
+ "text_level": 1,
861
+ "bbox": [
862
+ 112,
863
+ 606,
864
+ 431,
865
+ 637
866
+ ],
867
+ "page_idx": 4
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "Next we consider results from our out-of-domain experiments, reported in Table 2.",
872
+ "bbox": [
873
+ 112,
874
+ 645,
875
+ 485,
876
+ 677
877
+ ],
878
+ "page_idx": 4
879
+ },
880
+ {
881
+ "type": "text",
882
+ "text": "We observe that the SOTA RankZephyr reranker, which does not incorporate reasoning, does not provide better rankings than BM25 in most datasets from the BRIGHT benchmark - effectively failing at the reranking task. This suggests that the BRIGHT benchmark poses a challenge for current SOTA LLM rerankers.",
883
+ "bbox": [
884
+ 112,
885
+ 678,
886
+ 487,
887
+ 790
888
+ ],
889
+ "page_idx": 4
890
+ },
891
+ {
892
+ "text": "On the other hand, Rank-R1 trained with GRPO outperforms or is on par with both zero-shot and Setwise SFT models in most cases. Notably, when using the 14B model, Setwise SFT effectiveness plateaued and even performed worse than its zero-shot counterparts, suggesting that the large model trained on the MSMARCO could not generalize to the BRIGHT using the standard Setwise ap",
893
+ "text": "On the other hand, Rank-R1 trained with GPRO outperforms or is on par with both zero-shot and Setwise SFT models in most cases. Notably, when using the 14B model, Setwise SFT effectiveness plateaued and even performed worse than its zero-shot counterparts, suggesting that the large model trained on the MSMARCO could not generalize to the BRIGHT using the standard Setwise ap",
894
+ "bbox": [
895
+ 112,
896
+ 791,
897
+ 489,
898
+ 921
899
+ ],
900
+ "page_idx": 4
901
+ },
902
+ {
903
+ "type": "text",
904
+ "text": "proach. However, Rank-R1 based on the 14B model achieves the largest performance gain over Setwise SFT and even surpasses the GPT-4-based Listwise reranker baseline (RankGPT4). These results highlight that the reasoning process can help the model generalize to different domains and that strong reasoning abilities, along with larger model sizes, are crucial for LLM-based rerankers to be effective in reasoning-intensive ranking tasks.",
905
+ "bbox": [
906
+ 507,
907
+ 349,
908
+ 884,
909
+ 495
910
+ ],
911
+ "page_idx": 4
912
+ },
913
+ {
914
+ "type": "text",
915
+ "text": "5 Analysis",
916
+ "text_level": 1,
917
+ "bbox": [
918
+ 507,
919
+ 511,
920
+ 618,
921
+ 527
922
+ ],
923
+ "page_idx": 4
924
+ },
925
+ {
926
+ "type": "text",
927
+ "text": "5.1 Reward score vs. Response length",
928
+ "text_level": 1,
929
+ "bbox": [
930
+ 507,
931
+ 541,
932
+ 823,
933
+ 558
934
+ ],
935
+ "page_idx": 4
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "In Figure 3, we present the received reward values and model completion lengths logged during training for Rank-R1, across different model sizes. Rewards consistently increase throughout training, with smaller models showing a higher rate of increase, while larger models start with a higher initial reward.",
940
+ "bbox": [
941
+ 507,
942
+ 565,
943
+ 884,
944
+ 677
945
+ ],
946
+ "page_idx": 4
947
+ },
948
+ {
949
+ "type": "text",
950
+ "text": "Regarding completion length, larger models tend to generate longer responses; however, we do not observe a noticeable increase in length as training proceeds. This observation differs from the findings for DeepSeek-R1 (Guo et al., 2025). This may be attributed to two factors. First, we initialize RL training from an instruction-tuned model rather than a base model, meaning the instruction model already follows a reasonable reasoning process. Second, the MSMARCO passage ranking dataset is relatively simple compared to tasks like math or coding, where a longer reasoning process is more essential. Thus, extensive reasoning may not be necessary for achieving high effectiveness in this task.",
951
+ "bbox": [
952
+ 507,
953
+ 678,
954
+ 884,
955
+ 920
956
+ ],
957
+ "page_idx": 4
958
+ },
959
+ {
960
+ "type": "page_number",
961
+ "text": "5",
962
+ "bbox": [
963
+ 492,
964
+ 942,
965
+ 505,
966
+ 954
967
+ ],
968
+ "page_idx": 4
969
+ },
970
+ {
971
+ "type": "image",
972
+ "img_path": "images/5526761c70a68372ea70cb8a85788dcf7940cef10806a774f6a4b4ff6cff2088.jpg",
973
+ "image_caption": [],
974
+ "image_footnote": [],
975
+ "bbox": [
976
+ 124,
977
+ 91,
978
+ 489,
979
+ 256
980
+ ],
981
+ "page_idx": 5
982
+ },
983
+ {
984
+ "type": "image",
985
+ "img_path": "images/841c8ba914964229a06742463ad1a26b8534c8a57b907702c417e76170048961.jpg",
986
+ "image_caption": [
987
+ "Figure 3: Rewards (top) and model completion length (bottom) obtained during GRPO training.",
988
+ "Figure 4: Example of completion. The Rank-R1 used in this example is based on Qwen2.5-3B-Instruct. The GRPO-Trained Rank-R1 gives the correct answer."
989
+ ],
990
+ "image_footnote": [],
991
+ "bbox": [
992
+ 124,
993
+ 269,
994
+ 489,
995
+ 435
996
+ ],
997
+ "page_idx": 5
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "text": "5.2 Case study",
1002
+ "text_level": 1,
1003
+ "bbox": [
1004
+ 114,
1005
+ 506,
1006
+ 247,
1007
+ 521
1008
+ ],
1009
+ "page_idx": 5
1010
+ },
1011
+ {
1012
+ "type": "text",
1013
+ "text": "In Figure 4, we provide an example of Rank-R1's generation. We compare the outputs of the Zeroshot model and the model after GRPO training. Both models successfully follow the instruction by providing a reasoning process within the <think> span and predicting a relevant document label in the correct format. However, the Zeroshot model tends to merely describe what each document mentions and ultimately makes an incorrect prediction. In contrast, the GRPO-trained model focuses on the most relevant documents, compares them, and correctly selects the best one. In addition, we argue that Rank-R1's transparent reasoning process makes its predictions more explainable, which could be particularly important in domains such as medical document ranking.",
1014
+ "bbox": [
1015
+ 112,
1016
+ 527,
1017
+ 489,
1018
+ 785
1019
+ ],
1020
+ "page_idx": 5
1021
+ },
1022
+ {
1023
+ "type": "text",
1024
+ "text": "6 Conclusion",
1025
+ "text_level": 1,
1026
+ "bbox": [
1027
+ 114,
1028
+ 797,
1029
+ 247,
1030
+ 814
1031
+ ],
1032
+ "page_idx": 5
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "text": "In this paper, we introduce Rank-R1, which uses training based on the GRPO reinforcement learning strategy to enhance the reasoning process of an LLM-based Setwise reranker. Our training approach does not require human-annotated reasoning data; instead, it relies only on document rele",
1037
+ "bbox": [
1038
+ 112,
1039
+ 825,
1040
+ 489,
1041
+ 921
1042
+ ],
1043
+ "page_idx": 5
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "text": "Query: common minerals list",
1048
+ "bbox": [
1049
+ 536,
1050
+ 92,
1051
+ 722,
1052
+ 105
1053
+ ],
1054
+ "page_idx": 5
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "text": "Zeroshot Rank-R1 Completion:",
1059
+ "text_level": 1,
1060
+ "bbox": [
1061
+ 534,
1062
+ 116,
1063
+ 742,
1064
+ 128
1065
+ ],
1066
+ "page_idx": 5
1067
+ },
1068
+ {
1069
+ "type": "text",
1070
+ "text": "<think>The query is \"common minerals list\". Document [3] talks about mica being a common rock-forming mineral. Document [19] mentions that chemical sedimentary rocks are formed through precipitation of minerals. However, it does not provide a comprehensive list of common minerals. Documents [1], [2], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], and [20] do not mention anything related to common minerals.</think>\n<answer>[3]</answer>",
1071
+ "bbox": [
1072
+ 534,
1073
+ 128,
1074
+ 857,
1075
+ 259
1076
+ ],
1077
+ "page_idx": 5
1078
+ },
1079
+ {
1080
+ "type": "text",
1081
+ "text": "GRPO-Trained Rank-R1 Completion:",
1082
+ "text_level": 1,
1083
+ "bbox": [
1084
+ 534,
1085
+ 269,
1086
+ 786,
1087
+ 282
1088
+ ],
1089
+ "page_idx": 5
1090
+ },
1091
+ {
1092
+ "type": "text",
1093
+ "text": "<think>The query is \"common minerals list\". Document [3] talks about mica, which is a common rock-forming mineral. Document [4] also mentions common rock-forming minerals. Both documents seem relevant to the query. However, document [4] provides a list of common rock-forming minerals, which directly answers the query. </think> <answer>[4]</answer>",
1094
+ "bbox": [
1095
+ 534,
1096
+ 282,
1097
+ 857,
1098
+ 376
1099
+ ],
1100
+ "page_idx": 5
1101
+ },
1102
+ {
1103
+ "type": "text",
1104
+ "text": "vance assessments. We find that RL training performs similarly to supervised fine-tuning on in-domain tasks. However, in reasoning-intensive ranking tasks, Rank-R1 based on a 14B LLM achieves considerably higher effectiveness than the SOTA non-reasoning Listwise reranker, highlighting the importance of incorporating a reasoning process in document reranking. Moreover, this reasoning capability might improve the explainability of black-box LLM-based rerankers, and offer new affordances in terms of search engine result presentation and fruition.",
1105
+ "bbox": [
1106
+ 507,
1107
+ 476,
1108
+ 884,
1109
+ 669
1110
+ ],
1111
+ "page_idx": 5
1112
+ },
1113
+ {
1114
+ "type": "text",
1115
+ "text": "We have made our code open-source at https://github.com/ielab/llm-rankers/tree/main/Rank-R1.",
1116
+ "bbox": [
1117
+ 507,
1118
+ 671,
1119
+ 882,
1120
+ 717
1121
+ ],
1122
+ "page_idx": 5
1123
+ },
1124
+ {
1125
+ "type": "text",
1126
+ "text": "References",
1127
+ "text_level": 1,
1128
+ "bbox": [
1129
+ 510,
1130
+ 749,
1131
+ 608,
1132
+ 764
1133
+ ],
1134
+ "page_idx": 5
1135
+ },
1136
+ {
1137
+ "type": "list",
1138
+ "sub_type": "ref_text",
1139
+ "list_items": [
1140
+ "Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the trec 2020 deep learning track. Preprint, arXiv:2102.07662.",
1141
+ "Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020. Overview of the trec 2019 deep learning track. *Preprint*, arXiv:2003.07820.",
1142
+ "Luyu Gao, Xueguang Ma, Jimmy Lin, and Jamie Callan. 2023. Tevatron: An efficient and flexible toolkit for"
1143
+ ],
1144
+ "bbox": [
1145
+ 509,
1146
+ 772,
1147
+ 882,
1148
+ 920
1149
+ ],
1150
+ "page_idx": 5
1151
+ },
1152
+ {
1153
+ "type": "page_number",
1154
+ "text": "6",
1155
+ "bbox": [
1156
+ 492,
1157
+ 942,
1158
+ 505,
1159
+ 954
1160
+ ],
1161
+ "page_idx": 5
1162
+ },
1163
+ {
1164
+ "type": "list",
1165
+ "sub_type": "ref_text",
1166
+ "list_items": [
1167
+ "neural retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 3120-3124, New York, NY, USA. Association for Computing Machinery.",
1168
+ "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv:2501.12948.",
1169
+ "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations.",
1170
+ "Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations. In Proceedings of the 44th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2021), pages 2356-2362.",
1171
+ "Xueguang Ma, Liang Wang, Nan Yang, Furu Wei, and Jimmy Lin. 2024. Fine-tuning llama for multi-stage text retrieval. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 2421-2425, New York, NY, USA. Association for Computing Machinery.",
1172
+ "Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-shot listwise document reranking with a large language model. Preprint, arXiv:2305.02156.",
1173
+ "OpenAI et al. 2024. Gpt-4 technical report. Preprint, arXiv:2303.08774.",
1174
+ "Ronak Pradeep, Sahel Sharifymoghaddam, and Jimmy Lin. 2023. Rankzephyr: Effective and robust zero-shot listwise reranking is a breeze! Preprint, arXiv:2312.02724.",
1175
+ "Qwen et al. 2025. Qwen2.5 technical report. Preprint, arXiv:2412.15115.",
1176
+ "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. Preprint, arXiv:2402.03300.",
1177
+ "Hongjin SU, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, Ruoxi Sun, Jinsung Yoon, Sercan O Arik, Danqi Chen, and Tao Yu. 2025. BRIGHT: A realistic and challenging benchmark for reasoning-intensive retrieval. In The Thirteenth International Conference on Learning Representations."
1178
+ ],
1179
+ "bbox": [
1180
+ 115,
1181
+ 85,
1182
+ 487,
1183
+ 919
1184
+ ],
1185
+ "page_idx": 6
1186
+ },
1187
+ {
1188
+ "type": "list",
1189
+ "sub_type": "ref_text",
1190
+ "list_items": [
1191
+ "Shuoqi Sun, Shengyao Zhuang, Shuai Wang, and Guido Zuccon. 2024. An investigation of prompt variations for zero-shot llm-based rankers. arXiv preprint arXiv:2406.14117.",
1192
+ "Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT good at search? investigating large language models as re-ranking agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14918-14937, Singapore. Association for Computational Linguistics.",
1193
+ "Lewis Tunstall, Edward Beeching, Nathan Lambert, Nazneen Rajani, Kashif Rasul, Younes Belkada, Shengyi Huang, Leandro von Werra, Clémentine Fourrier, Nathan Habib, Nathan Sarrazin, Omar Sanseviero, Alexander M. Rush, and Thomas Wolf. 2023. Zephyr: Direct distillation of lm alignment. Preprint, arXiv:2310.16944.",
1194
+ "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. 2020. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl.",
1195
+ "Zhichao Xu. 2024. Rankmamba: Benchmarking mamba's document ranking performance in the era of transformers. Preprint, arXiv:2403.18276.",
1196
+ "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog.",
1197
+ "Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Bendersky. 2024a. Beyond yes and no: Improving zero-shot LLM rankers via scoring fine-grained relevance labels. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 358-370, Mexico City, Mexico. Association for Computational Linguistics.",
1198
+ "Shengyao Zhuang, Bing Liu, Bevan Koopman, and Guido Zuccon. 2023. Open-source large language models are strong zero-shot query likelihood models for document ranking. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 8807-8817, Singapore. Association for Computational Linguistics.",
1199
+ "Shengyao Zhuang, Honglei Zhuang, Bevan Koopman, and Guido Zuccon. 2024b. A setwise approach for effective and highly efficient zero-shot ranking with large language models. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 38-47, New York, NY, USA. Association for Computing Machinery."
1200
+ ],
1201
+ "bbox": [
1202
+ 510,
1203
+ 85,
1204
+ 880,
1205
+ 920
1206
+ ],
1207
+ "page_idx": 6
1208
+ },
1209
+ {
1210
+ "type": "page_number",
1211
+ "text": "7",
1212
+ "bbox": [
1213
+ 492,
1214
+ 942,
1215
+ 504,
1216
+ 953
1217
+ ],
1218
+ "page_idx": 6
1219
+ },
1220
+ {
1221
+ "type": "table",
1222
+ "img_path": "images/19bd12ea78b936391312be3542c89a51b650df6a46d4d0aae1b61c8758274193.jpg",
1223
+ "table_caption": [],
1224
+ "table_footnote": [],
1225
+ "table_body": "<table><tr><td>Parameter</td><td>GRPO</td><td>SFT</td></tr><tr><td>learning_rate</td><td>1e-5</td><td>1e-5</td></tr><tr><td>batch_size</td><td>64</td><td>64</td></tr><tr><td>optimizer</td><td>AdamW</td><td>AdamW</td></tr><tr><td>lora_rank</td><td>16</td><td>16</td></tr><tr><td>max_prompt_length</td><td>4096</td><td>4096</td></tr><tr><td>max Completion_length</td><td>2048</td><td>na</td></tr><tr><td>group_size</td><td>8</td><td>na</td></tr></table>",
1226
+ "bbox": [
1227
+ 117,
1228
+ 80,
1229
+ 485,
1230
+ 215
1231
+ ],
1232
+ "page_idx": 7
1233
+ },
1234
+ {
1235
+ "type": "text",
1236
+ "text": "Table 3: Hyper-parameters for training.",
1237
+ "bbox": [
1238
+ 164,
1239
+ 224,
1240
+ 433,
1241
+ 239
1242
+ ],
1243
+ "page_idx": 7
1244
+ },
1245
+ {
1246
+ "type": "text",
1247
+ "text": "A Prompt for Setwise reranker",
1248
+ "text_level": 1,
1249
+ "bbox": [
1250
+ 114,
1251
+ 263,
1252
+ 400,
1253
+ 280
1254
+ ],
1255
+ "page_idx": 7
1256
+ },
1257
+ {
1258
+ "type": "text",
1259
+ "text": "SYSTEM:",
1260
+ "text_level": 1,
1261
+ "bbox": [
1262
+ 141,
1263
+ 297,
1264
+ 208,
1265
+ 309
1266
+ ],
1267
+ "page_idx": 7
1268
+ },
1269
+ {
1270
+ "type": "text",
1271
+ "text": "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant provides the user with the answer enclosed within <answer> </answer> tags, i.e., <answer> answer here </answer>.",
1272
+ "bbox": [
1273
+ 141,
1274
+ 310,
1275
+ 460,
1276
+ 369
1277
+ ],
1278
+ "page_idx": 7
1279
+ },
1280
+ {
1281
+ "type": "text",
1282
+ "text": "USER:",
1283
+ "text_level": 1,
1284
+ "bbox": [
1285
+ 142,
1286
+ 380,
1287
+ 186,
1288
+ 391
1289
+ ],
1290
+ "page_idx": 7
1291
+ },
1292
+ {
1293
+ "type": "text",
1294
+ "text": "Given the query: {\"query)\", which of the following documents is most relevant?",
1295
+ "bbox": [
1296
+ 142,
1297
+ 393,
1298
+ 460,
1299
+ 416
1300
+ ],
1301
+ "page_idx": 7
1302
+ },
1303
+ {
1304
+ "type": "list",
1305
+ "sub_type": "text",
1306
+ "list_items": [
1307
+ "[1] {document1}",
1308
+ "[2] {document2}"
1309
+ ],
1310
+ "bbox": [
1311
+ 142,
1312
+ 417,
1313
+ 245,
1314
+ 441
1315
+ ],
1316
+ "page_idx": 7
1317
+ },
1318
+ {
1319
+ "type": "text",
1320
+ "text": "[20] {document20}",
1321
+ "bbox": [
1322
+ 142,
1323
+ 449,
1324
+ 262,
1325
+ 464
1326
+ ],
1327
+ "page_idx": 7
1328
+ },
1329
+ {
1330
+ "type": "text",
1331
+ "text": "Please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <answer>[3]</answer>.",
1332
+ "bbox": [
1333
+ 141,
1334
+ 464,
1335
+ 460,
1336
+ 524
1337
+ ],
1338
+ "page_idx": 7
1339
+ },
1340
+ {
1341
+ "type": "text",
1342
+ "text": "We use the above prompt for both zero-shot and supervised fine-tuning of Setwise rerankers. The only difference from the prompt used for Rank- $R1$ is that the reasoning instructions are removed.",
1343
+ "bbox": [
1344
+ 112,
1345
+ 542,
1346
+ 487,
1347
+ 605
1348
+ ],
1349
+ "page_idx": 7
1350
+ },
1351
+ {
1352
+ "type": "text",
1353
+ "text": "B Training hyper-parameters",
1354
+ "text_level": 1,
1355
+ "bbox": [
1356
+ 114,
1357
+ 617,
1358
+ 386,
1359
+ 634
1360
+ ],
1361
+ "page_idx": 7
1362
+ },
1363
+ {
1364
+ "type": "text",
1365
+ "text": "We use the TRL (von Werra et al., 2020) library for both GPRP and SFT training. Both methods are trained with LoRA adapter (Hu et al., 2022). For SFT, we compute the cross-entropy loss only on the answer span and ignore the loss on the prompt tokens. The hyperparameters are listed in Table 3, while other parameters follow the default settings of the TRL trainer.",
1366
+ "bbox": [
1367
+ 112,
1368
+ 643,
1369
+ 489,
1370
+ 771
1371
+ ],
1372
+ "page_idx": 7
1373
+ },
1374
+ {
1375
+ "type": "page_number",
1376
+ "text": "8",
1377
+ "bbox": [
1378
+ 492,
1379
+ 942,
1380
+ 505,
1381
+ 954
1382
+ ],
1383
+ "page_idx": 7
1384
+ }
1385
+ ]
data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_model.json ADDED
@@ -0,0 +1,1646 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.318,
8
+ 0.061,
9
+ 0.718
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2503.06034v1 [cs.IR] 8 Mar 2025"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.123,
18
+ 0.09,
19
+ 0.877,
20
+ 0.131
21
+ ],
22
+ "angle": 0,
23
+ "content": "Rank-R1: Enhancing Reasoning in LLM-based Document Rerankers via Reinforcement Learning"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.122,
29
+ 0.151,
30
+ 0.876,
31
+ 0.185
32
+ ],
33
+ "angle": 0,
34
+ "content": "Shengyao Zhuang\\*,1, Xueguang Ma\\*,2, Bevan Koopman\\*,3, Jimmy Lin\\*, Guido Zuccon\\* 1CSIRO,"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.395,
40
+ 0.186,
41
+ 0.603,
42
+ 0.202
43
+ ],
44
+ "angle": 0,
45
+ "content": "2 University of Waterloo,"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.369,
51
+ 0.203,
52
+ 0.629,
53
+ 0.22
54
+ ],
55
+ "angle": 0,
56
+ "content": "<sup>3</sup> The University of Queensland"
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.261,
62
+ 0.261,
63
+ 0.341,
64
+ 0.277
65
+ ],
66
+ "angle": 0,
67
+ "content": "Abstract"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.142,
73
+ 0.293,
74
+ 0.461,
75
+ 0.351
76
+ ],
77
+ "angle": 0,
78
+ "content": "In this paper, we introduce Rank-R1, a novel LLM-based reranker that performs reasoning over both the user query and candidate documents before performing the ranking task."
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.142,
84
+ 0.357,
85
+ 0.461,
86
+ 0.528
87
+ ],
88
+ "angle": 0,
89
+ "content": "Existing document reranking methods based on large language models (LLMs) typically rely on prompting or fine-tuning LLMs to order or label candidate documents according to their relevance to a query. For Rank-R1, we use a reinforcement learning algorithm along with only a small set of relevance labels (without any reasoning supervision) to enhance the reasoning ability of LLM-based rerankers. Our hypothesis is that adding reasoning capabilities to the rerankers can improve their relevance assessment and ranking capabilities."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.142,
95
+ 0.535,
96
+ 0.462,
97
+ 0.775
98
+ ],
99
+ "angle": 0,
100
+ "content": "Our experiments on the TREC DL and BRIGHT datasets show that Rank-R1 is highly effective, especially for complex queries. In particular, we find that Rank-R1 achieves effectiveness on in-domain datasets at par with that of supervised fine-tuning methods, but utilizing only \\(18\\%\\) of the training data used by the fine-tuning methods. We also find that the model largely outperforms zero-shot and supervised fine-tuning when applied to out-of-domain datasets featuring complex queries, especially when a 14B-size model is used. Finally, we qualitatively observe that Rank-R1's reasoning process improves the explainability of the ranking results, opening new opportunities for search engine results presentation and fruition."
101
+ },
102
+ {
103
+ "type": "title",
104
+ "bbox": [
105
+ 0.115,
106
+ 0.792,
107
+ 0.262,
108
+ 0.807
109
+ ],
110
+ "angle": 0,
111
+ "content": "1 Introduction"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.114,
117
+ 0.82,
118
+ 0.49,
119
+ 0.884
120
+ ],
121
+ "angle": 0,
122
+ "content": "Large language models (LLMs) have shown strong performance in document ranking tasks (Ma et al., 2023; Sun et al., 2023; Zhuang et al., 2023, 2024b,a; Sun et al., 2024; Xu, 2024). Zero-shot"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.508,
128
+ 0.262,
129
+ 0.885,
130
+ 0.374
131
+ ],
132
+ "angle": 0,
133
+ "content": "prompting methods such as RankGPT, rerank documents by directly prompting LLMs to generate reordered document lists (Sun et al., 2023). However, these methods rely on the assumption that the LLM follows instructions well. Moreover, being zero-shot, they do not leverage available human-annotated relevance data for further improvement."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.508,
139
+ 0.375,
140
+ 0.885,
141
+ 0.568
142
+ ],
143
+ "angle": 0,
144
+ "content": "In contrast, methods like RankLlama fine-tune LLMs using human relevance judgments, assigning scores to each query-document pair (Ma et al., 2024). While effective, these approaches do not explicitly model reasoning processes. This is mainly due to the lack of high-quality reasoning data for supervised fine-tuning. In practice, user relevance judgments often come in the form of selecting the most relevant document from a set, but achieving high-quality rankings requires strong reasoning capabilities to interpret complex relevance relationships."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.508,
150
+ 0.569,
151
+ 0.886,
152
+ 0.712
153
+ ],
154
+ "angle": 0,
155
+ "content": "Recent advancements in reinforcement learning (RL) for LLMs, such as DeepSeek-R1 (Guo et al., 2025) and Simple RL (Zeng et al., 2025), have demonstrated that reward-based training can enhance reasoning abilities, particularly in tasks like mathematical question-answering (Shao et al., 2024). These recent innovations suggest that rule-based reward scoring alone can improve an LLM's ability to reason and explain."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.508,
161
+ 0.713,
162
+ 0.886,
163
+ 0.89
164
+ ],
165
+ "angle": 0,
166
+ "content": "Building on this insight, we ask whether reinforcement learning in the style of Deepseek-R1 can improve reasoning in document reranking. Specifically, we apply Group Relative Policy Optimization (GRPO) (Shao et al., 2024) to train an LLM-based reranker. Given a user query and a list of retrieved candidate documents, the reranker generates reasoning steps before selecting the most relevant document. The reward signal is determined only by whether the model eventually identifies the most relevant document among the candidates correctly."
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.509,
172
+ 0.89,
173
+ 0.884,
174
+ 0.922
175
+ ],
176
+ "angle": 0,
177
+ "content": "We train our model, Rank-R1, on the MS MARCO passage ranking dataset and evaluate it"
178
+ },
179
+ {
180
+ "type": "page_footnote",
181
+ "bbox": [
182
+ 0.116,
183
+ 0.895,
184
+ 0.488,
185
+ 0.922
186
+ ],
187
+ "angle": 0,
188
+ "content": "*These authors contributed equally to this work. Work in progress."
189
+ },
190
+ {
191
+ "type": "page_number",
192
+ "bbox": [
193
+ 0.494,
194
+ 0.943,
195
+ 0.505,
196
+ 0.955
197
+ ],
198
+ "angle": 0,
199
+ "content": "1"
200
+ }
201
+ ],
202
+ [
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.113,
207
+ 0.085,
208
+ 0.489,
209
+ 0.132
210
+ ],
211
+ "angle": 0,
212
+ "content": "on TREC DL19 and DL20 (in-domain datasets). Our results show that RL-based training is at par with supervised fine-tuning on in-domain data."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.113,
218
+ 0.135,
219
+ 0.489,
220
+ 0.295
221
+ ],
222
+ "angle": 0,
223
+ "content": "Additionally, we evaluate Rank-R1 on the BRIGHT dataset (SU et al., 2025), which requires complex query reasoning and relevance understanding, and is out-of-domain with respect to the data used to train the model. Rank-R1, trained with retrieval reinforcement learning, outperforms both zero-shot prompting and supervised fine-tuning on this out-of-domain dataset. Notably, our 14B model surpasses the much larger (zeroshot) GPT-4 in reranking performance on the BRIGHT dataset."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.115,
229
+ 0.308,
230
+ 0.219,
231
+ 0.323
232
+ ],
233
+ "angle": 0,
234
+ "content": "2 Method"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.113,
240
+ 0.336,
241
+ 0.489,
242
+ 0.431
243
+ ],
244
+ "angle": 0,
245
+ "content": "To train Rank-R1, we adapt the RL training framework proposed by DeepSeek (Shao et al., 2024; Guo et al., 2025) to enhance the reasoning ability of LLM-based document rerankers. In this section, we discuss the details of each component in our method."
246
+ },
247
+ {
248
+ "type": "title",
249
+ "bbox": [
250
+ 0.114,
251
+ 0.446,
252
+ 0.294,
253
+ 0.461
254
+ ],
255
+ "angle": 0,
256
+ "content": "2.1 LLM Reranking"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.113,
262
+ 0.467,
263
+ 0.489,
264
+ 0.629
265
+ ],
266
+ "angle": 0,
267
+ "content": "Our LLM reranker follows the Setwise prompting approach proposed by Zhuang et al. (2024b). This method takes a query and a set of candidate documents as input to the LLM and prompts the LLM to select the most relevant document among the candidates based on relevance to the query. Then, the heapsort algorithm is used to build a heap tree over all the candidate documents from the first-stage retriever, and the documents are reranked via the \"heapify\" operations with the Setwise prompt."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.113,
273
+ 0.63,
274
+ 0.49,
275
+ 0.854
276
+ ],
277
+ "angle": 0,
278
+ "content": "However, the original Setwise ranking approach does not encourage the LLM to reason about the relevance between the query and the documents. Instead, it directly asks for the most relevant one. To unlock the reranker's reasoning ability, we modify the original Setwise prompt by adding a reasoning instruction, as shown in Figure 1. Specifically, we adapt the system prompt from the DeepSeek-R1-Zero method to the Setwise prompt. This modification encourages LLMs to reason first before providing an answer—in our case, predicting the label of the most relevant candidate document to the query. We refer to the Setwise method using this modified prompt as Rank-R1."
279
+ },
280
+ {
281
+ "type": "title",
282
+ "bbox": [
283
+ 0.114,
284
+ 0.868,
285
+ 0.354,
286
+ 0.884
287
+ ],
288
+ "angle": 0,
289
+ "content": "2.2 Reinforcement Learning"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.113,
295
+ 0.89,
296
+ 0.49,
297
+ 0.922
298
+ ],
299
+ "angle": 0,
300
+ "content": "Although any modern instruction-tuned LLM, when coupled with our Rank- \\(R1\\) prompt, may ex"
301
+ },
302
+ {
303
+ "type": "title",
304
+ "bbox": [
305
+ 0.536,
306
+ 0.093,
307
+ 0.605,
308
+ 0.104
309
+ ],
310
+ "angle": 0,
311
+ "content": "SYSTEM:"
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.535,
317
+ 0.105,
318
+ 0.857,
319
+ 0.2
320
+ ],
321
+ "angle": 0,
322
+ "content": "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>."
323
+ },
324
+ {
325
+ "type": "title",
326
+ "bbox": [
327
+ 0.537,
328
+ 0.211,
329
+ 0.583,
330
+ 0.223
331
+ ],
332
+ "angle": 0,
333
+ "content": "USER:"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.537,
339
+ 0.224,
340
+ 0.855,
341
+ 0.246
342
+ ],
343
+ "angle": 0,
344
+ "content": "Given the query: {\"query}\", which of the following documents is most relevant?"
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.539,
350
+ 0.248,
351
+ 0.642,
352
+ 0.259
353
+ ],
354
+ "angle": 0,
355
+ "content": "[1] {document1}"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.539,
361
+ 0.26,
362
+ 0.642,
363
+ 0.272
364
+ ],
365
+ "angle": 0,
366
+ "content": "[2] {document2}"
367
+ },
368
+ {
369
+ "type": "list",
370
+ "bbox": [
371
+ 0.539,
372
+ 0.248,
373
+ 0.642,
374
+ 0.272
375
+ ],
376
+ "angle": 0,
377
+ "content": null
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.538,
383
+ 0.278,
384
+ 0.657,
385
+ 0.295
386
+ ],
387
+ "angle": 0,
388
+ "content": "[20] {document20}"
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.537,
394
+ 0.295,
395
+ 0.858,
396
+ 0.367
397
+ ],
398
+ "angle": 0,
399
+ "content": "After completing the reasoning process, please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <think> reasoning process here </think> <answer>[3]</answer>."
400
+ },
401
+ {
402
+ "type": "image_caption",
403
+ "bbox": [
404
+ 0.572,
405
+ 0.396,
406
+ 0.819,
407
+ 0.41
408
+ ],
409
+ "angle": 0,
410
+ "content": "Figure 1: Prompt used for Rank-R1."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.508,
416
+ 0.437,
417
+ 0.885,
418
+ 0.63
419
+ ],
420
+ "angle": 0,
421
+ "content": "hibit strong zeroshot reasoning ability, their reasoning process could still be suboptimal for the Setwise ranking method. This is because the LLMs might not have been fine-tuned on similar instructional data. On the other hand, gathering human-annotated reasoning data for large-scale supervised fine-tuning of Rank-R1 could be both costly and infeasible. To address these challenges, we employ the GRPO RL algorithm (Shao et al., 2024) to enhance the reasoning process of the Rank-R1 reranker. The GRPO algorithm optimizes the following objective:"
422
+ },
423
+ {
424
+ "type": "equation",
425
+ "bbox": [
426
+ 0.521,
427
+ 0.65,
428
+ 0.882,
429
+ 0.766
430
+ ],
431
+ "angle": 0,
432
+ "content": "\\[\n\\begin{array}{l} \\mathcal {J} _ {G R P O} (\\theta) = \\mathbb {E} [ q \\sim P (Q), \\left\\{o _ {i} \\right\\} _ {i = 1} ^ {| G |} \\sim \\pi_ {\\theta_ {o l d}} (O | q) ] \\\\ \\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(\\min \\left(\\frac {\\pi_ {\\theta_ {o l d}} (o _ {i} | q)}{\\pi_ {\\theta_ {o l d}} (o _ {i} | q)} A _ {i}, \\right. \\right. \\\\ \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {o l d}} (o _ {i} | q)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i}\\bigg) \\\\ \\left. - \\beta \\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} \\mid \\mid \\pi_ {r e f}\\right)\\right) \\tag {1} \\\\ \\end{array}\n\\]"
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.508,
438
+ 0.777,
439
+ 0.884,
440
+ 0.922
441
+ ],
442
+ "angle": 0,
443
+ "content": "where \\(Q\\) is the Setwise ranking prompt in the training data, \\(G\\) is a group of generated data points sampled from an old policy \\(\\pi_{\\theta_{old}}\\) (in our case, the initial LLM) given a sampled prompt, and \\(\\pi_{\\theta}\\) is the LLM ranker we are optimizing. The clip operation is used to cap the ratio of the new and old policies. In our experiment, we only use the data samples from \\(\\pi_{\\theta_{old}}\\) to update \\(\\pi_{\\theta}\\) once, meaning \\(\\pi_{\\theta_{old}} = \\pi_{\\theta}\\), so Eq 1 reduces to:"
444
+ },
445
+ {
446
+ "type": "page_number",
447
+ "bbox": [
448
+ 0.493,
449
+ 0.943,
450
+ 0.505,
451
+ 0.955
452
+ ],
453
+ "angle": 0,
454
+ "content": "2"
455
+ }
456
+ ],
457
+ [
458
+ {
459
+ "type": "equation",
460
+ "bbox": [
461
+ 0.157,
462
+ 0.113,
463
+ 0.242,
464
+ 0.125
465
+ ],
466
+ "angle": 0,
467
+ "content": "\\[\n\\mathcal {J} _ {G R P O} (\\theta) =\n\\]"
468
+ },
469
+ {
470
+ "type": "equation",
471
+ "bbox": [
472
+ 0.158,
473
+ 0.13,
474
+ 0.487,
475
+ 0.164
476
+ ],
477
+ "angle": 0,
478
+ "content": "\\[\n\\frac {1}{| G |} \\sum_ {i = 1} ^ {| G |} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid q\\right)}{\\pi_ {\\theta_ {o l d}} \\left(o _ {i} \\mid q\\right)} A _ {i}, - \\beta \\mathbb {D} _ {K L} \\left(\\pi_ {\\theta} \\mid \\mid \\pi_ {r e f}\\right)\\right) \\tag {2}\n\\]"
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.114,
484
+ 0.175,
485
+ 0.489,
486
+ 0.237
487
+ ],
488
+ "angle": 0,
489
+ "content": "where \\(\\mathbb{D}_{KL}\\) is the KL loss that penalizes how far the new policy differs from a reference policy \\(\\pi_{ref}\\), which in our case is the original instruction-tuned LLM."
490
+ },
491
+ {
492
+ "type": "text",
493
+ "bbox": [
494
+ 0.114,
495
+ 0.24,
496
+ 0.49,
497
+ 0.303
498
+ ],
499
+ "angle": 0,
500
+ "content": "The generated data points, in our case, represent the reasoning process. We use the generated relevant document label to calculate the advantage \\( A \\) as follows:"
501
+ },
502
+ {
503
+ "type": "equation",
504
+ "bbox": [
505
+ 0.169,
506
+ 0.313,
507
+ 0.488,
508
+ 0.346
509
+ ],
510
+ "angle": 0,
511
+ "content": "\\[\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)} \\tag {3}\n\\]"
512
+ },
513
+ {
514
+ "type": "text",
515
+ "bbox": [
516
+ 0.113,
517
+ 0.356,
518
+ 0.489,
519
+ 0.419
520
+ ],
521
+ "angle": 0,
522
+ "content": "where \\( r \\) is the reward given by the training data, which we describe in detail later. Overall, the GRPO training optimizes the LLM to generate tokens that maximize the rewards."
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.113,
528
+ 0.42,
529
+ 0.489,
530
+ 0.581
531
+ ],
532
+ "angle": 0,
533
+ "content": "The Setwise prompts and rewards for our GRPO training come from training data. For constructing such RL training data, we use the MS-MARCO training data provided by the Tevatron IR toolkit (Gao et al., 2023), which includes training queries, human-labeled relevant documents, and BM25-retrieved top-100 documents. For each training query, we sample 19 documents from the set retrieved by BM25, along with one labeled relevant document, to form the Rank-R1 prompt."
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.113,
539
+ 0.582,
540
+ 0.489,
541
+ 0.79
542
+ ],
543
+ "angle": 0,
544
+ "content": "We design the reward function for RL as follows: a reward of one is granted if and only if the LLM generations match the reasoning and answering format (i.e., the generated tokens fill in the <think> </think> <answer> </answer> spans) and the answer correctly matches the label of the ground-truth relevant document. Otherwise, a reward of zero is provided. Our straightforward rule-based reward mechanism does not impose constraints on the reasoning process but instead encourages the model to generate the correct format and answer following the reasoning. Most importantly, this RL training does not require human-annotated reasoning data."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.113,
550
+ 0.791,
551
+ 0.489,
552
+ 0.887
553
+ ],
554
+ "angle": 0,
555
+ "content": "There are two main reasons why we selected the Setwise ranking approach as the backend of our Rank-R1: First, the nature of Setwise prompting allows us to use a simple rule-based reward function, as only the most relevant document label needs to be predicted, enabling a straightforward match with"
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.508,
561
+ 0.085,
562
+ 0.885,
563
+ 0.263
564
+ ],
565
+ "angle": 0,
566
+ "content": "the answer span. Secondly, it is convenient for us to fairly compare the RL-trained Rank-R1 with the supervise fine-tuned Setwise reranker, which simply removes the reasoning process from the prompt and is directly trained to predict the ground-truth label using the same training data. Other LLM-based ranking methods, such as Listwise prompting, are harder to fairly compare in this setting, as a ground-truth ranking is usually not available in the training data (MSMARCO only has one judged relevant document per query on average)."
567
+ },
568
+ {
569
+ "type": "title",
570
+ "bbox": [
571
+ 0.509,
572
+ 0.276,
573
+ 0.736,
574
+ 0.293
575
+ ],
576
+ "angle": 0,
577
+ "content": "3 Experimental Settings"
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.508,
583
+ 0.304,
584
+ 0.885,
585
+ 0.545
586
+ ],
587
+ "angle": 0,
588
+ "content": "Datasets. We explore to dataset settings to evaluate the effectiveness of Rank-R1: an in-domain setting, where we use the TREC-DL19 and DL20 datasets (Craswell et al., 2020, 2021), and an out-of-domain setting, where we use the BRIGHT benchmark datasets (SU et al., 2025). The DL19 and DL20 are in-domain because they are based on the MSMARCO passage ranking dataset – the same dataset used in the training of our rerankers; unlike MSMARCO though they contain deep assessments for each query (around 210 assessments per query on average). The BRIGHT benchmark datasets encompass domains, including biology, code, and math, and require intensive reasoning to rank relevant documents."
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.508,
594
+ 0.556,
595
+ 0.884,
596
+ 0.652
597
+ ],
598
+ "angle": 0,
599
+ "content": "Initial Retrieval. For all methods we consider in our experiments, the initial retrieval is performed using the pyserini implementation of BM25 (Lin et al., 2021). Reranking approaches are then provided the top 100 documents retrieved by BM25 to rerank."
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.508,
605
+ 0.664,
606
+ 0.885,
607
+ 0.922
608
+ ],
609
+ "angle": 0,
610
+ "content": "Rank-R1 Settings. We compare two settings of Rank-R1: (i) a Zeroshot setting, where only the Setwise-based prompt, improved by the presence of the reasoning instruction is used, with no training performed, and (ii) a GRPO setting, where we employ the same prompt as in the Zeroshot setting, but we also train the model according to the GRPO RL method. For GRPO, we set \\( |G| = 8 \\), that is, for each training query we generate 8 answers with the old policy \\( \\pi_{\\theta_{old}} \\); as training dataset we use the training part of MS MARCO. For all Rank-R1 experiments, due to the limited computational resources, we only train on approximately \\( 18\\% \\) of the full MS-MARCO dataset, requiring roughly three (3B and 7B models) to five (14B model) days of training on four H100 GPUs. The details of GRPO training"
611
+ },
612
+ {
613
+ "type": "page_footnote",
614
+ "bbox": [
615
+ 0.116,
616
+ 0.895,
617
+ 0.435,
618
+ 0.921
619
+ ],
620
+ "angle": 0,
621
+ "content": "<sup>1</sup>https://huggingface.co/datasets/Tevatron/msmarco-passage"
622
+ },
623
+ {
624
+ "type": "page_number",
625
+ "bbox": [
626
+ 0.494,
627
+ 0.943,
628
+ 0.505,
629
+ 0.955
630
+ ],
631
+ "angle": 0,
632
+ "content": "3"
633
+ }
634
+ ],
635
+ [
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.114,
640
+ 0.085,
641
+ 0.457,
642
+ 0.101
643
+ ],
644
+ "angle": 0,
645
+ "content": "hyperparameters are provided in Appendix B."
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.113,
651
+ 0.111,
652
+ 0.489,
653
+ 0.255
654
+ ],
655
+ "angle": 0,
656
+ "content": "Comparison Methods. To understand the effect of the reasoning prompt and the GRPO training on the Setwise approach, we compare Rank-R1 against the original Setwise method (using prompt as listed in Appendix A), either used in a Zeroshot manner, or trained with the standard supervised fine-tuning (SFT) on MS MARCO data (400k training datapoints circa). The details of SFT training hyperparameters are provided in Appendix B."
657
+ },
658
+ {
659
+ "type": "text",
660
+ "bbox": [
661
+ 0.117,
662
+ 0.256,
663
+ 0.49,
664
+ 0.56
665
+ ],
666
+ "angle": 0,
667
+ "content": "To further contextualise the effectiveness of Rank-R1, we also consider the effectiveness of the current state-of-the-art Listwise reranker, RankZephyr (Pradeep et al., 2023), which was trained with data generated from GPT-4. We use the model checkpoint\\(^{2}\\) released by the author and run it ourselves to ensure the exact same settings. We also report the effectiveness of RankGPT, a zero-shot listwise reranked where GPT-4 is used as backbone. Due to budget constraints, we were unable to execute the experiments with RankGPT ourselves: we could only report the results of RankGPT obtained by Sun et al. (2023) for TREC DL19 and DL20, and by SU et al. (2025) for BRIGHT. Note that for the BRIGHT dataset, RankGPT was applied to a different implementation of BM25 from the one we used: the one used by RankGPT has a higher average nDCG@10 than our BM25."
668
+ },
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.113,
673
+ 0.571,
674
+ 0.49,
675
+ 0.684
676
+ ],
677
+ "angle": 0,
678
+ "content": "Backbone LLMs. For Setwise and Rank-R1, we explore base LLMs using instruction-tuned Qwen2.5 series models (Qwen et al., 2025) ranging from 3 billion to 14 billion parameters. RankZephyr is based on the Zephyr 7B backbone (Tunstall et al., 2023); RankGPT is based on OpenAI's GPT-4 model (OpenAI et al., 2024)."
679
+ },
680
+ {
681
+ "type": "title",
682
+ "bbox": [
683
+ 0.114,
684
+ 0.697,
685
+ 0.214,
686
+ 0.71
687
+ ],
688
+ "angle": 0,
689
+ "content": "4 Results"
690
+ },
691
+ {
692
+ "type": "title",
693
+ "bbox": [
694
+ 0.114,
695
+ 0.723,
696
+ 0.347,
697
+ 0.737
698
+ ],
699
+ "angle": 0,
700
+ "content": "4.1 In-domain effectiveness"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.113,
706
+ 0.744,
707
+ 0.489,
708
+ 0.791
709
+ ],
710
+ "angle": 0,
711
+ "content": "In Table 1, we present the effectiveness of Rank-R1 variants on the TREC-DL19 and DL20 passage ranking datasets."
712
+ },
713
+ {
714
+ "type": "text",
715
+ "bbox": [
716
+ 0.113,
717
+ 0.793,
718
+ 0.489,
719
+ 0.887
720
+ ],
721
+ "angle": 0,
722
+ "content": "We start by comparing Setwise and Rank-R1 under the zero-shot setting. The results suggest that incorporating the reasoning process into the Setwise method improves zero-shot ranking effectiveness. The improvements are particularly large on the 3B size model."
723
+ },
724
+ {
725
+ "type": "table",
726
+ "bbox": [
727
+ 0.511,
728
+ 0.082,
729
+ 0.894,
730
+ 0.346
731
+ ],
732
+ "angle": 0,
733
+ "content": "<table><tr><td>Model</td><td>Training</td><td>DL19</td><td>DL20</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.506</td><td>.480</td></tr><tr><td>RankZephyr-7B</td><td>GPT4-distil.</td><td>.739</td><td>.706</td></tr><tr><td>RankGPT</td><td>Zeroshot</td><td>.756</td><td>.706</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.371</td><td>.317</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.734</td><td>.672</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.605</td><td>.538</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.713</td><td>.668</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.675</td><td>.636</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.738</td><td>.692</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.712</td><td>.662</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.727</td><td>.685</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.677</td><td>.648</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.729</td><td>.689</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.679</td><td>.652</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.714</td><td>.691</td></tr></table>"
734
+ },
735
+ {
736
+ "type": "table_caption",
737
+ "bbox": [
738
+ 0.508,
739
+ 0.355,
740
+ 0.885,
741
+ 0.397
742
+ ],
743
+ "angle": 0,
744
+ "content": "Table 1: TREC DL19 and DL20 nDCG@10 results. SFT=supervised fine tuned. GRPO trained on only \\(18\\%\\) of 400k data used to train SFT."
745
+ },
746
+ {
747
+ "type": "image",
748
+ "bbox": [
749
+ 0.516,
750
+ 0.417,
751
+ 0.891,
752
+ 0.575
753
+ ],
754
+ "angle": 0,
755
+ "content": null
756
+ },
757
+ {
758
+ "type": "image_caption",
759
+ "bbox": [
760
+ 0.508,
761
+ 0.588,
762
+ 0.883,
763
+ 0.616
764
+ ],
765
+ "angle": 0,
766
+ "content": "Figure 2: Data efficiency comparison between Setwise SFT and Rank-R1."
767
+ },
768
+ {
769
+ "type": "text",
770
+ "bbox": [
771
+ 0.508,
772
+ 0.644,
773
+ 0.885,
774
+ 0.773
775
+ ],
776
+ "angle": 0,
777
+ "content": "Next we consider the effects of training with GRPO. With GRPO training, Rank-R1 effectiveness increases, indicating that reasoning and answer generation (i.e. ranking) are enhanced by RL training. This improvement makes Rank-R1 comparable to the Setwise SFT trained on the full dataset and brings it closer to the effectiveness of the state-of-the-art RankZephyr."
778
+ },
779
+ {
780
+ "type": "title",
781
+ "bbox": [
782
+ 0.509,
783
+ 0.787,
784
+ 0.822,
785
+ 0.803
786
+ ],
787
+ "angle": 0,
788
+ "content": "4.2 Effect of quantity of training data"
789
+ },
790
+ {
791
+ "type": "text",
792
+ "bbox": [
793
+ 0.507,
794
+ 0.809,
795
+ 0.884,
796
+ 0.922
797
+ ],
798
+ "angle": 0,
799
+ "content": "The results in Table 1 for Rank-R1 trained with GRPO are obtained when using only \\(18\\%\\) of the MSMARCO training data (while SFT used all available training data). To explore whether longer training could further improve effectiveness, we continued training the 3B and 7B Rank-R1 models for an additional two days and evaluated check"
800
+ },
801
+ {
802
+ "type": "page_footnote",
803
+ "bbox": [
804
+ 0.114,
805
+ 0.895,
806
+ 0.488,
807
+ 0.92
808
+ ],
809
+ "angle": 0,
810
+ "content": "\\(^{2}\\)https://huggingface.co/castorini/rank_zephyr_7b_v1_full"
811
+ },
812
+ {
813
+ "type": "page_number",
814
+ "bbox": [
815
+ 0.494,
816
+ 0.943,
817
+ 0.505,
818
+ 0.955
819
+ ],
820
+ "angle": 0,
821
+ "content": "4"
822
+ }
823
+ ],
824
+ [
825
+ {
826
+ "type": "table",
827
+ "bbox": [
828
+ 0.12,
829
+ 0.082,
830
+ 0.878,
831
+ 0.272
832
+ ],
833
+ "angle": 0,
834
+ "content": "<table><tr><td>Model</td><td>Training</td><td>Bio.</td><td>Earth.</td><td>Econ.</td><td>Psy.</td><td>Rob.</td><td>Stack.</td><td>Sus.</td><td>Pony</td><td>Leet.</td><td>AoPS</td><td>TheoT.</td><td>TheoQ.</td><td>Avg.</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.182</td><td>.279</td><td>.164</td><td>.134</td><td>.109</td><td>.163</td><td>.161</td><td>.043</td><td>.247</td><td>.065</td><td>.021</td><td>.073</td><td>.137</td></tr><tr><td>RankZephyr-7b</td><td>GPT4-distill</td><td>.219</td><td>.237</td><td>.144</td><td>.103</td><td>.076</td><td>.137</td><td>.166</td><td>.065</td><td>.247</td><td>.068</td><td>.020</td><td>.073</td><td>.130</td></tr><tr><td>RankGPT4*</td><td>Zeroshot</td><td>.338</td><td>.342</td><td>.167</td><td>.270</td><td>.223</td><td>.277</td><td>.111</td><td>.156</td><td>.034</td><td>.012</td><td>.086</td><td>.002</td><td>.170</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.143</td><td>.175</td><td>.120</td><td>.102</td><td>.077</td><td>.079</td><td>.154</td><td>.053</td><td>.154</td><td>.017</td><td>.042</td><td>.021</td><td>.095</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.220</td><td>.188</td><td>.104</td><td>.115</td><td>.091</td><td>.058</td><td>.167</td><td>.057</td><td>.099</td><td>.040</td><td>.034</td><td>.038</td><td>.101</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.137</td><td>.173</td><td>.119</td><td>.152</td><td>.100</td><td>.066</td><td>.178</td><td>.037</td><td>.077</td><td>.040</td><td>.060</td><td>.025</td><td>.097</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.184</td><td>.171</td><td>.137</td><td>.169</td><td>.090</td><td>.100</td><td>.165</td><td>.047</td><td>.111</td><td>.035</td><td>.059</td><td>.032</td><td>.108</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.236</td><td>.223</td><td>.161</td><td>.171</td><td>.149</td><td>.092</td><td>.183</td><td>.063</td><td>.149</td><td>.041</td><td>.104</td><td>.056</td><td>.136</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.287</td><td>.301</td><td>.141</td><td>.239</td><td>.189</td><td>.137</td><td>.196</td><td>.071</td><td>.207</td><td>.070</td><td>.082</td><td>.082</td><td>.167</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.268</td><td>.248</td><td>.179</td><td>.221</td><td>.174</td><td>.103</td><td>.211</td><td>.044</td><td>.156</td><td>.033</td><td>.104</td><td>.059</td><td>.150</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.260</td><td>.285</td><td>.172</td><td>.242</td><td>.191</td><td>.104</td><td>.242</td><td>.043</td><td>.198</td><td>.043</td><td>.109</td><td>.083</td><td>.164</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.295</td><td>.322</td><td>.205</td><td>.248</td><td>.189</td><td>.147</td><td>.236</td><td>.087</td><td>.187</td><td>.080</td><td>.093</td><td>.076</td><td>.180</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.220</td><td>.293</td><td>.154</td><td>.230</td><td>.201</td><td>.157</td><td>.203</td><td>.062</td><td>.194</td><td>.095</td><td>.099</td><td>.097</td><td>.167</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.301</td><td>.366</td><td>.221</td><td>.246</td><td>.217</td><td>.154</td><td>.250</td><td>.090</td><td>.170</td><td>.091</td><td>.116</td><td>.092</td><td>.193</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.312</td><td>.385</td><td>.212</td><td>.264</td><td>.226</td><td>.189</td><td>.275</td><td>.092</td><td>.202</td><td>.097</td><td>.119</td><td>.092</td><td>.205</td></tr></table>"
835
+ },
836
+ {
837
+ "type": "table_caption",
838
+ "bbox": [
839
+ 0.114,
840
+ 0.281,
841
+ 0.883,
842
+ 0.325
843
+ ],
844
+ "angle": 0,
845
+ "content": "Table 2: BRIGHT nDCG@10 results. All methods rerank BM25 top-100 documents (First line). \\(\\ast\\): Results directly copied from the paper which uses a different BM25 ranking system (has a higher average nDCG@10 than our BM25)."
846
+ },
847
+ {
848
+ "type": "text",
849
+ "bbox": [
850
+ 0.113,
851
+ 0.35,
852
+ 0.489,
853
+ 0.414
854
+ ],
855
+ "angle": 0,
856
+ "content": "points saved during training. We report the results in Figure 2. In the figure, we also include results obtained when using SFT on incremental parts of the training data."
857
+ },
858
+ {
859
+ "type": "text",
860
+ "bbox": [
861
+ 0.113,
862
+ 0.416,
863
+ 0.49,
864
+ 0.593
865
+ ],
866
+ "angle": 0,
867
+ "content": "From the figure, we observe that Rank-R1 requires significantly less data than Setwise SFT to achieve the same level of performance at early training stage – however this data efficiency effect vanishes early on during the training phase. Passed \\(5 - 7\\%\\) of training data, in fact, the two training approaches tend to track each other. SFT has a clear advantage over GRPO in that it is by far less computationally expensive. On the other hand, GRPO adds new features to the reranker, introducing the ability to perform reasoning."
868
+ },
869
+ {
870
+ "type": "title",
871
+ "bbox": [
872
+ 0.114,
873
+ 0.607,
874
+ 0.433,
875
+ 0.638
876
+ ],
877
+ "angle": 0,
878
+ "content": "4.3 Reasoning intensive out-of-domain effectiveness"
879
+ },
880
+ {
881
+ "type": "text",
882
+ "bbox": [
883
+ 0.113,
884
+ 0.646,
885
+ 0.487,
886
+ 0.678
887
+ ],
888
+ "angle": 0,
889
+ "content": "Next we consider results from our out-of-domain experiments, reported in Table 2."
890
+ },
891
+ {
892
+ "type": "text",
893
+ "bbox": [
894
+ 0.113,
895
+ 0.68,
896
+ 0.489,
897
+ 0.791
898
+ ],
899
+ "angle": 0,
900
+ "content": "We observe that the SOTA RankZephyr reranker, which does not incorporate reasoning, does not provide better rankings than BM25 in most datasets from the BRIGHT benchmark - effectively failing at the reranking task. This suggests that the BRIGHT benchmark poses a challenge for current SOTA LLM rerankers."
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.113,
906
+ 0.793,
907
+ 0.49,
908
+ 0.922
909
+ ],
910
+ "angle": 0,
911
+ "content": "On the other hand, Rank-R1 trained with GPRO outperforms or is on par with both zero-shot and Setwise SFT models in most cases. Notably, when using the 14B model, Setwise SFT effectiveness plateaued and even performed worse than its zero-shot counterparts, suggesting that the large model trained on the MSMARCO could not generalize to the BRIGHT using the standard Setwise ap"
912
+ },
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.508,
917
+ 0.35,
918
+ 0.885,
919
+ 0.496
920
+ ],
921
+ "angle": 0,
922
+ "content": "proach. However, Rank-R1 based on the 14B model achieves the largest performance gain over Setwise SFT and even surpasses the GPT-4-based Listwise reranker baseline (RankGPT4). These results highlight that the reasoning process can help the model generalize to different domains and that strong reasoning abilities, along with larger model sizes, are crucial for LLM-based rerankers to be effective in reasoning-intensive ranking tasks."
923
+ },
924
+ {
925
+ "type": "title",
926
+ "bbox": [
927
+ 0.509,
928
+ 0.512,
929
+ 0.619,
930
+ 0.529
931
+ ],
932
+ "angle": 0,
933
+ "content": "5 Analysis"
934
+ },
935
+ {
936
+ "type": "title",
937
+ "bbox": [
938
+ 0.509,
939
+ 0.542,
940
+ 0.825,
941
+ 0.559
942
+ ],
943
+ "angle": 0,
944
+ "content": "5.1 Reward score v.s. Response length"
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.508,
950
+ 0.566,
951
+ 0.885,
952
+ 0.678
953
+ ],
954
+ "angle": 0,
955
+ "content": "In Figure 3, we present the received reward values and model completion lengths logged during training for Rank-R1, across different model sizes. Rewards consistently increase throughout training, with smaller models showing a higher rate of increase, while larger models start with a higher initial reward."
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.508,
961
+ 0.68,
962
+ 0.885,
963
+ 0.921
964
+ ],
965
+ "angle": 0,
966
+ "content": "Regarding completion length, larger models tend to generate longer responses; however, we do not observe a noticeable increase in length as training proceeds. This observation differs from the findings for DeepSeek-R1 (Guo et al., 2025). This may be attributed to two factors. First, we initialize RL training from an instruction-tuned model rather than a base model, meaning the instruction model already follows a reasonable reasoning process. Second, the MSMARCO passage ranking dataset is relatively simple compared to tasks like math or coding, where a longer reasoning process is more essential. Thus, extensive reasoning may not be necessary for achieving high effectiveness in this task."
967
+ },
968
+ {
969
+ "type": "page_number",
970
+ "bbox": [
971
+ 0.493,
972
+ 0.943,
973
+ 0.506,
974
+ 0.955
975
+ ],
976
+ "angle": 0,
977
+ "content": "5"
978
+ }
979
+ ],
980
+ [
981
+ {
982
+ "type": "image",
983
+ "bbox": [
984
+ 0.125,
985
+ 0.092,
986
+ 0.49,
987
+ 0.257
988
+ ],
989
+ "angle": 0,
990
+ "content": null
991
+ },
992
+ {
993
+ "type": "image",
994
+ "bbox": [
995
+ 0.125,
996
+ 0.27,
997
+ 0.49,
998
+ 0.436
999
+ ],
1000
+ "angle": 0,
1001
+ "content": null
1002
+ },
1003
+ {
1004
+ "type": "image_caption",
1005
+ "bbox": [
1006
+ 0.114,
1007
+ 0.452,
1008
+ 0.489,
1009
+ 0.482
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": "Figure 3: Rewards (top) and model completion length (bottom) obtained during GRPO training."
1013
+ },
1014
+ {
1015
+ "type": "title",
1016
+ "bbox": [
1017
+ 0.115,
1018
+ 0.507,
1019
+ 0.248,
1020
+ 0.523
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "5.2 Case study"
1024
+ },
1025
+ {
1026
+ "type": "text",
1027
+ "bbox": [
1028
+ 0.113,
1029
+ 0.528,
1030
+ 0.49,
1031
+ 0.786
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": "In Figure 4, we provide an example of Rank-R1's generation. We compare the outputs of the Zereshot model and the model after GPRO training. Both models successfully follow the instruction by providing a reasoning process within the <think> span and predicting a relevant document label in the correct format. However, the Zereshot model tends to merely describe what each document mentions and ultimately makes an incorrect prediction. In contrast, the GPRO-trained model focuses on the most relevant documents, compares them, and correctly selects the best one. In addition, we argue that Rank-R1's transparent reasoning process makes its predictions more explainable, which could be particularly important in domains such as medical document ranking."
1035
+ },
1036
+ {
1037
+ "type": "title",
1038
+ "bbox": [
1039
+ 0.115,
1040
+ 0.799,
1041
+ 0.248,
1042
+ 0.815
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "6 Conclusion"
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "bbox": [
1050
+ 0.114,
1051
+ 0.826,
1052
+ 0.49,
1053
+ 0.922
1054
+ ],
1055
+ "angle": 0,
1056
+ "content": "In this paper, we introduce Rank-R1, which uses training based on the GPRO reinforcement learning strategy to enhance the reasoning process of an LLM-based Setwise reranker. Our training approach does not require human-annotated reasoning data; instead, it relies only on doument rele"
1057
+ },
1058
+ {
1059
+ "type": "text",
1060
+ "bbox": [
1061
+ 0.537,
1062
+ 0.093,
1063
+ 0.724,
1064
+ 0.106
1065
+ ],
1066
+ "angle": 0,
1067
+ "content": "Query: common minerals list"
1068
+ },
1069
+ {
1070
+ "type": "title",
1071
+ "bbox": [
1072
+ 0.536,
1073
+ 0.117,
1074
+ 0.744,
1075
+ 0.129
1076
+ ],
1077
+ "angle": 0,
1078
+ "content": "Zeroshot Rank-R1 Completion:"
1079
+ },
1080
+ {
1081
+ "type": "text",
1082
+ "bbox": [
1083
+ 0.536,
1084
+ 0.129,
1085
+ 0.858,
1086
+ 0.26
1087
+ ],
1088
+ "angle": 0,
1089
+ "content": "<think>The query is \"common minerals list\". Document [3] talks about mica being a common rock-forming mineral. Document [19] mentions that chemical sedimentary rocks are formed through precipitation of minerals. However, it does not provide a comprehensive list of common minerals. Documents [1], [2], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], and [20] do not mention anything related to common minerals.</think>\n<answer>[3]</answer>"
1090
+ },
1091
+ {
1092
+ "type": "title",
1093
+ "bbox": [
1094
+ 0.536,
1095
+ 0.271,
1096
+ 0.787,
1097
+ 0.283
1098
+ ],
1099
+ "angle": 0,
1100
+ "content": "GRPO-Trained Rank-R1 Completion:"
1101
+ },
1102
+ {
1103
+ "type": "text",
1104
+ "bbox": [
1105
+ 0.536,
1106
+ 0.283,
1107
+ 0.858,
1108
+ 0.378
1109
+ ],
1110
+ "angle": 0,
1111
+ "content": "<think>The query is \"common minerals list\". Document [3] talks about mica, which is a common rock-forming mineral. Document [4] also mentions common rock-forming minerals. Both documents seem relevant to the query. However, document [4] provides a list of common rock-forming minerals, which directly answers the query. </think> <answer>[4]</answer>"
1112
+ },
1113
+ {
1114
+ "type": "image_caption",
1115
+ "bbox": [
1116
+ 0.508,
1117
+ 0.408,
1118
+ 0.883,
1119
+ 0.451
1120
+ ],
1121
+ "angle": 0,
1122
+ "content": "Figure 4: Example of completion. The Rank-R1 used in this example is based on Qwen2.5-3B-Instruct. The GRPO-Trained Rank-R1 gives the correct answer."
1123
+ },
1124
+ {
1125
+ "type": "text",
1126
+ "bbox": [
1127
+ 0.508,
1128
+ 0.478,
1129
+ 0.885,
1130
+ 0.67
1131
+ ],
1132
+ "angle": 0,
1133
+ "content": "vance assessments. We find that RL training performs similarly to supervised fine tuning on indomain tasks. However, in reasoning-intensive ranking tasks, Rank-R1 based on a 14B LLM achieves considerable higher effectiveness than the SOTA non-reasoning Listwise reranker, highlighting the importance of incorporating a reasoning process in document reranking. Moreover, this reasoning capability might improve the explainability of black-box LLM-based rerankers, and offer new affordances in terms of search engine result presentation and fruition."
1134
+ },
1135
+ {
1136
+ "type": "text",
1137
+ "bbox": [
1138
+ 0.508,
1139
+ 0.672,
1140
+ 0.884,
1141
+ 0.718
1142
+ ],
1143
+ "angle": 0,
1144
+ "content": "We have made our code open-source at https://github.com/ielab/l1m-rankers/tree/main/Rank-R1."
1145
+ },
1146
+ {
1147
+ "type": "title",
1148
+ "bbox": [
1149
+ 0.511,
1150
+ 0.75,
1151
+ 0.61,
1152
+ 0.765
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": "References"
1156
+ },
1157
+ {
1158
+ "type": "ref_text",
1159
+ "bbox": [
1160
+ 0.51,
1161
+ 0.774,
1162
+ 0.883,
1163
+ 0.815
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the trec 2020 deep learning track. Preprint, arXiv:2102.07662."
1167
+ },
1168
+ {
1169
+ "type": "ref_text",
1170
+ "bbox": [
1171
+ 0.51,
1172
+ 0.827,
1173
+ 0.884,
1174
+ 0.88
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": "Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020. Overview of the trec 2019 deep learning track. *Preprint*, arXiv:2003.07820."
1178
+ },
1179
+ {
1180
+ "type": "ref_text",
1181
+ "bbox": [
1182
+ 0.51,
1183
+ 0.894,
1184
+ 0.884,
1185
+ 0.921
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "Luyu Gao, Xueguang Ma, Jimmy Lin, and Jamie Callan. 2023. Tevatron: An efficient and flexible toolkit for"
1189
+ },
1190
+ {
1191
+ "type": "list",
1192
+ "bbox": [
1193
+ 0.51,
1194
+ 0.774,
1195
+ 0.884,
1196
+ 0.921
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": null
1200
+ },
1201
+ {
1202
+ "type": "page_number",
1203
+ "bbox": [
1204
+ 0.494,
1205
+ 0.943,
1206
+ 0.506,
1207
+ 0.955
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "6"
1211
+ }
1212
+ ],
1213
+ [
1214
+ {
1215
+ "type": "ref_text",
1216
+ "bbox": [
1217
+ 0.135,
1218
+ 0.086,
1219
+ 0.489,
1220
+ 0.153
1221
+ ],
1222
+ "angle": 0,
1223
+ "content": "neural retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 3120-3124, New York, NY, USA. Association for Computing Machinery."
1224
+ },
1225
+ {
1226
+ "type": "ref_text",
1227
+ "bbox": [
1228
+ 0.117,
1229
+ 0.162,
1230
+ 0.489,
1231
+ 0.228
1232
+ ],
1233
+ "angle": 0,
1234
+ "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv:2501.12948."
1235
+ },
1236
+ {
1237
+ "type": "ref_text",
1238
+ "bbox": [
1239
+ 0.117,
1240
+ 0.238,
1241
+ 0.488,
1242
+ 0.304
1243
+ ],
1244
+ "angle": 0,
1245
+ "content": "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations."
1246
+ },
1247
+ {
1248
+ "type": "ref_text",
1249
+ "bbox": [
1250
+ 0.117,
1251
+ 0.313,
1252
+ 0.488,
1253
+ 0.418
1254
+ ],
1255
+ "angle": 0,
1256
+ "content": "Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations. In Proceedings of the 44th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2021), pages 2356-2362."
1257
+ },
1258
+ {
1259
+ "type": "ref_text",
1260
+ "bbox": [
1261
+ 0.117,
1262
+ 0.428,
1263
+ 0.488,
1264
+ 0.52
1265
+ ],
1266
+ "angle": 0,
1267
+ "content": "Xueguang Ma, Liang Wang, Nan Yang, Furu Wei, and Jimmy Lin. 2024. Fine-tuning llama for multi-stage text retrieval. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 2421-2425, New York, NY, USA. Association for Computing Machinery."
1268
+ },
1269
+ {
1270
+ "type": "ref_text",
1271
+ "bbox": [
1272
+ 0.117,
1273
+ 0.529,
1274
+ 0.488,
1275
+ 0.581
1276
+ ],
1277
+ "angle": 0,
1278
+ "content": "Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-shot listwise document reranking with a large language model. Preprint, arXiv:2305.02156."
1279
+ },
1280
+ {
1281
+ "type": "ref_text",
1282
+ "bbox": [
1283
+ 0.117,
1284
+ 0.592,
1285
+ 0.488,
1286
+ 0.617
1287
+ ],
1288
+ "angle": 0,
1289
+ "content": "OpenAI et al. 2024. Gpt-4 technical report. Preprint, arXiv:2303.08774."
1290
+ },
1291
+ {
1292
+ "type": "ref_text",
1293
+ "bbox": [
1294
+ 0.117,
1295
+ 0.628,
1296
+ 0.488,
1297
+ 0.68
1298
+ ],
1299
+ "angle": 0,
1300
+ "content": "Ronak Pradeep, Sahel Sharifymoghaddam, and Jimmy Lin. 2023. Rankzephyr: Effective and robust zero-shot listwise reranking is a breeze! Preprint, arXiv:2312.02724."
1301
+ },
1302
+ {
1303
+ "type": "ref_text",
1304
+ "bbox": [
1305
+ 0.117,
1306
+ 0.69,
1307
+ 0.488,
1308
+ 0.717
1309
+ ],
1310
+ "angle": 0,
1311
+ "content": "Qwen et al. 2025. Qwen2.5 technical report. Preprint, arXiv:2412.15115."
1312
+ },
1313
+ {
1314
+ "type": "ref_text",
1315
+ "bbox": [
1316
+ 0.117,
1317
+ 0.727,
1318
+ 0.488,
1319
+ 0.805
1320
+ ],
1321
+ "angle": 0,
1322
+ "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. Preprint, arXiv:2402.03300."
1323
+ },
1324
+ {
1325
+ "type": "ref_text",
1326
+ "bbox": [
1327
+ 0.117,
1328
+ 0.816,
1329
+ 0.488,
1330
+ 0.92
1331
+ ],
1332
+ "angle": 0,
1333
+ "content": "Hongjin SU, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, Ruoxi Sun, Jinsung Yoon, Sercan O Arik, Danqi Chen, and Tao Yu. 2025. BRIGHT: A realistic and challenging benchmark for reasoning-intensive retrieval. In The Thirteenth International Conference on Learning Representations."
1334
+ },
1335
+ {
1336
+ "type": "list",
1337
+ "bbox": [
1338
+ 0.117,
1339
+ 0.086,
1340
+ 0.489,
1341
+ 0.92
1342
+ ],
1343
+ "angle": 0,
1344
+ "content": null
1345
+ },
1346
+ {
1347
+ "type": "ref_text",
1348
+ "bbox": [
1349
+ 0.511,
1350
+ 0.086,
1351
+ 0.882,
1352
+ 0.138
1353
+ ],
1354
+ "angle": 0,
1355
+ "content": "Shuoqi Sun, Shengyao Zhuang, Shuai Wang, and Guido Zuccon. 2024. An investigation of prompt variations for zero-shot llm-based rankers. arXiv preprint arXiv:2406.14117."
1356
+ },
1357
+ {
1358
+ "type": "ref_text",
1359
+ "bbox": [
1360
+ 0.512,
1361
+ 0.149,
1362
+ 0.882,
1363
+ 0.255
1364
+ ],
1365
+ "angle": 0,
1366
+ "content": "Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT good at search? investigating large language models as re-ranking agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14918-14937, Singapore. Association for Computational Linguistics."
1367
+ },
1368
+ {
1369
+ "type": "ref_text",
1370
+ "bbox": [
1371
+ 0.512,
1372
+ 0.265,
1373
+ 0.882,
1374
+ 0.356
1375
+ ],
1376
+ "angle": 0,
1377
+ "content": "Lewis Tunstall, Edward Beeching, Nathan Lambert, Nazneen Rajani, Kashif Rasul, Younes Belkada, Shengyi Huang, Leandro von Werra, Clémentine Fourrier, Nathan Habib, Nathan Sarrazin, Omar Sanseviero, Alexander M. Rush, and Thomas Wolf. 2023. Zephyr: Direct distillation of lm alignment. Preprint, arXiv:2310.16944."
1378
+ },
1379
+ {
1380
+ "type": "ref_text",
1381
+ "bbox": [
1382
+ 0.512,
1383
+ 0.368,
1384
+ 0.882,
1385
+ 0.434
1386
+ ],
1387
+ "angle": 0,
1388
+ "content": "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. 2020. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl."
1389
+ },
1390
+ {
1391
+ "type": "ref_text",
1392
+ "bbox": [
1393
+ 0.512,
1394
+ 0.444,
1395
+ 0.882,
1396
+ 0.485
1397
+ ],
1398
+ "angle": 0,
1399
+ "content": "Zhichao Xu. 2024. Rankmamba: Benchmarking mamba's document ranking performance in the era of transformers. Preprint, arXiv:2403.18276."
1400
+ },
1401
+ {
1402
+ "type": "ref_text",
1403
+ "bbox": [
1404
+ 0.512,
1405
+ 0.495,
1406
+ 0.882,
1407
+ 0.574
1408
+ ],
1409
+ "angle": 0,
1410
+ "content": "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog."
1411
+ },
1412
+ {
1413
+ "type": "ref_text",
1414
+ "bbox": [
1415
+ 0.512,
1416
+ 0.584,
1417
+ 0.882,
1418
+ 0.703
1419
+ ],
1420
+ "angle": 0,
1421
+ "content": "Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Bendersky. 2024a. Beyond yes and no: Improving zero-shot LLM rankers via scoring fine-grained relevance labels. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 358-370, Mexico City, Mexico. Association for Computational Linguistics."
1422
+ },
1423
+ {
1424
+ "type": "ref_text",
1425
+ "bbox": [
1426
+ 0.512,
1427
+ 0.713,
1428
+ 0.882,
1429
+ 0.805
1430
+ ],
1431
+ "angle": 0,
1432
+ "content": "Shengyao Zhuang, Bing Liu, Bevan Koopman, and Guido Zuccon. 2023. Open-source large language models are strong zero-shot query likelihood models for document ranking. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 8807-8817, Singapore. Association for Computational Linguistics."
1433
+ },
1434
+ {
1435
+ "type": "ref_text",
1436
+ "bbox": [
1437
+ 0.512,
1438
+ 0.816,
1439
+ 0.882,
1440
+ 0.921
1441
+ ],
1442
+ "angle": 0,
1443
+ "content": "Shengyao Zhuang, Honglei Zhuang, Bevan Koopman, and Guido Zuccon. 2024b. A setwise approach for effective and highly efficient zero-shot ranking with large language models. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 38-47, New York, NY, USA. Association for Computing Machinery."
1444
+ },
1445
+ {
1446
+ "type": "list",
1447
+ "bbox": [
1448
+ 0.511,
1449
+ 0.086,
1450
+ 0.882,
1451
+ 0.921
1452
+ ],
1453
+ "angle": 0,
1454
+ "content": null
1455
+ },
1456
+ {
1457
+ "type": "page_number",
1458
+ "bbox": [
1459
+ 0.494,
1460
+ 0.943,
1461
+ 0.505,
1462
+ 0.954
1463
+ ],
1464
+ "angle": 0,
1465
+ "content": "7"
1466
+ }
1467
+ ],
1468
+ [
1469
+ {
1470
+ "type": "table",
1471
+ "bbox": [
1472
+ 0.118,
1473
+ 0.082,
1474
+ 0.486,
1475
+ 0.216
1476
+ ],
1477
+ "angle": 0,
1478
+ "content": "<table><tr><td>Parameter</td><td>GRPO</td><td>SFT</td></tr><tr><td>learning_rate</td><td>1e-5</td><td>1e-5</td></tr><tr><td>batch_size</td><td>64</td><td>64</td></tr><tr><td>optimizer</td><td>AdamW</td><td>AdamW</td></tr><tr><td>lora_rank</td><td>16</td><td>16</td></tr><tr><td>max_prompt_length</td><td>4096</td><td>4096</td></tr><tr><td>max Completion_length</td><td>2048</td><td>na</td></tr><tr><td>group_size</td><td>8</td><td>na</td></tr></table>"
1479
+ },
1480
+ {
1481
+ "type": "table_caption",
1482
+ "bbox": [
1483
+ 0.166,
1484
+ 0.225,
1485
+ 0.435,
1486
+ 0.24
1487
+ ],
1488
+ "angle": 0,
1489
+ "content": "Table 3: Hyper-parameters for training."
1490
+ },
1491
+ {
1492
+ "type": "title",
1493
+ "bbox": [
1494
+ 0.115,
1495
+ 0.264,
1496
+ 0.401,
1497
+ 0.281
1498
+ ],
1499
+ "angle": 0,
1500
+ "content": "A Prompt for Setwise reranker"
1501
+ },
1502
+ {
1503
+ "type": "title",
1504
+ "bbox": [
1505
+ 0.142,
1506
+ 0.298,
1507
+ 0.21,
1508
+ 0.31
1509
+ ],
1510
+ "angle": 0,
1511
+ "content": "SYSTEM:"
1512
+ },
1513
+ {
1514
+ "type": "text",
1515
+ "bbox": [
1516
+ 0.142,
1517
+ 0.311,
1518
+ 0.462,
1519
+ 0.37
1520
+ ],
1521
+ "angle": 0,
1522
+ "content": "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant provides the user with the answer enclosed within <answer> </answer> tags, i.e., <answer> answer here </answer>."
1523
+ },
1524
+ {
1525
+ "type": "title",
1526
+ "bbox": [
1527
+ 0.143,
1528
+ 0.381,
1529
+ 0.188,
1530
+ 0.392
1531
+ ],
1532
+ "angle": 0,
1533
+ "content": "USER:"
1534
+ },
1535
+ {
1536
+ "type": "text",
1537
+ "bbox": [
1538
+ 0.143,
1539
+ 0.394,
1540
+ 0.461,
1541
+ 0.417
1542
+ ],
1543
+ "angle": 0,
1544
+ "content": "Given the query: {\"query)\", which of the following documents is most relevant?"
1545
+ },
1546
+ {
1547
+ "type": "text",
1548
+ "bbox": [
1549
+ 0.144,
1550
+ 0.418,
1551
+ 0.246,
1552
+ 0.429
1553
+ ],
1554
+ "angle": 0,
1555
+ "content": "[1] {document1}"
1556
+ },
1557
+ {
1558
+ "type": "text",
1559
+ "bbox": [
1560
+ 0.144,
1561
+ 0.43,
1562
+ 0.246,
1563
+ 0.442
1564
+ ],
1565
+ "angle": 0,
1566
+ "content": "[2] {document2}"
1567
+ },
1568
+ {
1569
+ "type": "list",
1570
+ "bbox": [
1571
+ 0.144,
1572
+ 0.418,
1573
+ 0.246,
1574
+ 0.442
1575
+ ],
1576
+ "angle": 0,
1577
+ "content": null
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "bbox": [
1582
+ 0.143,
1583
+ 0.45,
1584
+ 0.263,
1585
+ 0.465
1586
+ ],
1587
+ "angle": 0,
1588
+ "content": "[20] {document20}"
1589
+ },
1590
+ {
1591
+ "type": "text",
1592
+ "bbox": [
1593
+ 0.142,
1594
+ 0.466,
1595
+ 0.462,
1596
+ 0.525
1597
+ ],
1598
+ "angle": 0,
1599
+ "content": "Please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <answer>[3]</answer>."
1600
+ },
1601
+ {
1602
+ "type": "text",
1603
+ "bbox": [
1604
+ 0.114,
1605
+ 0.543,
1606
+ 0.489,
1607
+ 0.606
1608
+ ],
1609
+ "angle": 0,
1610
+ "content": "We use the above prompt for both zero-shot and supervised fine-tuning of Setwise rerankers. The only difference from the prompt used for Rank- \\(R1\\) is that the reasoning instructions are removed."
1611
+ },
1612
+ {
1613
+ "type": "title",
1614
+ "bbox": [
1615
+ 0.115,
1616
+ 0.618,
1617
+ 0.388,
1618
+ 0.636
1619
+ ],
1620
+ "angle": 0,
1621
+ "content": "B Training hyper-parameters"
1622
+ },
1623
+ {
1624
+ "type": "text",
1625
+ "bbox": [
1626
+ 0.113,
1627
+ 0.644,
1628
+ 0.49,
1629
+ 0.772
1630
+ ],
1631
+ "angle": 0,
1632
+ "content": "We use the TRL (von Werra et al., 2020) library for both GPRP and SFT training. Both methods are trained with LoRA adapter (Hu et al., 2022). For SFT, we compute the cross-entropy loss only on the answer span and ignore the loss on the prompt tokens. The hyperparameters are listed in Table 3, while other parameters follow the default settings of the TRL trainer."
1633
+ },
1634
+ {
1635
+ "type": "page_number",
1636
+ "bbox": [
1637
+ 0.494,
1638
+ 0.943,
1639
+ 0.506,
1640
+ 0.955
1641
+ ],
1642
+ "angle": 0,
1643
+ "content": "8"
1644
+ }
1645
+ ]
1646
+ ]
data/2025/2503_06xxx/2503.06034/e7fd26f7-fd01-4638-b446-400698cb8b0c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5616f8f725597231a1a9048f4d1482dbf2fa5b9cd952cc26d27b2f4f708aaea4
3
+ size 741186
data/2025/2503_06xxx/2503.06034/full.md ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Rank-R1: Enhancing Reasoning in LLM-based Document Rerankers via Reinforcement Learning
2
+
3
+ Shengyao Zhuang\*,1, Xueguang Ma\*,2, Bevan Koopman\*,3, Jimmy Lin\*, Guido Zuccon\* 1CSIRO,
4
+
5
+ 2 University of Waterloo,
6
+
7
+ <sup>3</sup> The University of Queensland
8
+
9
+ # Abstract
10
+
11
+ In this paper, we introduce Rank-R1, a novel LLM-based reranker that performs reasoning over both the user query and candidate documents before performing the ranking task.
12
+
13
+ Existing document reranking methods based on large language models (LLMs) typically rely on prompting or fine-tuning LLMs to order or label candidate documents according to their relevance to a query. For Rank-R1, we use a reinforcement learning algorithm along with only a small set of relevance labels (without any reasoning supervision) to enhance the reasoning ability of LLM-based rerankers. Our hypothesis is that adding reasoning capabilities to the rerankers can improve their relevance assessment and ranking capabilities.
14
+
15
+ Our experiments on the TREC DL and BRIGHT datasets show that Rank-R1 is highly effective, especially for complex queries. In particular, we find that Rank-R1 achieves effectiveness on in-domain datasets at par with that of supervised fine-tuning methods, but utilizing only $18\%$ of the training data used by the fine-tuning methods. We also find that the model largely outperforms zero-shot and supervised fine-tuning when applied to out-of-domain datasets featuring complex queries, especially when a 14B-size model is used. Finally, we qualitatively observe that Rank-R1's reasoning process improves the explainability of the ranking results, opening new opportunities for search engine results presentation and fruition.
16
+
17
+ # 1 Introduction
18
+
19
+ Large language models (LLMs) have shown strong performance in document ranking tasks (Ma et al., 2023; Sun et al., 2023; Zhuang et al., 2023, 2024b,a; Sun et al., 2024; Xu, 2024). Zero-shot
20
+
21
+ prompting methods, such as RankGPT, rerank documents by directly prompting LLMs to generate reordered document lists (Sun et al., 2023). However, these methods rely on the assumption that the LLM follows instructions well. Moreover, being zero-shot, they do not leverage available human-annotated relevance data for further improvement.
22
+
23
+ In contrast, methods like RankLlama fine-tune LLMs using human relevance judgments, assigning scores to each query-document pair (Ma et al., 2024). While effective, these approaches do not explicitly model reasoning processes. This is mainly due to the lack of high-quality reasoning data for supervised fine-tuning. In practice, user relevance judgments often come in the form of selecting the most relevant document from a set, but achieving high-quality rankings requires strong reasoning capabilities to interpret complex relevance relationships.
24
+
25
+ Recent advancements in reinforcement learning (RL) for LLMs, such as DeepSeek-R1 (Guo et al., 2025) and Simple RL (Zeng et al., 2025), have demonstrated that reward-based training can enhance reasoning abilities, particularly in tasks like mathematical question-answering (Shao et al., 2024). These recent innovations suggest that rule-based reward scoring alone can improve an LLM's ability to reason and explain.
26
+
27
+ Building on this insight, we ask whether reinforcement learning in the style of Deepseek-R1 can improve reasoning in document reranking. Specifically, we apply Group Relative Policy Optimization (GRPO) (Shao et al., 2024) to train an LLM-based reranker. Given a user query and a list of retrieved candidate documents, the reranker generates reasoning steps before selecting the most relevant document. The reward signal is determined only by whether the model eventually identifies the most relevant document among the candidates correctly.
28
+
29
+ We train our model, Rank-R1, on the MS MARCO passage ranking dataset and evaluate it
30
+
31
+ on TREC DL19 and DL20 (in-domain datasets). Our results show that RL-based training is at par with supervised fine-tuning on in-domain data.
32
+
33
+ Additionally, we evaluate Rank-R1 on the BRIGHT dataset (SU et al., 2025), which requires complex query reasoning and relevance understanding, and is out-of-domain with respect to the data used to train the model. Rank-R1, trained with retrieval reinforcement learning, outperforms both zero-shot prompting and supervised fine-tuning on this out-of-domain dataset. Notably, our 14B model surpasses the much larger (zeroshot) GPT-4 in reranking performance on the BRIGHT dataset.
34
+
35
+ # 2 Method
36
+
37
+ To train Rank-R1, we adapt the RL training framework proposed by DeepSeek (Shao et al., 2024; Guo et al., 2025) to enhance the reasoning ability of LLM-based document rerankers. In this section, we discuss the details of each component in our method.
38
+
39
+ # 2.1 LLM Reranking
40
+
41
+ Our LLM reranker follows the Setwise prompting approach proposed by Zhuang et al. (2024b). This method takes a query and a set of candidate documents as input to the LLM and prompts the LLM to select the most relevant document among the candidates based on relevance to the query. Then, the heapsort algorithm is used to build a heap tree over all the candidate documents from the first-stage retriever, and the documents are reranked via the "heapify" operations with the Setwise prompt.
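+
+ To make the mechanics concrete, the sketch below shows the core Setwise operation (asking the LLM to pick the most relevant document out of a small candidate set) wired into a simple selection-style reranking loop. This is our own simplification of the heap-based procedure, not the authors' implementation; `llm_pick_best` is a hypothetical callable that returns the 1-based label chosen by the model.
+
+ ```python
+ def setwise_rerank(query, docs, llm_pick_best, set_size=20, top_k=10):
+     """Selection-style simplification of Setwise reranking: repeatedly ask the
+     LLM which of `set_size` candidates is most relevant and promote the winner."""
+     remaining = list(docs)
+     ranked = []
+     while remaining and len(ranked) < top_k:
+         window = remaining[:set_size]
+         label = llm_pick_best(query, window)   # 1-based label, e.g. parsed from <answer>[3]</answer>
+         winner = window[label - 1]
+         ranked.append(winner)
+         remaining.remove(winner)
+     return ranked + remaining                  # top_k ordered, rest left in original order
+ ```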
42
+
43
+ However, the original Setwise ranking approach does not encourage the LLM to reason about the relevance between the query and the documents. Instead, it directly asks for the most relevant one. To unlock the reranker's reasoning ability, we modify the original Setwise prompt by adding a reasoning instruction, as shown in Figure 1. Specifically, we adapt the system prompt from the DeepSeek-R1-Zero method to the Setwise prompt. This modification encourages LLMs to reason first before providing an answer—in our case, predicting the label of the most relevant candidate document to the query. We refer to the Setwise method using this modified prompt as Rank-R1.
44
+
45
+ # 2.2 Reinforcement Learning
46
+
47
+ Although any modern instruction-tuned LLM, when coupled with our Rank-R1 prompt, may ex
48
+
49
+ # SYSTEM:
50
+
51
+ A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>.
52
+
53
+ # USER:
54
+
55
+ Given the query: {"query}", which of the following documents is most relevant?
56
+
57
+ [1] {document1}
58
+ [2] {document2}
59
+
60
+ [20] {document20}
61
+
62
+ After completing the reasoning process, please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <think> reasoning process here </think> <answer>[3]</answer>.
63
+
64
+ Figure 1: Prompt used for Rank-R1.
65
+
66
+ hibit strong zeroshot reasoning ability, their reasoning process could still be suboptimal for the Setwise ranking method. This is because the LLMs might not have been fine-tuned on similar instructional data. On the other hand, gathering human-annotated reasoning data for large-scale supervised fine-tuning of Rank-R1 could be both costly and infeasible. To address these challenges, we employ the GRPO RL algorithm (Shao et al., 2024) to enhance the reasoning process of the Rank-R1 reranker. The GRPO algorithm optimizes the following objective:
67
+
68
+ $$
69
+ \begin{aligned} \mathcal{J}_{GRPO}(\theta) = \ & \mathbb{E}\big[ q \sim P(Q), \{o_i\}_{i=1}^{|G|} \sim \pi_{\theta_{old}}(O \mid q) \big] \\ & \frac{1}{|G|} \sum_{i=1}^{|G|} \bigg( \min \Big( \frac{\pi_{\theta}(o_i \mid q)}{\pi_{\theta_{old}}(o_i \mid q)} A_i, \operatorname{clip}\Big( \frac{\pi_{\theta}(o_i \mid q)}{\pi_{\theta_{old}}(o_i \mid q)}, 1-\epsilon, 1+\epsilon \Big) A_i \Big) - \beta\, \mathbb{D}_{KL}\big( \pi_{\theta} \,\|\, \pi_{ref} \big) \bigg) \tag{1} \end{aligned}
70
+ $$
71
+
72
+ where $Q$ is the Setwise ranking prompt in the training data, $G$ is a group of generated data points sampled from an old policy $\pi_{\theta_{old}}$ (in our case, the initial LLM) given a sampled prompt, and $\pi_{\theta}$ is the LLM ranker we are optimizing. The clip operation is used to cap the ratio of the new and old policies. In our experiment, we only use the data samples from $\pi_{\theta_{old}}$ to update $\pi_{\theta}$ once, meaning $\pi_{\theta_{old}} = \pi_{\theta}$ , so Eq 1 reduces to:
73
+
74
+ $$
75
+ \mathcal{J}_{GRPO}(\theta) = \frac{1}{|G|} \sum_{i=1}^{|G|} \left( \frac{\pi_{\theta}(o_i \mid q)}{\pi_{\theta_{old}}(o_i \mid q)} A_i - \beta\, \mathbb{D}_{KL}\big( \pi_{\theta} \,\|\, \pi_{ref} \big) \right) \tag{2}
80
+ $$
81
+
82
+ where $\mathbb{D}_{KL}$ is the KL loss that penalizes how far the new policy differs from a reference policy $\pi_{ref}$ , which in our case is the original instruction-tuned LLM.
83
+
84
+ The generated data points, in our case, represent the reasoning process. We use the generated relevant document label to calculate the advantage $A$ as follows:
85
+
86
+ $$
87
+ A_i = \frac{r_i - \operatorname{mean}\left(\{r_1, r_2, \cdots, r_G\}\right)}{\operatorname{std}\left(\{r_1, r_2, \cdots, r_G\}\right)} \tag{3}
88
+ $$
89
+
90
+ where $r$ is the reward given by the training data, which we describe in detail later. Overall, the GRPO training optimizes the LLM to generate tokens that maximize the rewards.
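+
+ As a small illustration of Eq. 3 (ours, not the authors' code), the group of $|G|$ rewards is standardized so that completions that beat the group mean receive a positive advantage:
+
+ ```python
+ import statistics
+
+ def group_advantages(rewards):
+     """Standardize rewards within one group of |G| sampled completions (Eq. 3)."""
+     mean = statistics.mean(rewards)
+     std = statistics.pstdev(rewards) or 1.0   # guard against a zero-variance group
+     return [(r - mean) / std for r in rewards]
+
+ # Example: 8 completions for one query, three of which picked the judged-relevant document.
+ print(group_advantages([1, 0, 0, 1, 0, 0, 1, 0]))
+ ```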
91
+
92
+ The Setwise prompts and rewards for our GRPO training come from training data. For constructing such RL training data, we use the MS-MARCO training data provided by the Tevatron IR toolkit (Gao et al., 2023), which includes training queries, human-labeled relevant documents, and BM25-retrieved top-100 documents. For each training query, we sample 19 documents from the set retrieved by BM25, along with one labeled relevant document, to form the Rank-R1 prompt.
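+
+ For illustration, one such training example could be assembled as follows (the field names are our own; the Tevatron training files use their own schema):
+
+ ```python
+ import random
+
+ def build_rank_r1_example(query, positive, bm25_docs, group_size=20, seed=0):
+     """Pair one judged-relevant passage with 19 BM25-retrieved passages and
+     record which label in the shuffled list points at the relevant one."""
+     rng = random.Random(seed)
+     negatives = rng.sample([d for d in bm25_docs if d != positive], group_size - 1)
+     candidates = negatives + [positive]
+     rng.shuffle(candidates)
+     gold_label = candidates.index(positive) + 1   # labels in the prompt are 1-based
+     doc_block = "\n".join(f"[{i + 1}] {doc}" for i, doc in enumerate(candidates))
+     return {"query": query, "documents": doc_block, "gold_label": gold_label}
+ ```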
93
+
94
+ We design the reward function for RL as follows: a reward of one is granted if and only if the LLM generations match the reasoning and answering format (i.e., the generated tokens fill in the <think> </think> <answer> </answer> spans) and the answer correctly matches the label of the ground-truth relevant document. Otherwise, a reward of zero is provided. Our straightforward rule-based reward mechanism does not impose constraints on the reasoning process but instead encourages the model to generate the correct format and answer following the reasoning. Most importantly, this RL training does not require human-annotated reasoning data.
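+
+ A minimal sketch of this reward rule (our own illustration, not the released code), assuming the completion is a plain string and `gold_label` is the 1-based label of the judged-relevant document in the prompt:
+
+ ```python
+ import re
+
+ ANSWER_PATTERN = r"^\s*<think>.+?</think>\s*<answer>\s*\[(\d+)\]\s*</answer>\s*$"
+
+ def rank_r1_reward(completion, gold_label):
+     """Return 1.0 iff the completion follows the <think>/<answer> format and
+     the answer names the ground-truth relevant document; otherwise 0.0."""
+     match = re.match(ANSWER_PATTERN, completion, flags=re.DOTALL)
+     if match is None:
+         return 0.0                      # wrong format: no reward
+     return 1.0 if int(match.group(1)) == gold_label else 0.0
+ ```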
95
+
96
+ There are two main reasons why we selected the Setwise ranking approach as the backend of our Rank-R1: First, the nature of Setwise prompting allows us to use a simple rule-based reward function, as only the most relevant document label needs to be predicted, enabling a straightforward match with
97
+
98
+ the answer span. Secondly, it is convenient for us to fairly compare the RL-trained Rank-R1 with the supervised fine-tuned Setwise reranker, which simply removes the reasoning process from the prompt and is directly trained to predict the ground-truth label using the same training data. Other LLM-based ranking methods, such as Listwise prompting, are harder to fairly compare in this setting, as a ground-truth ranking is usually not available in the training data (MS MARCO only has one judged relevant document per query on average).
99
+
100
+ # 3 Experimental Settings
101
+
102
+ Datasets. We explore two dataset settings to evaluate the effectiveness of Rank-R1: an in-domain setting, where we use the TREC-DL19 and DL20 datasets (Craswell et al., 2020, 2021), and an out-of-domain setting, where we use the BRIGHT benchmark datasets (SU et al., 2025). DL19 and DL20 are in-domain because they are based on the MS MARCO passage ranking dataset – the same dataset used to train our rerankers; unlike MS MARCO, though, they contain deep assessments for each query (around 210 assessments per query on average). The BRIGHT benchmark datasets span several domains, including biology, code, and math, and require intensive reasoning to rank relevant documents.
103
+
104
+ Initial Retrieval. For all methods we consider in our experiments, the initial retrieval is performed using the pyserini implementation of BM25 (Lin et al., 2021). Reranking approaches are then provided the top 100 documents retrieved by BM25 to rerank.
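+
+ For reference, obtaining such a BM25 top-100 with Pyserini looks roughly as follows (shown here with the prebuilt MS MARCO v1 passage index; the BRIGHT collections use their own indexes):
+
+ ```python
+ from pyserini.search.lucene import LuceneSearcher
+
+ # Prebuilt Lucene index for the MS MARCO v1 passage collection.
+ searcher = LuceneSearcher.from_prebuilt_index("msmarco-v1-passage")
+ hits = searcher.search("common minerals list", k=100)
+ candidates = [(hit.docid, hit.score) for hit in hits]   # fed to the reranker
+ ```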
105
+
106
+ Rank-R1 Settings. We compare two settings of Rank-R1: (i) a Zeroshot setting, where only the Setwise-based prompt, augmented with the reasoning instruction, is used and no training is performed, and (ii) a GRPO setting, where we employ the same prompt as in the Zeroshot setting but also train the model with the GRPO RL method. For GRPO, we set $|G| = 8$, that is, for each training query we generate 8 answers with the old policy $\pi_{\theta_{old}}$; as the training dataset we use the training split of MS MARCO. For all Rank-R1 experiments, due to limited computational resources, we only train on approximately $18\%$ of the full MS MARCO dataset, requiring roughly three (3B and 7B models) to five (14B model) days of training on four H100 GPUs. The details of GRPO training
107
+
108
+ hyperparameters are provided in Appendix B.
109
+
110
+ Comparison Methods. To understand the effect of the reasoning prompt and the GRPO training on the Setwise approach, we compare Rank-R1 against the original Setwise method (using the prompt listed in Appendix A), either used in a Zeroshot manner or trained with standard supervised fine-tuning (SFT) on MS MARCO data (circa 400k training data points). The details of SFT training hyperparameters are provided in Appendix B.
111
+
112
+ To further contextualise the effectiveness of Rank-R1, we also consider the effectiveness of the current state-of-the-art Listwise reranker, RankZephyr (Pradeep et al., 2023), which was trained with data generated from GPT-4. We use the model checkpoint released by the authors and run it ourselves to ensure the exact same settings. We also report the effectiveness of RankGPT, a zero-shot listwise reranker where GPT-4 is used as the backbone. Due to budget constraints, we were unable to execute the experiments with RankGPT ourselves: we could only report the results of RankGPT obtained by Sun et al. (2023) for TREC DL19 and DL20, and by SU et al. (2025) for BRIGHT. Note that for the BRIGHT dataset, RankGPT was applied to a different implementation of BM25 from the one we used: the one used by RankGPT has a higher average nDCG@10 than our BM25.
113
+
114
+ Backbone LLMs. For Setwise and Rank-R1, we use instruction-tuned Qwen2.5 series models (Qwen et al., 2025) ranging from 3 billion to 14 billion parameters as backbone LLMs. RankZephyr is based on the Zephyr 7B backbone (Tunstall et al., 2023); RankGPT is based on OpenAI's GPT-4 model (OpenAI et al., 2024).
115
+
116
+ # 4 Results
117
+
118
+ # 4.1 In-domain effectiveness
119
+
120
+ In Table 1, we present the effectiveness of Rank-R1 variants on the TREC-DL19 and DL20 passage ranking datasets.
121
+
122
+ We start by comparing Setwise and Rank-R1 under the zero-shot setting. The results suggest that incorporating the reasoning process into the Setwise method improves zero-shot ranking effectiveness. The improvements are particularly large on the 3B size model.
123
+
124
+ <table><tr><td>Model</td><td>Training</td><td>DL19</td><td>DL20</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.506</td><td>.480</td></tr><tr><td>RankZephyr-7B</td><td>GPT4-distil.</td><td>.739</td><td>.706</td></tr><tr><td>RankGPT</td><td>Zeroshot</td><td>.756</td><td>.706</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.371</td><td>.317</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.734</td><td>.672</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.605</td><td>.538</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.713</td><td>.668</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.675</td><td>.636</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.738</td><td>.692</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.712</td><td>.662</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.727</td><td>.685</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.677</td><td>.648</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.729</td><td>.689</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.679</td><td>.652</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.714</td><td>.691</td></tr></table>
125
+
126
+ Table 1: TREC DL19 and DL20 nDCG@10 results. SFT = supervised fine-tuned. GRPO was trained on only $18\%$ of the 400k data points used to train SFT.
127
+
128
+ ![](images/c1476da0554e945ca909e8455bd43585b6e7854ec087c1ca7fadec2fbf1ae44c.jpg)
129
+ Figure 2: Data efficiency comparison between Setwise SFT and Rank-R1.
130
+
131
+ Next we consider the effects of training with GRPO. With GRPO training, Rank-R1 effectiveness increases, indicating that reasoning and answer generation (i.e. ranking) are enhanced by RL training. This improvement makes Rank-R1 comparable to the Setwise SFT trained on the full dataset and brings it closer to the effectiveness of the state-of-the-art RankZephyr.
132
+
133
+ # 4.2 Effect of quantity of training data
134
+
135
+ The results in Table 1 for Rank-R1 trained with GRPO are obtained when using only $18\%$ of the MSMARCO training data (while SFT used all available training data). To explore whether longer training could further improve effectiveness, we continued training the 3B and 7B Rank-R1 models for an additional two days and evaluated check
136
+
137
+ <table><tr><td>Model</td><td>Training</td><td>Bio.</td><td>Earth.</td><td>Econ.</td><td>Psy.</td><td>Rob.</td><td>Stack.</td><td>Sus.</td><td>Pony</td><td>Leet.</td><td>AoPS</td><td>TheoT.</td><td>TheoQ.</td><td>Avg.</td></tr><tr><td>BM25</td><td>zeroshot</td><td>.182</td><td>.279</td><td>.164</td><td>.134</td><td>.109</td><td>.163</td><td>.161</td><td>.043</td><td>.247</td><td>.065</td><td>.021</td><td>.073</td><td>.137</td></tr><tr><td>RankZephyr-7b</td><td>GPT4-distill</td><td>.219</td><td>.237</td><td>.144</td><td>.103</td><td>.076</td><td>.137</td><td>.166</td><td>.065</td><td>.247</td><td>.068</td><td>.020</td><td>.073</td><td>.130</td></tr><tr><td>RankGPT4*</td><td>Zeroshot</td><td>.338</td><td>.342</td><td>.167</td><td>.270</td><td>.223</td><td>.277</td><td>.111</td><td>.156</td><td>.034</td><td>.012</td><td>.086</td><td>.002</td><td>.170</td></tr><tr><td>Setwise-3B</td><td>Zeroshot</td><td>.143</td><td>.175</td><td>.120</td><td>.102</td><td>.077</td><td>.079</td><td>.154</td><td>.053</td><td>.154</td><td>.017</td><td>.042</td><td>.021</td><td>.095</td></tr><tr><td>Setwise-3B</td><td>SFT</td><td>.220</td><td>.188</td><td>.104</td><td>.115</td><td>.091</td><td>.058</td><td>.167</td><td>.057</td><td>.099</td><td>.040</td><td>.034</td><td>.038</td><td>.101</td></tr><tr><td>Rank-R1-3B</td><td>Zeroshot</td><td>.137</td><td>.173</td><td>.119</td><td>.152</td><td>.100</td><td>.066</td><td>.178</td><td>.037</td><td>.077</td><td>.040</td><td>.060</td><td>.025</td><td>.097</td></tr><tr><td>Rank-R1-3B</td><td>GRPO</td><td>.184</td><td>.171</td><td>.137</td><td>.169</td><td>.090</td><td>.100</td><td>.165</td><td>.047</td><td>.111</td><td>.035</td><td>.059</td><td>.032</td><td>.108</td></tr><tr><td>Setwise-7B</td><td>Zeroshot</td><td>.236</td><td>.223</td><td>.161</td><td>.171</td><td>.149</td><td>.092</td><td>.183</td><td>.063</td><td>.149</td><td>.041</td><td>.104</td><td>.056</td><td>.136</td></tr><tr><td>Setwise-7B</td><td>SFT</td><td>.287</td><td>.301</td><td>.141</td><td>.239</td><td>.189</td><td>.137</td><td>.196</td><td>.071</td><td>.207</td><td>.070</td><td>.082</td><td>.082</td><td>.167</td></tr><tr><td>Rank-R1-7B</td><td>Zeroshot</td><td>.268</td><td>.248</td><td>.179</td><td>.221</td><td>.174</td><td>.103</td><td>.211</td><td>.044</td><td>.156</td><td>.033</td><td>.104</td><td>.059</td><td>.150</td></tr><tr><td>Rank-R1-7B</td><td>GRPO</td><td>.260</td><td>.285</td><td>.172</td><td>.242</td><td>.191</td><td>.104</td><td>.242</td><td>.043</td><td>.198</td><td>.043</td><td>.109</td><td>.083</td><td>.164</td></tr><tr><td>Setwise-14B</td><td>Zeroshot</td><td>.295</td><td>.322</td><td>.205</td><td>.248</td><td>.189</td><td>.147</td><td>.236</td><td>.087</td><td>.187</td><td>.080</td><td>.093</td><td>.076</td><td>.180</td></tr><tr><td>Setwise-14B</td><td>SFT</td><td>.220</td><td>.293</td><td>.154</td><td>.230</td><td>.201</td><td>.157</td><td>.203</td><td>.062</td><td>.194</td><td>.095</td><td>.099</td><td>.097</td><td>.167</td></tr><tr><td>Rank-R1-14B</td><td>Zeroshot</td><td>.301</td><td>.366</td><td>.221</td><td>.246</td><td>.217</td><td>.154</td><td>.250</td><td>.090</td><td>.170</td><td>.091</td><td>.116</td><td>.092</td><td>.193</td></tr><tr><td>Rank-R1-14B</td><td>GRPO</td><td>.312</td><td>.385</td><td>.212</td><td>.264</td><td>.226</td><td>.189</td><td>.275</td><td>.092</td><td>.202</td><td>.097</td><td>.119</td><td>.092</td><td>.205</td></tr></table>
138
+
139
+ Table 2: BRIGHT nDCG@10 results. All methods rerank the BM25 top-100 documents (first row). $\ast$: Results directly copied from the original paper, which uses a different BM25 ranking system (with a higher average nDCG@10 than our BM25).
140
+
141
+ points saved during training. We report the results in Figure 2. In the figure, we also include results obtained when using SFT on incremental parts of the training data.
142
+
143
+ From the figure, we observe that Rank-R1 requires significantly less data than Setwise SFT to achieve the same level of performance at the early training stage; however, this data-efficiency effect vanishes early on during training. Past $5 - 7\%$ of the training data, in fact, the two training approaches tend to track each other. SFT has a clear advantage over GRPO in that it is far less computationally expensive. On the other hand, GRPO adds a new capability to the reranker, introducing the ability to perform reasoning.
144
+
145
+ # 4.3 Reasoning intensive out-of-domain effectiveness
146
+
147
+ Next we consider results from our out-of-domain experiments, reported in Table 2.
148
+
149
+ We observe that the SOTA RankZephyr reranker, which does not incorporate reasoning, does not provide better rankings than BM25 in most datasets from the BRIGHT benchmark - effectively failing at the reranking task. This suggests that the BRIGHT benchmark poses a challenge for current SOTA LLM rerankers.
150
+
151
+ On the other hand, Rank-R1 trained with GRPO outperforms or is on par with both zero-shot and Setwise SFT models in most cases. Notably, when using the 14B model, Setwise SFT effectiveness plateaued and even fell below its zero-shot counterpart, suggesting that the large model trained on MS MARCO could not generalize to BRIGHT using the standard Setwise ap
152
+
153
+ proach. However, Rank-R1 based on the 14B model achieves the largest performance gain over Setwise SFT and even surpasses the GPT-4-based Listwise reranker baseline (RankGPT4). These results highlight that the reasoning process can help the model generalize to different domains and that strong reasoning abilities, along with larger model sizes, are crucial for LLM-based rerankers to be effective in reasoning-intensive ranking tasks.
154
+
155
+ # 5 Analysis
156
+
157
+ # 5.1 Reward score v.s. Response length
158
+
159
+ In Figure 3, we present the received reward values and model completion lengths logged during training for Rank-R1, across different model sizes. Rewards consistently increase throughout training, with smaller models showing a higher rate of increase, while larger models start with a higher initial reward.
160
+
161
+ Regarding completion length, larger models tend to generate longer responses; however, we do not observe a noticeable increase in length as training proceeds. This observation differs from the findings for DeepSeek-R1 (Guo et al., 2025). This may be attributed to two factors. First, we initialize RL training from an instruction-tuned model rather than a base model, meaning the instruction model already follows a reasonable reasoning process. Second, the MSMARCO passage ranking dataset is relatively simple compared to tasks like math or coding, where a longer reasoning process is more essential. Thus, extensive reasoning may not be necessary for achieving high effectiveness in this task.
162
+
163
+ ![](images/5526761c70a68372ea70cb8a85788dcf7940cef10806a774f6a4b4ff6cff2088.jpg)
164
+
165
+ ![](images/841c8ba914964229a06742463ad1a26b8534c8a57b907702c417e76170048961.jpg)
166
+ Figure 3: Rewards (top) and model completion length (bottom) obtained during GRPO training.
167
+ Figure 4: Example of completion. The Rank-R1 used in this example is based on Qwen2.5-3B-Instruct. The GRPO-Trained Rank-R1 gives the correct answer.
168
+
169
+ # 5.2 Case study
170
+
171
+ In Figure 4, we provide an example of Rank-R1's generation. We compare the outputs of the Zeroshot model and the model after GRPO training. Both models successfully follow the instruction by providing a reasoning process within the <think> span and predicting a relevant document label in the correct format. However, the Zeroshot model tends to merely describe what each document mentions and ultimately makes an incorrect prediction. In contrast, the GRPO-trained model focuses on the most relevant documents, compares them, and correctly selects the best one. In addition, we argue that Rank-R1's transparent reasoning process makes its predictions more explainable, which could be particularly important in domains such as medical document ranking.
172
+
173
+ # 6 Conclusion
174
+
175
+ In this paper, we introduce Rank-R1, which uses training based on the GRPO reinforcement learning strategy to enhance the reasoning process of an LLM-based Setwise reranker. Our training approach does not require human-annotated reasoning data; instead, it relies only on document rele
176
+
177
+ Query: common minerals list
178
+
179
+ # Zeroshot Rank-R1 Completion:
180
+
181
+ <think>The query is "common minerals list". Document [3] talks about mica being a common rock-forming mineral. Document [19] mentions that chemical sedimentary rocks are formed through precipitation of minerals. However, it does not provide a comprehensive list of common minerals. Documents [1], [2], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], and [20] do not mention anything related to common minerals.</think>
182
+ <answer>[3]</answer>
183
+
184
+ # GRPO-Trained Rank-R1 Completion:
185
+
186
+ <think>The query is "common minerals list". Document [3] talks about mica, which is a common rock-forming mineral. Document [4] also mentions common rock-forming minerals. Both documents seem relevant to the query. However, document [4] provides a list of common rock-forming minerals, which directly answers the query. </think> <answer>[4]</answer>
187
+
188
+ vance assessments. We find that RL training performs similarly to supervised fine-tuning on in-domain tasks. However, in reasoning-intensive ranking tasks, Rank-R1 based on a 14B LLM achieves considerably higher effectiveness than the SOTA non-reasoning Listwise reranker, highlighting the importance of incorporating a reasoning process in document reranking. Moreover, this reasoning capability might improve the explainability of black-box LLM-based rerankers, and offer new affordances in terms of search engine result presentation and fruition.
189
+
190
+ We have made our code open-source at https://github.com/ielab/llm-rankers/tree/main/Rank-R1.
191
+
192
+ # References
193
+
194
+ Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the trec 2020 deep learning track. Preprint, arXiv:2102.07662.
195
+ Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020. Overview of the trec 2019 deep learning track. *Preprint*, arXiv:2003.07820.
196
+ Luyu Gao, Xueguang Ma, Jimmy Lin, and Jamie Callan. 2023. Tevatron: An efficient and flexible toolkit for
197
+
198
+ neural retrieval. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 3120-3124, New York, NY, USA. Association for Computing Machinery.
199
+ Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv:2501.12948.
200
+ Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations.
201
+ Jimmy Lin, Xueguang Ma, Sheng-Chieh Lin, Jheng-Hong Yang, Ronak Pradeep, and Rodrigo Nogueira. 2021. Pyserini: A Python toolkit for reproducible information retrieval research with sparse and dense representations. In Proceedings of the 44th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR 2021), pages 2356-2362.
202
+ Xueguang Ma, Liang Wang, Nan Yang, Furu Wei, and Jimmy Lin. 2024. Fine-tuning llama for multi-stage text retrieval. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 2421-2425, New York, NY, USA. Association for Computing Machinery.
203
+ Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-shot listwise document reranking with a large language model. Preprint, arXiv:2305.02156.
204
+ OpenAI et al. 2024. Gpt-4 technical report. Preprint, arXiv:2303.08774.
205
+ Ronak Pradeep, Sahel Sharifymoghaddam, and Jimmy Lin. 2023. Rankzephyr: Effective and robust zero-shot listwise reranking is a breeze! Preprint, arXiv:2312.02724.
206
+ Qwen et al. 2025. Qwen2.5 technical report. Preprint, arXiv:2412.15115.
207
+ Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. Preprint, arXiv:2402.03300.
208
+ Hongjin SU, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, Ruoxi Sun, Jinsung Yoon, Sercan O Arik, Danqi Chen, and Tao Yu. 2025. BRIGHT: A realistic and challenging benchmark for reasoning-intensive retrieval. In The Thirteenth International Conference on Learning Representations.
209
+
210
+ Shuoqi Sun, Shengyao Zhuang, Shuai Wang, and Guido Zuccon. 2024. An investigation of prompt variations for zero-shot llm-based rankers. arXiv preprint arXiv:2406.14117.
211
+ Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2023. Is ChatGPT good at search? investigating large language models as re-ranking agents. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 14918-14937, Singapore. Association for Computational Linguistics.
212
+ Lewis Tunstall, Edward Beeching, Nathan Lambert, Nazneen Rajani, Kashif Rasul, Younes Belkada, Shengyi Huang, Leandro von Werra, Clémentine Fourrier, Nathan Habib, Nathan Sarrazin, Omar Sanseviero, Alexander M. Rush, and Thomas Wolf. 2023. Zephyr: Direct distillation of lm alignment. Preprint, arXiv:2310.16944.
213
+ Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. 2020. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl.
214
+ Zhichao Xu. 2024. Rankmamba: Benchmarking mamba's document ranking performance in the era of transformers. Preprint, arXiv:2403.18276.
215
+ Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog.
216
+ Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Bendersky. 2024a. Beyond yes and no: Improving zero-shot LLM rankers via scoring fine-grained relevance labels. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 358-370, Mexico City, Mexico. Association for Computational Linguistics.
217
+ Shengyao Zhuang, Bing Liu, Bevan Koopman, and Guido Zuccon. 2023. Open-source large language models are strong zero-shot query likelihood models for document ranking. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 8807-8817, Singapore. Association for Computational Linguistics.
218
+ Shengyao Zhuang, Honglei Zhuang, Bevan Koopman, and Guido Zuccon. 2024b. A setwise approach for effective and highly efficient zero-shot ranking with large language models. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '24, page 38-47, New York, NY, USA. Association for Computing Machinery.
219
+
220
+ <table><tr><td>Parameter</td><td>GRPO</td><td>SFT</td></tr><tr><td>learning_rate</td><td>1e-5</td><td>1e-5</td></tr><tr><td>batch_size</td><td>64</td><td>64</td></tr><tr><td>optimizer</td><td>AdamW</td><td>AdamW</td></tr><tr><td>lora_rank</td><td>16</td><td>16</td></tr><tr><td>max_prompt_length</td><td>4096</td><td>4096</td></tr><tr><td>max_completion_length</td><td>2048</td><td>na</td></tr><tr><td>group_size</td><td>8</td><td>na</td></tr></table>
221
+
222
+ Table 3: Hyper-parameters for training.
223
+
224
+ # A Prompt for Setwise reranker
225
+
226
+ # SYSTEM:
227
+
228
+ A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant provides the user with the answer enclosed within <answer> </answer> tags, i.e., <answer> answer here </answer>.
229
+
230
+ # USER:
231
+
232
+ Given the query: {"query)", which of the following documents is most relevant?
233
+
234
+ [1] {document1}
235
+ [2] {document2}
236
+
237
+ [20] {document20}
238
+
239
+ Please provide only the label of the most relevant document to the query, enclosed in square brackets, within the answer tags. For example, if the third document is the most relevant, the answer should be: <answer>[3]</answer>.
240
+
241
+ We use the above prompt for both zero-shot and supervised fine-tuning of Setwise rerankers. The only difference from the prompt used for Rank-R1 is that the reasoning instructions are removed.
242
+
243
+ # B Training hyper-parameters
244
+
245
+ We use the TRL (von Werra et al., 2020) library for both GRPO and SFT training. Both methods are trained with a LoRA adapter (Hu et al., 2022). For SFT, we compute the cross-entropy loss only on the answer span and ignore the loss on the prompt tokens. The hyperparameters are listed in Table 3, while other parameters follow the default settings of the TRL trainer.
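+
+ A sketch of what this setup might look like with TRL's `GRPOTrainer` and a PEFT LoRA config, using the values from Table 3. The model id, the toy dataset, and the reward function below are our own illustrative stand-ins for the Setwise prompts and the rule-based reward described in Section 2.2, not the authors' released training script.
+
+ ```python
+ from datasets import Dataset
+ from peft import LoraConfig
+ from trl import GRPOConfig, GRPOTrainer
+
+ # Toy dataset with the two columns assumed by the reward function below; real
+ # training uses the Setwise prompts built from MS MARCO (Section 2.2).
+ train_dataset = Dataset.from_list([
+     {"prompt": "Given the query: \"common minerals list\", which of the following "
+                "documents is most relevant?\n[1] ...\n[2] ...", "gold_label": 2},
+ ])
+
+ def reward_func(completions, gold_label, **kwargs):
+     # Stand-in for the rule-based reward of Section 2.2: 1.0 iff the predicted
+     # label matches the judged-relevant document, else 0.0.
+     return [1.0 if f"<answer>[{g}]</answer>" in c else 0.0
+             for c, g in zip(completions, gold_label)]
+
+ config = GRPOConfig(
+     output_dir="rank-r1-grpo",
+     learning_rate=1e-5,
+     per_device_train_batch_size=8,     # accumulated to an effective batch of 64
+     max_prompt_length=4096,
+     max_completion_length=2048,
+     num_generations=8,                 # |G| in Eq. 1
+ )
+
+ trainer = GRPOTrainer(
+     model="Qwen/Qwen2.5-7B-Instruct",
+     reward_funcs=reward_func,
+     args=config,
+     train_dataset=train_dataset,
+     peft_config=LoraConfig(r=16),
+ )
+ trainer.train()
+ ```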
data/2025/2503_06xxx/2503.06034/images/19bd12ea78b936391312be3542c89a51b650df6a46d4d0aae1b61c8758274193.jpg ADDED

Git LFS Details

  • SHA256: 62857e35415c6ba596c78f89e414daa7dcf85dff8133df6ccc4bd5fd59ee7306
  • Pointer size: 130 Bytes
  • Size of remote file: 32.5 kB
data/2025/2503_06xxx/2503.06034/images/201276d89336b192c93edab42d7fd1f3756ef49b8fe971ce2da2ea08cf452b45.jpg ADDED

Git LFS Details

  • SHA256: cc3aabc1d8bc2cfedcb8405c563e3edcda408447fb1621eaf791ee9ee5874eac
  • Pointer size: 129 Bytes
  • Size of remote file: 2.02 kB
data/2025/2503_06xxx/2503.06034/images/33f6b988f7f82d10a201c8e7946b04b7b4e90b5c40ca8fdca7c7d3c2952cd20d.jpg ADDED

Git LFS Details

  • SHA256: 87960f3d0726c23c98709852c105a1890e2b9c8df6f6061b1e7e2d51ed0f4eaf
  • Pointer size: 129 Bytes
  • Size of remote file: 7.93 kB
data/2025/2503_06xxx/2503.06034/images/5526761c70a68372ea70cb8a85788dcf7940cef10806a774f6a4b4ff6cff2088.jpg ADDED

Git LFS Details

  • SHA256: 8619b2031121ae74f423015fb2bdeac077f8fcc86b33a1d2b2b86b98ad4ccd0c
  • Pointer size: 130 Bytes
  • Size of remote file: 29.2 kB
data/2025/2503_06xxx/2503.06034/images/7129538c3cce7d017491c1eb6e639b82fe2d3ce74339f8151667ec7ad63d8f2f.jpg ADDED

Git LFS Details

  • SHA256: 9c2c596b7e53717db0b58e05fd188068f0b20e8e6dec4449b2c5e981835266ee
  • Pointer size: 130 Bytes
  • Size of remote file: 19.3 kB
data/2025/2503_06xxx/2503.06034/images/841c8ba914964229a06742463ad1a26b8534c8a57b907702c417e76170048961.jpg ADDED

Git LFS Details

  • SHA256: 01ed74561c1323491643a69b04d8ff050f7c6c6e1208da58fa13eca3bc8db0d4
  • Pointer size: 130 Bytes
  • Size of remote file: 34.1 kB
data/2025/2503_06xxx/2503.06034/images/8ec781e53444e1410a1f7eed8b074c0242f2a33deab5c7c2e018480cfe44d841.jpg ADDED

Git LFS Details

  • SHA256: d868a1b9b31c103588f438ead38bb94651201fd83e808a78b645591b9e88aa26
  • Pointer size: 129 Bytes
  • Size of remote file: 7.8 kB
data/2025/2503_06xxx/2503.06034/images/9ea4efa287dabb14400778b8c69544876c275e2979203a7c8a83ec4a6573c34b.jpg ADDED

Git LFS Details

  • SHA256: 888450de5dcf2b390757d88efddfbdc274f3f551cc38adebf43299255454e115
  • Pointer size: 130 Bytes
  • Size of remote file: 78.9 kB
data/2025/2503_06xxx/2503.06034/images/b61c0ad51072c6e5fb85db75333e280138e16595dfcf5a63de76802a6ebbf36d.jpg ADDED

Git LFS Details

  • SHA256: 73751cef53236873bfd40ec7ceddf8785bbd525c8e9b2e71ab0958ba57de6a78
  • Pointer size: 131 Bytes
  • Size of remote file: 138 kB
data/2025/2503_06xxx/2503.06034/images/c1476da0554e945ca909e8455bd43585b6e7854ec087c1ca7fadec2fbf1ae44c.jpg ADDED

Git LFS Details

  • SHA256: 87e783143f712c6cc01489aaa3a159c49b0c8d8ae6e290dbae20a3d67d29ce33
  • Pointer size: 130 Bytes
  • Size of remote file: 33.8 kB
data/2025/2503_06xxx/2503.06034/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_06xxx/2503.06072/ab21925c-33f1-43f4-a470-379226ecf3cb_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6895124ced226c583de73eb353c65083711d9cc4d6535b1054ebb2167abdb96f
3
+ size 4259637
data/2025/2503_06xxx/2503.06072/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2503_06xxx/2503.06072/images/0189318ba1f014900b935a12a8a899913bb183a223da671c7ff7c8182bd98b07.jpg ADDED

Git LFS Details

  • SHA256: 65835afab275d47f23eed3824dd5640d715ad46593151a4e60699fdd191a5df7
  • Pointer size: 130 Bytes
  • Size of remote file: 70.4 kB
data/2025/2503_06xxx/2503.06072/images/0203984daef82ac56f8e726f63158f0e8ae803834f3c08bef25d8ea99306b545.jpg ADDED

Git LFS Details

  • SHA256: 323dc8cd39b68c9d4ee176b6f5394aeb4ad3409b3abef05916c0946c2432b11a
  • Pointer size: 129 Bytes
  • Size of remote file: 7.44 kB
data/2025/2503_06xxx/2503.06072/images/08d7291a0233cbff66fedcc73c37117b8964ed7b6c23469843605665e99020be.jpg ADDED

Git LFS Details

  • SHA256: 13d5855e4dcca9ab18fdf28b86f46a12deb2319251d6014510a571650b99ffef
  • Pointer size: 129 Bytes
  • Size of remote file: 7.7 kB
data/2025/2503_06xxx/2503.06072/images/0c8fe2f79dc4a652c0633aa5a01560234ebbba1b0a05dc7717028cfc46d2171c.jpg ADDED

Git LFS Details

  • SHA256: d259d4ba095ca16e778a8349cc6ecdb63940294ee621e1606496efb300269b1b
  • Pointer size: 130 Bytes
  • Size of remote file: 24 kB
data/2025/2503_06xxx/2503.06072/images/0e0ea3161ef3fa6246f85462de2e851c6834b67ef42eaf4eda6b0e93ee38020b.jpg ADDED

Git LFS Details

  • SHA256: c3296934f726e11b4c3660c8c37873b93e20470081306184a50f743c6b33430e
  • Pointer size: 131 Bytes
  • Size of remote file: 159 kB
data/2025/2503_06xxx/2503.06072/images/0e1b8367982f98fb83896a36de0896732e87edf014aceff48385292f06f3f2c6.jpg ADDED

Git LFS Details

  • SHA256: 4efc182e6255eb6b3d60c3e6a207033eee3349b66a353a83b92f2afc4ac60f5b
  • Pointer size: 129 Bytes
  • Size of remote file: 4.3 kB
data/2025/2503_06xxx/2503.06072/images/10a4b3c0c7e978ebb888e89286bc4d170fab570434c1cd29338a15ada828e765.jpg ADDED

Git LFS Details

  • SHA256: a7269eece210943d5adad5fb48d9c11608662ad2b8dc7011180be464b2eb235b
  • Pointer size: 129 Bytes
  • Size of remote file: 6.81 kB
data/2025/2503_06xxx/2503.06072/images/1307a3e1a3f64b78100b2aa1f683beae0d4ead4d1415f8fc8875bf98f75a4b60.jpg ADDED

Git LFS Details

  • SHA256: 5f9bf60c8b0210be8c645e80f051a10159a92a59c8e6e038614bdf5fa0d71903
  • Pointer size: 129 Bytes
  • Size of remote file: 5.19 kB
data/2025/2503_06xxx/2503.06072/images/15fd582f6e71b50f45d63f0917378c25b08d547dc161b8ce4ecd34d00f1a4994.jpg ADDED

Git LFS Details

  • SHA256: 94e59aaba7c411494b5a84401888b36280bdef4d2938bccb6594c13655c4b6c5
  • Pointer size: 130 Bytes
  • Size of remote file: 10.5 kB
data/2025/2503_06xxx/2503.06072/images/17c4fdfd8a06ef096d0d51f9701f9caa167a2d979a4d240fbdc26f1d6da84e92.jpg ADDED

Git LFS Details

  • SHA256: 5a164b727c6258541d93091b8a7c940c2379d9c1d31ed53b4c8acd985ffacecb
  • Pointer size: 130 Bytes
  • Size of remote file: 12.5 kB
data/2025/2503_06xxx/2503.06072/images/182497f6d38bc1c4e889271e45996f2cf73b13809a7a085e35d48d14dd4f5ac7.jpg ADDED

Git LFS Details

  • SHA256: 6bedce935975de18c01262d242248d81564aa104199c78b02c3a66a5b08bb9ca
  • Pointer size: 129 Bytes
  • Size of remote file: 4.53 kB
data/2025/2503_06xxx/2503.06072/images/1bf98390f938c7a7467a23dafc76ae0d83d73cc263774a37047f8e6a67780fc3.jpg ADDED

Git LFS Details

  • SHA256: 093ace9ad53fd4c071a747d3daf2b8cc4ecbcf69d96395524c6eebf74cef2a20
  • Pointer size: 131 Bytes
  • Size of remote file: 209 kB
data/2025/2503_06xxx/2503.06072/images/1f580d3caa54a8c1ad14eeeb0b9524ea6a25be9c120f9ea3be6868f2c589dddb.jpg ADDED

Git LFS Details

  • SHA256: 2b4647c24ad793efba695db46c8494a8fbe9878c309445c01ce0b05122d55d2a
  • Pointer size: 130 Bytes
  • Size of remote file: 57.2 kB
data/2025/2503_06xxx/2503.06072/images/2019b1dd85bc1c3e4845446e15902307a368b391de1c78ed4e83e4e89106ee70.jpg ADDED

Git LFS Details

  • SHA256: a073ce757ef2a8a05bddd5d99168726e588dd99387fb6209a90be8323938148f
  • Pointer size: 130 Bytes
  • Size of remote file: 12.6 kB
data/2025/2503_06xxx/2503.06072/images/2173ae04cebe2c3863c2c0cf9123d98bf615ef6051e6c4191bd55e5ce8a0efc3.jpg ADDED

Git LFS Details

  • SHA256: 15f7a3b4b772e9a932e3e9529ca1a0e2cb55bc790589659ca05cb28b2e30f697
  • Pointer size: 130 Bytes
  • Size of remote file: 20.6 kB
data/2025/2503_06xxx/2503.06072/images/223eb80a4314fa372e3c47c429314ba7ee1f9d9762237046ce7a5de74ef3485f.jpg ADDED

Git LFS Details

  • SHA256: 3458afb72b98c40bdf88e5b81bf9dbdfdc0cffd89ae500dfa29d4df97f2ffe32
  • Pointer size: 129 Bytes
  • Size of remote file: 6.59 kB
data/2025/2503_06xxx/2503.06072/images/27643558b74f8b2d7609f8b89f64b85023d6cd4592e7cce758abdb14c4ea0e83.jpg ADDED

Git LFS Details

  • SHA256: f300d67a0e074f4de14d0ab635783a55f5b3c01a666ef5c5c1cc3cc3a4c26ebb
  • Pointer size: 130 Bytes
  • Size of remote file: 48.9 kB
data/2025/2503_06xxx/2503.06072/images/27c46c383e853427ef82465e2281e439bf21aca6f673dfc01e3d6f5784f23286.jpg ADDED

Git LFS Details

  • SHA256: 01a8de49c955334e37950907d971517526a0b3c9146527bc32e981fbe963a892
  • Pointer size: 130 Bytes
  • Size of remote file: 11.8 kB
data/2025/2503_06xxx/2503.06072/images/35c7ba3fad913e20399bd9397d748298f1fcbcf0d604361c3137a8dc43a77ffc.jpg ADDED

Git LFS Details

  • SHA256: 86fc1ffdd771511d2c6811328e79ad895855a74ad767b04b51e31a495fd510b3
  • Pointer size: 131 Bytes
  • Size of remote file: 132 kB
data/2025/2503_06xxx/2503.06072/images/374ef5377b9dbc5e164405b18882c5dbe9bca35a19bc2d9fe189f202361f23f1.jpg ADDED

Git LFS Details

  • SHA256: b78fda39b2924482e18082aae7d6e4f8d38490e927c7722a8dc4f75056c5c11e
  • Pointer size: 131 Bytes
  • Size of remote file: 179 kB
data/2025/2503_06xxx/2503.06072/images/37e469b3c82780ef32d32ff106bd4f045eea6b9ff314edea6cb99e1afb1bdb45.jpg ADDED

Git LFS Details

  • SHA256: 5a5ba0cb307318bc117d2e2300257f19523cfb7ad6f5e06332bdede16ddfc184
  • Pointer size: 130 Bytes
  • Size of remote file: 22.9 kB
data/2025/2503_06xxx/2503.06072/images/3cd34579598d7287fbdf429f2b1bbf106b7fdf1d2083e7b62c81397797f94baf.jpg ADDED

Git LFS Details

  • SHA256: 160197acccf25d2f40e1d6f969ba232f242ceb7d621d092564ac09ab139ab734
  • Pointer size: 129 Bytes
  • Size of remote file: 9.64 kB
data/2025/2503_06xxx/2503.06072/images/424cd433d41e56ffc3a768b9a071e34032aaee6c4e7f2914229f2ea9be0fd921.jpg ADDED

Git LFS Details

  • SHA256: a5f773c58e13ba9099fa5d23988041f20bd96d100e0751dbdf32f3baae4ceda9
  • Pointer size: 129 Bytes
  • Size of remote file: 9.75 kB
data/2025/2503_06xxx/2503.06072/images/425d11a682cb1e770ff86eee388c25a4e602a1f0334ec8e07b9639b3f75048d3.jpg ADDED

Git LFS Details

  • SHA256: 93483da2c8891dd0a7ddd5287ce58244bd79c1008d064fec5132fbaca7f82dbe
  • Pointer size: 130 Bytes
  • Size of remote file: 14.1 kB
data/2025/2503_06xxx/2503.06072/images/44e7d69795c791fd1119416a2ee726d19789ea60d167a36338a21b7a80552f1c.jpg ADDED

Git LFS Details

  • SHA256: 9b7015ca5dea49856ce054b6d19501aff85b3695c6dca2b2d63110600a38b26b
  • Pointer size: 130 Bytes
  • Size of remote file: 22.8 kB
data/2025/2503_06xxx/2503.06072/images/4505beccc08a752eb0429b0d8551eb748ab3429b6a65a8d188898611bfe59041.jpg ADDED

Git LFS Details

  • SHA256: 3ecc30ccac67e3f93a0efe3b72735b649adba15ac705ded6a51d57e100edc72d
  • Pointer size: 129 Bytes
  • Size of remote file: 5.48 kB
data/2025/2503_06xxx/2503.06072/images/49f52cbadc78923e815b72e1986b3bd486d400ab3358db927b07ad349721325b.jpg ADDED

Git LFS Details

  • SHA256: 900a176ce3abd464bfb8666cfa05338bedcdab0ae96f45a2873e94e3faa8a724
  • Pointer size: 129 Bytes
  • Size of remote file: 5.6 kB
data/2025/2503_06xxx/2503.06072/images/4e5adc43cc428a4495ef911b14c8d6fbb5da3c24fb516dca29ffabf37a12c1ba.jpg ADDED

Git LFS Details

  • SHA256: b3b41de1a3f3f6d83d79c853983c592616257d7232ccdb69039437c08acf46c7
  • Pointer size: 131 Bytes
  • Size of remote file: 230 kB
data/2025/2503_06xxx/2503.06072/images/4f5590c99bea44f972908c6d46f573881e9049485d1de1a876f23547c228e2cd.jpg ADDED

Git LFS Details

  • SHA256: 879697bcfe45b4f64db1970e91ca72cc4217e78c9e3f82a7aab00c7fb620e652
  • Pointer size: 130 Bytes
  • Size of remote file: 53.6 kB
data/2025/2503_06xxx/2503.06072/images/50940339ea4cfe17073ed58d9e475f9e60810e2637fcfd510950681c9ef92d60.jpg ADDED

Git LFS Details

  • SHA256: 0e0e0a94313db9c460d127ea9c5814d4793d73ffe889393189badc24235f0860
  • Pointer size: 129 Bytes
  • Size of remote file: 4.74 kB