Chelsea707 commited on
Commit
991a313
·
verified ·
1 Parent(s): e055133

MinerU Batch 8f46ae6c-07be-4c29-a893-b2f0e7aaabad (Part 1/8)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_content_list.json +0 -0
  3. data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_model.json +0 -0
  4. data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf +3 -0
  5. data/2025/2504_06xxx/2504.06514/full.md +525 -0
  6. data/2025/2504_06xxx/2504.06514/images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg +3 -0
  7. data/2025/2504_06xxx/2504.06514/images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg +3 -0
  8. data/2025/2504_06xxx/2504.06514/images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg +3 -0
  9. data/2025/2504_06xxx/2504.06514/images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg +3 -0
  10. data/2025/2504_06xxx/2504.06514/images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg +3 -0
  11. data/2025/2504_06xxx/2504.06514/images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg +3 -0
  12. data/2025/2504_06xxx/2504.06514/images/51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg +3 -0
  13. data/2025/2504_06xxx/2504.06514/images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg +3 -0
  14. data/2025/2504_06xxx/2504.06514/images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg +3 -0
  15. data/2025/2504_06xxx/2504.06514/images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg +3 -0
  16. data/2025/2504_06xxx/2504.06514/images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg +3 -0
  17. data/2025/2504_06xxx/2504.06514/images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg +3 -0
  18. data/2025/2504_06xxx/2504.06514/images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg +3 -0
  19. data/2025/2504_06xxx/2504.06514/images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg +3 -0
  20. data/2025/2504_06xxx/2504.06514/images/e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg +3 -0
  21. data/2025/2504_06xxx/2504.06514/images/f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg +3 -0
  22. data/2025/2504_06xxx/2504.06514/layout.json +0 -0
  23. data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_content_list.json +1180 -0
  24. data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_model.json +1541 -0
  25. data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf +3 -0
  26. data/2025/2504_06xxx/2504.06614/full.md +240 -0
  27. data/2025/2504_06xxx/2504.06614/images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg +3 -0
  28. data/2025/2504_06xxx/2504.06614/images/48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg +3 -0
  29. data/2025/2504_06xxx/2504.06614/images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg +3 -0
  30. data/2025/2504_06xxx/2504.06614/images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg +3 -0
  31. data/2025/2504_06xxx/2504.06614/images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg +3 -0
  32. data/2025/2504_06xxx/2504.06614/images/c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg +3 -0
  33. data/2025/2504_06xxx/2504.06614/images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg +3 -0
  34. data/2025/2504_06xxx/2504.06614/layout.json +0 -0
  35. data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_content_list.json +0 -0
  36. data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_model.json +0 -0
  37. data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf +3 -0
  38. data/2025/2504_06xxx/2504.06821/full.md +442 -0
  39. data/2025/2504_06xxx/2504.06821/images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg +3 -0
  40. data/2025/2504_06xxx/2504.06821/images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg +3 -0
  41. data/2025/2504_06xxx/2504.06821/images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg +3 -0
  42. data/2025/2504_06xxx/2504.06821/images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg +3 -0
  43. data/2025/2504_06xxx/2504.06821/images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg +3 -0
  44. data/2025/2504_06xxx/2504.06821/images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg +3 -0
  45. data/2025/2504_06xxx/2504.06821/images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg +3 -0
  46. data/2025/2504_06xxx/2504.06821/images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg +3 -0
  47. data/2025/2504_06xxx/2504.06821/images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg +3 -0
  48. data/2025/2504_06xxx/2504.06821/images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg +3 -0
  49. data/2025/2504_06xxx/2504.06821/images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg +3 -0
  50. data/2025/2504_06xxx/2504.06821/images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg +3 -0
.gitattributes CHANGED
@@ -1268,3 +1268,11 @@ data/2025/2504_07xxx/2504.07199/82f4c002-5d7c-4071-8bfe-356ecda59aa3_origin.pdf
1268
  data/2025/2504_07xxx/2504.07419/453a4811-54b1-45ea-aec3-1df3f90fc05f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1269
  data/2025/2504_07xxx/2504.07491/dd063eed-9f04-4854-99e3-2a9fe4445e8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1270
  data/2025/2504_07xxx/2504.07615/c647032a-0691-45f0-914a-184f71e81f9c_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1268
  data/2025/2504_07xxx/2504.07419/453a4811-54b1-45ea-aec3-1df3f90fc05f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1269
  data/2025/2504_07xxx/2504.07491/dd063eed-9f04-4854-99e3-2a9fe4445e8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1270
  data/2025/2504_07xxx/2504.07615/c647032a-0691-45f0-914a-184f71e81f9c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1271
+ data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf filter=lfs diff=lfs merge=lfs -text
1272
+ data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf filter=lfs diff=lfs merge=lfs -text
1273
+ data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf filter=lfs diff=lfs merge=lfs -text
1274
+ data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_origin.pdf filter=lfs diff=lfs merge=lfs -text
1275
+ data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_origin.pdf filter=lfs diff=lfs merge=lfs -text
1276
+ data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf filter=lfs diff=lfs merge=lfs -text
1277
+ data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1278
+ data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eced9326d14a58a2ec14d34142b322eda122eb74eb5241d4293131e3f53e7eb0
3
+ size 1456325
data/2025/2504_06xxx/2504.06514/full.md ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill?
2
+
3
+ Chenrui Fan $^{1*}$ , Ming Li $^{1*}$ , Lichao Sun $^{2}$ , Tianyi Zhou $^{1}$
4
+
5
+ <sup>1</sup>University of Maryland; <sup>2</sup>Lehigh University
6
+
7
+ {cfan42, minglii, tianyi}@umd.edu
8
+
9
+ Project: https://github.com/tianyi-lab/MiP-Overthinking
10
+
11
+ "The Answer to the Great Question... Of Life, the Universe and Everything... is... Forty-two," said Deep Thought, with infinite majesty and calm.
12
+
13
+ The Hitchhiker's Guide to the Galaxy
14
+
15
+ # Abstract
16
+
17
+ We find that the response length of reasoning LLMs, whether trained by reinforcement learning or supervised learning, drastically increases for ill-posed questions with missing premises (MiP), ending up with redundant and ineffective thinking. This newly introduced scenario exacerbates the general overthinking issue to a large extent, which we name as the MiP-Overthinking. Such failures are against the "test-time scaling law" but have been widely observed on multiple datasets we curated with MiP, indicating the harm of cheap overthinking and a lack of critical thinking. Surprisingly, LLMs not specifically trained for reasoning exhibit much better performance on the MiP scenario, producing much shorter responses that quickly identify ill-posed queries. This implies a critical flaw of the current training recipe for reasoning LLMs, which does not encourage efficient thinking adequately, leading to the abuse of thinking patterns. To further investigate the reasons behind such failures, we conduct fine-grained analyses of the reasoning length, overthinking patterns, and location of critical thinking on different types of LLMs. Moreover, our extended ablation study reveals that the overthinking is contagious through the distillation of reasoning models' responses. These results improve the understanding of overthinking and shed novel insights into mitigating the problem.
18
+
19
+ # 1 Introduction
20
+
21
+ Reasoning abilities in large language models (LLMs) have become a cornerstone of advanced AI applications (Huang & Chang, 2023; Li et al., 2024; Ahn et al., 2024; Wang et al., 2025), powering breakthroughs in mathematical reasoning (Xiong et al., 2025; Xia et al., 2025), code generation (Liu et al., 2024), and commonsense question answering (Wang & Zhao, 2023). These gains often stem from the scaling law of model/dataset sizes (Kaplan et al., 2020) in both pre-training (Shao et al., 2024) and post-training, which unlock emergent capabilities such as step-by-step reasoning and reflection skills witnessed on OpenAI's GPT-o1 (OpenAI, 2024b) and the open-source DeepSeek-R1 (DeepSeek-AI et al., 2025). By leveraging supervised fine-tuning (SFT) on expert responses (Ye et al., 2025; Muennighoff et al., 2025) and/or reinforcement learning (RL) (DeepSeek-AI et al., 2025), these models are tailored to produce detailed multi-step reasoning paths, whose length increase usually associated with improved performance on complex tasks such as math reasoning and programming.
22
+
23
+ Despite the fascinating reasoning capabilities exhibited on recent models, there is growing concern about the efficiency and quality of the long reasoning process (Sui et al., 2025). Chen et al. (2025b) first raises the "overthinking" problem in reasoning LLMs, which is reflected by the excessively long reasoning paths generated for extremely simple queries. For example, even for questions like "What is the answer of 2 plus 3?", existing reasoning models might generate hundreds of response tokens.
24
+
25
+ ![](images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg)
26
+ Figure 1: Illustration of MiP-Overthinking. When queried by questions with missing premises, the response length of reasoning models increases excessively, and they cannot abstain from answering with MiP identified. The left shows a query with an undefined variable, while the right compares a well-defined GSM8K question with its MiP variant (with a critical numerical condition removed). Reasoning models' responses to MiP questions are much longer than those for well-defined questions and those generated by non-reasoning models. The left corner of each response reports the response length and thinking time by DeepSeek-R1.
27
+
28
+ ![](images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg)
29
+
30
+ In particular, the ill-posed queries are unsolvable due to the lack of a necessary premise or condition. We call the reasoning failure for the ill-posed queries Overthinking under Missing Premise (MiP-Overthinking). For example, the simplest MiP question is What is the value of $a$ ?<sup>1</sup>, as shown on the left part of Figure 1. Without providing any other information regarding $a$ , it is evidently unsolvable. However, DeepSeek-R1 generates thousands of tokens and spends several minutes thinking about this question before outputting the final meaningless answer. In this paper, we find that a trivial type of ill-posed queries will significantly exacerbate the overthinking of reasoning models, resulting in excessively redundant and meaningless thinking. In contrast, humans and even non-reasoning models are often immune to such scenarios and quickly end up by questioning the validity of the given query, indicating the critical thinking capability. This exposes a risk of the abuse of thinking patterns and a lack of critical thinking on the models trained for deep thinking. Ideally, a model with critical thinking skills is expected to identify the missing premise and quickly respond by either requesting clarification or gracefully indicating that it cannot proceed (Cole et al., 2023; Amayuelas et al., 2024).
31
+
32
+ MiP-Overthinking differs from the widely discussed overthinking issue (Cuadron et al., 2025), in which the query is usually well-defined, but a model applies much more reasoning than necessary for little benefit. MiP-Overthinking, by contrast, happens when the question itself is ill-posed and lacks sufficient information to be solved. For example, the right of Figure 1 presents a well-defined question from GSM8K and a MiP variant, where the latter triggers a drastic increase of the generated tokens on recent reasoning models compared with the general overthinking. Overthinking can be presented by the length difference between models addressing the same well-defined questions, while MiP-Overthinking can be presented by the additional tokens generated due to MiP. MiP-Overthinking further reveals the lack of critical thinking that questions the validity of ill-posed questions and quickly identifies MiP, thus abstaining from answering the questions. Moreover, we observe that reasoning models' ineffective and redundant thinking often cannot stop even after successful notice of MiP, violating the expectation of test-time scaling law. Hence, MiP-Overthinking indicates potential drawbacks of current training recipes of reasoning models.
33
+
34
+ To systematically investigate this issue, we construct a suite of MiP questions designed to trigger the overthinking failures in a controlled way. These include synthetic questions generated by Rule-based Formula (queries where a formula reference is empty or nonsensical) and careful modifications of established datasets across diverse levels of difficulties, including SVAMP, GSM8K, and MATH500. On the modified datasets of MiP questions, we empirically evaluate a wide range of state-of-the-art
35
+
36
+ LLMs, from reasoning models to non-reasoning models and from open-sourced models to proprietary models, to ensure the generalizability of our findings. Our analysis is mainly based on three evaluation metrics, the length of generated responses, the accuracy on well-defined questions, and the abstain rate on ill-posed questions with MiP.
37
+
38
+ Main Contributions: We present the first in-depth study of Overthinking under Missing Premise (MiP-Overthinking), which reveals a critical shortcoming in existing reasoning models: Although they appear to follow coherent reasoning patterns, they lack genuine critical thinking capabilities. To systematically analyze this issue, we curate four MiP datasets covering various difficulty levels and three ill-posed question generation strategies, i.e., Rule-Based Generation, Body-Question Swapping, and Essential-Premise Removal. We then evaluate a wide range of large language models including reasoning-based and non-reasoning ones. Our empirical results illuminate the differences in how models handle well-defined vs. MiP questions, ultimately offering insights into the limitations of existing reasoning models.
39
+
40
+ # Our key findings:
41
+
42
+ 1. Missing premise in questions induces reasoning models to generate significantly longer ( $2 \times$ to $4 \times$ more tokens) responses than general overthinking on well-defined questions. The increased tokens fail to help identify MiP in the ill-posed questions, surprisingly contradicting the widely-discussed test-time scaling law.
43
+ 2. In contrast, given MiP questions, non-reasoning models generate consistently shorter responses and quickly identify MiP, demonstrating greater robustness to the absence of critical information.
44
+ 3. Reasoning models respond differently to well-defined vs. MiP questions: they mostly follow stable chain-of-thoughts for the former, but are often trapped in a self-doubt loop, repeatedly revisiting the question, and guessing the user intentions under MiP, resulting in an explosion of tokens.
45
+ 4. Reasoning models often can notice the existence of MiP or identify it at an early stage, but they hesitate to commit to this judgment and keep outputting ineffective thinking.
46
+
47
+ # 2 Missing Premise Definition and Construction
48
+
49
+ # 2.1 Definition of Missing Premise
50
+
51
+ Prior to introducing the construction of our dataset and analyzing the behavior of reasoning models on problems with missing premises, we formally define the Missing Premise (MiP) problem to establish a rigorous foundation for our subsequent analysis.
52
+
53
+ Definition 1 (Missing Premise Problem). Let $\mathcal{Q}$ be a question, and let $P = \{P_{1},\dots ,P_{n}\}$ be a set of premises. Define the function mapping premises and a question to the set of logically valid answers as:
54
+
55
+ $$
56
+ \mathcal{F}(P, \mathcal{Q}) = \{A \mid P \vdash A,\ A \text{ is an answer resolving } \mathcal{Q}\} \tag{1}
57
+ $$
58
+
59
+ where $\vdash$ denotes logical entailment. Consider a proper subset $P' = P \setminus \{P_i\}$ for some $P_i \in P$ . The tuple $(P', \mathcal{Q})$ forms a missing premise problem if and only if:
60
+
61
+ $$
62
+ \left| \mathcal{F}(P, \mathcal{Q}) \right| = 1 \quad \text{and} \quad \left| \mathcal{F}\left(P^{\prime}, \mathcal{Q}\right) \right| \neq 1 \tag{2}
63
+ $$
64
+
65
+ This indicates that the removed premise $P_{i}$ is essential for uniquely determining the logically valid answer to the question $\mathcal{Q}$ .
66
+
67
+ According to Definition 1, an ideal reasoning system should efficiently identify the absence of a critical premise and terminate its inference process upon recognizing that the available information is insufficient to derive a unique solution to the given problem. However, our empirical analysis in Section 3.2 demonstrates that state-of-the-art reasoning models consistently fail to exhibit this capability. Instead, these models engage in extensive, redundant reasoning chains that consume significant computational resources without ultimately identifying the missing premise.
68
+
69
+ <table><tr><td>Dataset</td><td>Example</td><td>Diff</td><td>Count</td><td>Pair</td><td>Method</td></tr><tr><td>MiP-Formula</td><td>What is the value of ln(a+b)?</td><td>★</td><td>50</td><td>×</td><td>Rule-Based Generation</td></tr><tr><td>MiP-SVAMP</td><td>Paco had 26 salty cookies and 17 sweet cookies. He ate 14 sweet cookies and 9 salty cookies. How many salty cookies did Paeo have left? How many pencils does she have?</td><td>★</td><td>300</td><td>×</td><td>Body-Question Swapping</td></tr><tr><td>MiP-GSM8K</td><td>James decides to run 3 sprints 3 times a week. He runs 60-meters each sprint. How many total meters does he run a week?</td><td>★★</td><td>582</td><td>✓</td><td>Essential-Premise Removal</td></tr><tr><td>MiP-MATH</td><td>There are 360 people in my school. 15 take calculus, physics, and chemistry, and 15 don’t take any of them. 180 take calculus. Twice as many students take chemistry as take physics. 75 take both calculus and chemistry, and 75 take both physics and chemistry. Only 30 take both physics and calculus. How many students take physics?</td><td>★★★</td><td>58</td><td>✓</td><td>Essential-Premise Removal</td></tr></table>
70
+
71
+ Table 1: Statistics and examples of our curated MiP datasets. For GSM8K and MATH, a premise is removed from the original questions (crossed out) to create MiP questions. Diff represents the (estimated) difficulty for models to identify MiP. Count denotes the number of questions in the subset. Pair indicates whether each MiP question is associated with a well-defined original question. Method indicates the method used to generate the MiP question.
72
+
73
+ # 2.2 Overview of Data Construction
74
+
75
+ To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different difficulties. In addition, we also construct a synthetic dataset consisting of formulas with unassigned variables. Our ill-posed question generation employs three distinct methods covering three difficulty levels and three strategies to create MiP questions:
76
+
77
+ - **Rule-Based Generation:** This approach generates MiP questions through a principled formula construction process, where unassigned variables serve as the missing premises.
78
+ - Body-Question Swapping: We introduce logical inconsistencies by deliberately mismatching problem bodies with their corresponding questions from the original dataset. This creates scenarios where the premises and queries are fundamentally incompatible.
79
+ - Essential-Premise Removal: Through careful analysis of existing well-formed questions, we identify and remove critical premises that are necessary for logical resolution. This transformation preserves the question's structure while rendering it unsolvable.
80
+
81
+ The following sections provide a detailed overview of our data construction process for each dataset category. For comprehensive implementation details and additional methodological considerations, we refer readers to Appendix B.
82
+
83
+ MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through combinations of variables and operators, with a maximum recursion depth of three. While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables.
84
+
85
+ MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset with elementary-school-level math problems, where each instance consists of a problem body and an associated question. We generate MiP questions by randomly permuting the problem bodies and associated questions and then manually inspect them to avoid inadvertent cases. The resulting problems contain clear logical inconsistencies between their body and question components, which are easy for a human to identify.
86
+
87
+ MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a more complex mathematics dataset than SVAMP. The questions in GSM8K typically contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question before conducting human verification to filter out those questions that are still solvable in some way.
88
+
89
+ ![](images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg)
90
+ Figure 2: Response lengths, accuracy on well-defined questions, and abstain rate of reasoning/non-reasoning models on MiP questions from our MiP-GSM8K dataset. (1) Existing reasoning models generate significantly longer responses for MiP questions than well-defined questions, while non-reasoning models generate responses of similar lengths for both types of questions, indicating MiP-Overthinking for reasoning models. (2) For both questions, reasoning models generate longer responses than non-reasoning models, indicating General Overthinking. (3) Although the longer responses by reasoning models slightly improve the accuracy for well-defined questions, it does not enhance the abstain rate for MiP questions, indicating a contradiction on the test-time scaling law.
91
+
92
+ Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable.
93
+
94
+ MiP-MATH. For MATH 500 dataset (Hendrycks et al., 2021), which contains challenging mathematical questions at the competition level, it is difficult to build a rule-based filtering mechanism. Thus, we manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts.
95
+
96
+ # 3 Overthinking under Missing Premise
97
+
98
+ # 3.1 Evaluation Metrics
99
+
100
+ To systematically evaluate model responses under MiP, we conduct experiments with a diverse set of reasoning and non-reasoning models. For each model, we calculate the following metrics for the responses across different datasets:
101
+
102
+ - Response Length: The average number of tokens in the response, incorporating both reasoning steps and final answer components.
103
+ - Abstain Rate for MiP Question: The proportion of answers where the model explicitly identifies the missing premise and either declines to provide an answer or requests additional information necessary for solving the problem.
104
+ - Accuracy for Well-defined Question: The proportion of answers where the model produces a definitive response that aligns with the reference answer.
105
+
106
+ For datasets without reference answers (MiP-Formula and MiP-SVAMP), we only calculate the abstain rate for the questions. Response evaluation is performed using GPT-4o as an automated evaluator. Detailed experimental procedures and evaluation protocols are provided in Appendix A.
107
+
108
+ # 3.2 Main Results
109
+
110
+ Figure 2 compares average response length, accuracy on well-defined questions, and the abstain rate on MiP questions across a range of state-of-the-art LLMs, revealing several significant patterns in model behavior.
111
+
112
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Type</td><td colspan="2">MiP-Formula</td><td colspan="2">MiP-SWAMP</td><td rowspan="2">Type</td><td colspan="2">MiP-GSM8K</td><td colspan="2">MiP-MATH</td></tr><tr><td>Length↓</td><td>Abstain↑</td><td>Length↓</td><td>Abstain↑</td><td>Length↓</td><td>Abstain↑</td><td>Length↓</td><td>Abstain↑</td></tr><tr><td colspan="11">Non-Reasoning Models</td></tr><tr><td rowspan="2">Qwen2.5-32B-Instruct</td><td rowspan="2">MiP</td><td rowspan="2">285</td><td rowspan="2">44.0</td><td rowspan="2">128</td><td rowspan="2">98.3</td><td>MiP</td><td>219</td><td>44.0</td><td>525</td><td>15.4</td></tr><tr><td>Well-defined</td><td>246</td><td>0.5</td><td>1114</td><td>1.9</td></tr><tr><td rowspan="2">GPT-4o</td><td rowspan="2">MiP</td><td rowspan="2">338</td><td rowspan="2">70.0</td><td rowspan="2">122</td><td rowspan="2">96.3</td><td>MiP</td><td>202</td><td>46.9</td><td>487</td><td>15.4</td></tr><tr><td>Well-defined</td><td>212</td><td>0.5</td><td>472</td><td>1.9</td></tr><tr><td rowspan="2">Gemini 1.5</td><td rowspan="2">MiP</td><td rowspan="2">453</td><td rowspan="2">20.0</td><td rowspan="2">52</td><td rowspan="2">99.0</td><td>MiP</td><td>103</td><td>54.5</td><td>568</td><td>5.8</td></tr><tr><td>Well-defined</td><td>156</td><td>0.5</td><td>502</td><td>0.0</td></tr><tr><td rowspan="2">Gemma-2-27B-IT</td><td rowspan="2">MiP</td><td rowspan="2">204</td><td rowspan="2">85.7</td><td rowspan="2">89</td><td rowspan="2">92.0</td><td>MiP</td><td>131</td><td>53.8</td><td>338</td><td>38.5</td></tr><tr><td>Well-defined</td><td>148</td><td>0.3</td><td>305</td><td>11.5</td></tr><tr><td rowspan="2">Phi-3-medium-128k</td><td rowspan="2">MiP</td><td rowspan="2">1465</td><td rowspan="2">48.0</td><td rowspan="2">125</td><td rowspan="2">98.7</td><td>MiP</td><td>210</td><td>47.8</td><td>427</td><td>23.1</td></tr><tr><td>Well-defined</td><td>216</td><td>1.0</td><td>1549</td><td>3.8</td></tr><tr><td colspan="11">Reasoning Models</td></tr><tr><td 
rowspan="2">GPT-o1</td><td rowspan="2">MiP</td><td rowspan="2">1123</td><td rowspan="2">78.0</td><td rowspan="2">581</td><td rowspan="2">99.0</td><td>MiP</td><td>838</td><td>55.7</td><td>4189</td><td>30.8</td></tr><tr><td>Well-defined</td><td>348</td><td>0.3</td><td>2502</td><td>0.0</td></tr><tr><td rowspan="2">GPT-o1mini</td><td rowspan="2">MiP</td><td rowspan="2">958</td><td rowspan="2">66.0</td><td rowspan="2">639</td><td rowspan="2">96.7</td><td>MiP</td><td>762</td><td>40.0</td><td>2193</td><td>25.0</td></tr><tr><td>Well-defined</td><td>449</td><td>1.2</td><td>1913</td><td>0.0</td></tr><tr><td rowspan="2">GPT-o3mini</td><td rowspan="2">MiP</td><td rowspan="2">1025</td><td rowspan="2">76.0</td><td rowspan="2">1299</td><td rowspan="2">93.0</td><td>MiP</td><td>1516</td><td>23.7</td><td>3772</td><td>11.5</td></tr><tr><td>Well-defined</td><td>384</td><td>1.4</td><td>1553</td><td>0.0</td></tr><tr><td rowspan="2">DS Distill Qwen2.5-32B</td><td rowspan="2">MiP</td><td rowspan="2">12911</td><td rowspan="2">42.0</td><td rowspan="2">921</td><td rowspan="2">88.3</td><td>MiP</td><td>2302</td><td>24.6</td><td>9876</td><td>5.8</td></tr><tr><td>Well-defined</td><td>519</td><td>0.2</td><td>3246</td><td>0.0</td></tr><tr><td rowspan="2">DeepSeek R1</td><td rowspan="2">MiP</td><td rowspan="2">4757</td><td rowspan="2">6.0</td><td rowspan="2">1996</td><td rowspan="2">84.3</td><td>MiP</td><td>3246</td><td>16.5</td><td>7268</td><td>3.8</td></tr><tr><td>Well-defined</td><td>1226</td><td>0.2</td><td>3200</td><td>1.9</td></tr><tr><td rowspan="2">S1.1-32B</td><td rowspan="2">MiP</td><td rowspan="2">5284</td><td rowspan="2">18.0</td><td rowspan="2">3358</td><td rowspan="2">57.0</td><td>MiP</td><td>5024</td><td>16.0</td><td>9322</td><td>15.4</td></tr><tr><td>Well-defined</td><td>1896</td><td>0.2</td><td>5037</td><td>0.0</td></tr><tr><td rowspan="2">QwQ-32B</td><td rowspan="2">MiP</td><td rowspan="2">7937</td><td rowspan="2">0.0</td><td rowspan="2">3487</td><td 
rowspan="2">56.3</td><td>MiP</td><td>4780</td><td>10.1</td><td>10242</td><td>1.9</td></tr><tr><td>Well-defined</td><td>1896</td><td>0.2</td><td>5037</td><td>0.0</td></tr></table>
113
+
114
+ Table 2: Comparing response length and abstain rate across different MiP datasets. Shorter lengths and higher abstain rates are preferred. For each column, the top-3 preferred values are colored in green, otherwise red. MiP-Overthinking, reflected by longer responses with low abstain rates, is commonly observed on most existing reasoning models across all datasets, indicating a critical drawback of existing reasoning models.
115
+
116
+ Firstly, existing reasoning models (left side of the figure) display an explosive increase in response length when facing the MiP questions, often producing $2 - 4 \times$ more tokens than general overthinking on well-defined questions. For example, QwQ-32B (Team, 2025) and DeepSeek-R1 (DeepSeek-AI et al., 2025) exhibit a substantial increase from already long reasoning paths on well-defined questions (approximately 1,000 tokens for simple GSM8K questions) to highly lengthy outputs (more than 3,000 tokens) under missing premise conditions. On the contrary, no similar issues exist for non-reasoning models (right side of the figure), which generate similar token counts for both well-defined and MiP questions. This phenomenon directly illustrates the MiP-Overthinking phenomenon as introduced in the paper.
117
+
118
+ Secondly, comparing the token lengths on well-defined questions between the reasoning and non-reasoning models, reasoning models tend to produce longer responses, even for simple questions, than non-reasoning models, underscoring the inefficient and verbose responses of existing reasoning models. For example, for the non-reasoning models, it only takes approximately 200 tokens for them to generate the responses for well-defined questions, while taking 1,000 tokens for DeepSeek-R1 and 1,800 tokens for QwQ-32B to answer exactly the same questions. However, the explosive increase in extra tokens does not lead to correspondingly large accuracy improvements, as shown by the green line, highlighting the issue of General Overthinking.
119
+
120
+ Finally, the abstain rates (red line) on MiP questions reveal that although some reasoning models (e.g., GPT-o1) have promising capabilities in abstaining from the MiP questions, most of the other reasoning models are not able to abstain from the given MiP questions correctly despite the dramatically long reasoning paths. This phenomenon indicates that although most existing reasoning models have thinking and reasoning capabilities to some extent, they lack the critical thinking capabilities to "reject" ill-posed questions. By contrast, non-reasoning models, though they are not explicitly trained for reasoning, tend to strike a better balance, generating shorter answers that are more likely to acknowledge MiP when the question is ill-posed. This phenomenon reveals a surprising contradiction on test-time scaling law.
121
+
122
+ Moreover, Table 2 further presents the comparisons on length and abstain rate on other MiP datasets we curated. The preferred results are colored green (shorter responses and higher abstain rate for
123
+
124
+ <table><tr><td rowspan="2">Models</td><td rowspan="2">Type</td><td colspan="2">Alternatively</td><td colspan="2">Wait</td><td colspan="2">Check</td><td colspan="2">But</td><td colspan="2">Hypothesis</td><td colspan="2">Step</td></tr><tr><td>Cnt.</td><td>Δ</td><td>Cnt.</td><td>Δ</td><td>Cnt.</td><td>Δ</td><td>Cnt.</td><td>Δ</td><td>Cnt.</td><td>Δ</td><td>Cnt.</td><td>Δ</td></tr><tr><td colspan="14">Non-Reasoning Models</td></tr><tr><td rowspan="2">Qwen2.5-32B</td><td>MiP</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.3</td><td rowspan="2">0.2</td><td>0.0</td><td rowspan="2">0.0</td><td>4.3</td><td rowspan="2">-1.3</td></tr><tr><td>Well-defined</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td><td>0.0</td><td>5.6</td></tr><tr><td rowspan="2">GPT-4o</td><td>MiP</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.3</td><td rowspan="2">0.2</td><td>0.0</td><td rowspan="2">0.0</td><td>4.7</td><td rowspan="2">-1.5</td></tr><tr><td>Well-defined</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td><td>0.0</td><td>6.2</td></tr><tr><td rowspan="2">Gemini 1.5</td><td>MiP</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.1</td><td rowspan="2">0.1</td><td>0.0</td><td rowspan="2">0.0</td><td>1.6</td><td rowspan="2">-2.2</td></tr><tr><td>Well-defined</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>3.8</td></tr><tr><td rowspan="2">Gemma-2-27B</td><td>MiP</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.0</td><td rowspan="2">0.0</td><td>0.1</td><td rowspan="2">0.1</td><td>0.0</td><td rowspan="2">0.0</td><td>5.2</td><td rowspan="2">-0.5</td></tr><tr><td>Well-defined</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>5.7</td></tr><tr><td colspan="14">Reasoning Models</td></tr><tr><td rowspan="2">DS-Distill 
Qwen</td><td>MiP</td><td>11.5</td><td rowspan="2">11.4</td><td>19.7</td><td rowspan="2">19.3</td><td>1.0</td><td rowspan="2">0.8</td><td>40.1</td><td rowspan="2">39.3</td><td>38.4</td><td rowspan="2">38.0</td><td>54.9</td><td rowspan="2">42.2</td></tr><tr><td>Well-defined</td><td>0.1</td><td>0.4</td><td>0.2</td><td>0.8</td><td>0.4</td><td>12.7</td></tr><tr><td rowspan="2">DeepSeek R1</td><td>MiP</td><td>16.9</td><td rowspan="2">15.2</td><td>14.4</td><td rowspan="2">10.9</td><td>3.8</td><td rowspan="2">1.3</td><td>49.4</td><td rowspan="2">42.1</td><td>44.7</td><td rowspan="2">40.4</td><td>54.2</td><td rowspan="2">33.0</td></tr><tr><td>Well-defined</td><td>1.7</td><td>3.5</td><td>2.5</td><td>7.3</td><td>4.3</td><td>21.2</td></tr><tr><td rowspan="2">S1.1</td><td>MiP</td><td>42.0</td><td rowspan="2">38.0</td><td>21.9</td><td rowspan="2">15.9</td><td>5.5</td><td rowspan="2">2.5</td><td>87.2</td><td rowspan="2">74.1</td><td>84.8</td><td rowspan="2">77.0</td><td>79.9</td><td rowspan="2">50.9</td></tr><tr><td>Well-defined</td><td>4.0</td><td>6.0</td><td>3.0</td><td>13.1</td><td>7.8</td><td>29.0</td></tr><tr><td rowspan="2">QwQ</td><td>MiP</td><td>47.0</td><td rowspan="2">40.3</td><td>19.4</td><td rowspan="2">13.0</td><td>5.0</td><td rowspan="2">1.6</td><td>66.1</td><td rowspan="2">54.2</td><td>94.1</td><td rowspan="2">81.7</td><td>97.9</td><td rowspan="2">58.7</td></tr><tr><td>Well-defined</td><td>6.7</td><td>6.4</td><td>3.4</td><td>11.9</td><td>12.4</td><td>39.2</td></tr></table>
125
+
126
+ Table 3: Comparisons of reasoning-related token counts on MiP-GSM8K dataset. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, split by $\backslash n\backslash n$, where negative values are colored in green and positive in red. $\Delta$ denotes the difference between MiP and well-defined questions. When facing MiP questions, reasoning models encounter explosive growth in reasoning-related tokens and steps, indicating a severe abuse of thinking patterns, while non-reasoning models use fewer steps for MiP questions than well-defined ones.
127
+
128
+ MiP questions), and the worse results are colored red, from which we can easily discover that reasoning models are prone to generate long responses while having low abstain rates across all datasets, indicating the consistent MiP Overthinking issue of existing reasoning models. In addition, by comparing the behaviors of models on different datasets, we can observe that for the relatively harder dataset (MiP-MATH), all models generate relatively longer responses and obtain lower abstain rates, indicating that harder MiP questions require reasoning capabilities.
129
+
130
+ # 3.3 Thinking Patterns through Tokens
131
+
132
+ To gain deeper insight into the MiP-Overthinking issue, we compare the reasoning-related token distribution on the MiP-GSM8K dataset. As shown in Table 3, we break down the average usage of several token patterns related to the thinking process, as well as the number of steps for each model to solve the given questions. Specifically, values of alternatively, wait, check, and but can be directly counted from the model responses, including the thinking paths of reasoning models. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, split by $\backslash n\backslash n$.
133
+
134
+ Reasoning models exhibit a much higher occurrence of tokens such as alternatively, wait, and check, compared with non-reasoning models, whose frequencies remain close to zero, indicating their advanced thinking capabilities. However, when moving from well-defined to MiP questions, reasoning models encounter explosive growth in reasoning-related tokens, indicating a large redundancy in thinking patterns. Moreover, when comparing the changes of steps, reasoning models exhibit a large increase in step count for MiP questions, while non-reasoning models typically show fewer steps, suggesting they quickly conclude the question is unanswerable. With this gap, together with the consistently better abstain rates of the non-reasoning models, we conclude that the lengthy reasoning steps are mostly redundant and indicate self-doubt thinking patterns for reasoning models.
135
+
136
+ # 3.4 Step-level Similarities
137
+
138
+ To further assess how redundant the generated content becomes under MiP conditions, we examine the step-level similarity within the model's responses on our MiP-GSM8K dataset. Specifically, we divide each response into discrete steps, split by $\backslash n\backslash n$, and compute pairwise cosine similarity scores with embeddings generated by "all-MiniLM-L6-v2" (Reimers & Gurevych, 2019). The visualization is shown in Figure 3, where each value in the heatmap matrix represents the averaged cosine similarity between the corresponding step indices. The average similarity score is 0.45 for well-defined responses and 0.50 for MiP responses. The variances are 7.9e-3 and 8.2e-4, respectively.
139
+
140
+ As shown in the figure, responses to MiP questions have greater overall similarity across steps and lower variance, indicating considerable redundancy in the content. This means that, in many instances, the model revisits similar partial reasoning or repeats previous sentences with only minor changes, showing a
141
+
142
+ potential self-trapping issue. Together, these patterns confirm that MiP questions induce a high degree of repetitive content in reasoning models. Rather than terminating early to conclude for insufficient premise, the models fill their reasoning paths with repetitive re-checks and reiterations, significantly inflating token usage without improving real abstain rates.
143
+
144
+ ![](images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg)
145
+ Figure 3: The step-level similarity heatmaps for s1.1 responses towards well-defined (left) and MiP (right) questions in the MiP-GSM8K dataset. To avoid differences in matrix size, we only consider responses with more than 50 steps and visualize the average similarity matrix across the first 50 steps. The heatmap for MiP questions has a higher averaged similarity and lower variance, also shown in the heatmap, which indicates the considerable redundancy in its content when responding to MiP questions.
146
+
147
+ # 3.5 Thinking Patterns through Example
148
+
149
+ To further understand what happens in the reasoning chain of reasoning models when faced with an ill-posed input, we present an example of a reasoning model's response to a MiP question in Figure 4. We summarize five major thinking patterns we found in the example and highlight them with different colors. We can observe from the example that the model abuses these patterns to generate long responses, while the responses are not only redundant but also not helpful for the model to abstain from the given MiP question. More examples can be found in Appendix D.
150
+
151
+ # 4 Further Discussion
152
+
153
+ # 4.1 Do Models know premises are missing?
154
+
155
+ To investigate whether reasoning models recognize the potential unsolvability of questions during their reasoning process, we conducted a detailed analysis of their reasoning chains. We segmented each reasoning chain into discrete steps using $\backslash n \backslash n$ as delimiters and performed step-wise verification to detect whether models express doubt on the question solvability. We introduce two key metrics for this analysis: In-Process Suspicion Rate, which measures the percentage of responses where the model expresses doubt about solvability during reasoning, and First Suspicion Index, which captures the average step number at which the model first suspects the missing premise. To ensure robust evaluation, we employed GPT-4o to assess each step three times, using majority voting for our final step-level result. The quantitative results of this analysis are presented in Table 4.
156
+
157
+ As we can see from the table, most of the existing reasoning models have suspected that the given question might be unsolvable at the very early stage of their reasoning process, demonstrating the ability of reasoning models to recognize the potential MiP. However, these reasoning models lack critical thinking capabilities: they are prone to keep digging into the given unsolvable question by re-visiting the question and related definitions again and again and again, rather than questioning the solvability of the given question. Thus, as visualized in Figure 5, despite existing reasoning
158
+
159
+ 1 Revisit Question 2 Visit Knowledge 3 Propose Assumption 4 Self Doubt 5 Pause/Check
160
+
161
+ # Model Response for a MiP-Formula Question from s1.1-32B
162
+
163
+ Question: What is the value of $((\gamma)) + \Xi$
164
+
165
+ Model Response:
166
+
167
+ Okay, so I need to figure out the value of $((\gamma)) + \Xi$. Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $((\gamma))$, and then the Greek letter Xi ($\Xi$). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of.
168
+
169
+ Let me start by recalling what I know about gamma. In mathematics, $\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's an important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $((\gamma))$. The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook. For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $\lfloor \gamma \rfloor$. Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure.
170
+
171
+ Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context. Wait, the question is just asking for the value of $((\gamma)) + \Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants.
172
+
173
+ Let me consider if this could be a trick question. The double parentheses around gamma, $((\gamma))$ — maybe that's referring to the number of letters in the word "gamma" or something like that. Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So $((\gamma))$ could be 5. Then $\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\Xi$ represents 14. Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that. In the Greek numeral system, letters are assigned values as follows.
174
+
175
+ *...67 more paragraphs here...*
176
+
177
+ But just to check another angle. In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here.
178
+
179
+ Alternatively, in chemistry, $\gamma$ is used to denote surface tension, and $\Xi$ is not a standard symbol. In engineering, $\gamma$ is sometimes the shear strain. But again, not relevant.
180
+
181
+ Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context.
182
+
183
+ Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm.
184
+
185
+ Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, $\gamma$ is 3 and $\Xi$ is 60, so $3 + 60 = 63$ .
186
+
187
+ Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ .
188
+
189
+ *...21 more paragraphs here...*
190
+
191
+ Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical.
192
+
193
+ **Final Answer** $\boxed{63}$ </think> To determine the value of $((\gamma)) + \Xi$, we need to consider the possible meanings of the symbols $\gamma$ and $\Xi$.
194
+
195
+ 1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $\gamma$ (gamma) represents the value 3, and the letter $\Xi$ (Xi) represents the value 60. 2. **Double Parentheses**: The notation $((\gamma))$ might be a red herring or simply emphasize that $\gamma$ is being used as a numeral. The parentheses do not change the numerical value of $\gamma$.
196
+
197
+ Combining these values, we add the numerical values of $\gamma$ and $\Xi$: $\gamma = 3$ and $\Xi = 60$.
198
+
199
+ Thus, the value of $((\gamma)) + \Xi$ is $3 + 60 = 63$.
200
+
201
+ 63
202
+
203
+ Figure 4: An example of reasoning model (s1.1-32B) response to a MiP question. The response exhibits five distinct thinking patterns, highlighted in different colors: $①$ Revisit Question (yellow), where the model reexamines the original query; $②$ Visit Knowledge (red), where the model accesses domain-specific knowledge; $③$ Propose Assumption (blue), where the model proposes and investigates various hypotheses; $④$ Self Doubt (green), where the model questions its own reasoning and expresses uncertainty; and $⑤$ Pause/Check (purple), where the model pauses to review previous steps. These patterns demonstrate the model's complex but potentially inefficient reasoning process when confronted with missing premises.
204
+
205
+ models suspecting the solvability of most of the given MiP questions, they only abstain a very small proportion of them.
206
+
207
+ <table><tr><td rowspan="2">Model</td><td colspan="4">MiP-Formula</td><td colspan="4">MiP-GSM8K</td></tr><tr><td>DeepSeek-R1</td><td>DS-Qwen</td><td>QwQ</td><td>S1.1</td><td>DeepSeek-R1</td><td>DS-Qwen</td><td>QwQ</td><td>S1.1</td></tr><tr><td>In-Process Suspicion Rate</td><td>100%</td><td>100%</td><td>100%</td><td>100%</td><td>95.5%</td><td>83.3%</td><td>99.6%</td><td>100%</td></tr><tr><td>In-Process First Suspicion Index</td><td>1.32</td><td>1.36</td><td>1.42</td><td>1.16</td><td>2.01</td><td>3.90</td><td>1.77</td><td>1.61</td></tr></table>
208
+
209
+ Table 4: The in-process insufficiency suspicion information across different reasoning models on MiP-Formula and MiP-GSM8K datasets. The in-process insufficiency suspicion is defined as when the reasoning model suspects the given question is unsolvable during its thinking process. In-Process Suspicion Rate represents what percentage of the samples trigger the in-process suspicion. First Suspicion Index is the averaged step index where the model first suspects the question's validity. Most reasoning models do notice the existence of MiP at the very early steps, but they still suffer from low abstain rates and cannot confidently stop the thinking.
210
+
211
+ Based on the above observations, we conclude that reasoning models actually have the capabilities to find out that the given MiP question is not solvable, but they do not "dare" to abstain it. Thus, our MiP-Overthinking issue indicates the lack of critical thinking abilities of reasoning models.
212
+
213
+ ![](images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg)
214
+ Figure 5: The transition flow between in-process suspicion of MiP and the final successful abstention on different reasoning models. For each Sankey diagram, the left bars represent whether the model suspects the given question is unsolvable during its thinking process, i.e., Suspected or Unsuspected; the right bars represent the final abstention, categorized into Abstain (preferred) or Non-abstain. Most existing reasoning models have suspected that the given question might be unsolvable, but only for a very small portion, the models insist on their suspicion.
215
+
216
+ ![](images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg)
217
+
218
+ ![](images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg)
219
+
220
+ ![](images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg)
221
+
222
+ # 4.2 What Caused MiP-Overthinking?
223
+
224
+ Figure 2 demonstrates that MiP-Overthinking manifests across both RL-based and SFT-based reasoning models. We hypothesize this phenomenon primarily originates from inadequate length constraints during the rule-based reinforcement learning phase of RL-based models, subsequently propagating to SFT-based models through distillation.
225
+
226
+ Current RL-based reasoning models predominantly employ rule-based training focused on format and accuracy rewards (Shao et al., 2024; Sui et al., 2025), with some incorporating step or length rewards to promote thorough reasoning (Face, 2025). This approach can lead to reward hacking, where models explore excessive reasoning patterns to achieve correct answers (Aggarwal & Welleck, 2025; Shen et al., 2025; Luo et al., 2025).
227
+
228
+ To demonstrate the transmissibility of this behavior through distillation (Xu et al., 2024), we fine-tune Qwen-2.5-7B-Instruct using a small set of 50 MiP responses generated by DeepSeek-R1 on the MiP-Formula dataset. As shown in Figure 6, the fine-tuned model exhibits clear MiP-Overthinking characteristics when evaluated on GSM8K: significantly increased response lengths for both MiP and well-defined questions, emergence of a length disparity between MiP and well-defined responses previously absent in the original model, and decreased abstain rates.
229
+
230
+ ![](images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg)
231
+ Figure 6: Comparison of response length, abstain rate of MiP, and accuracy of well-defined questions before and after tuning on 50 responses from DeepSeek-R1 on the MiP-Formula dataset. The results demonstrate rapid onset of MiP-Overthinking behavior after exposure to a small number of MiP examples during fine-tuning.
232
+
233
+ # 5 Related Work
234
+
235
+ # 5.1 Reasoning Large Language Model
236
+
237
+ Recent advances in Large Language Models (LLMs) have sparked significant research interest in enhancing their reasoning capabilities (Ahn et al., 2024; Besta et al., 2025; Chen et al., 2025a). Research has focused on improving these capabilities through various post-training approaches. Several studies have employed reinforcement learning techniques to guide models toward more effective reasoning strategies (Shao et al., 2024; Xiong et al., 2025; Cui et al., 2025). Additionally, researchers have demonstrated that instruction tuning on carefully curated, high-quality datasets can significantly enhance reasoning performance (Ye et al., 2025; Muennighoff et al., 2025).
238
+
239
+ While reasoning models have demonstrated impressive performance on various benchmarks, recent studies have begun to critically examine the quality and efficiency of their reasoning processes. Xia et al. (2025) conducted a comprehensive analysis of RLMs' reasoning quality, revealing significant redundancy in their solution approaches. Further investigations (Chen et al., 2025b; Cuadron et al., 2025; Qu et al., 2025; Liu et al., 2025) identified a concerning "overthinking" phenomenon, where reasoning models generate unnecessarily verbose solutions even for simple problems. Building on these observations, Kumar et al. (2025) demonstrated the potential security implications of this behavior by developing a slowdown attack that exploits overthinking through input perturbation.
240
+
241
+ # 5.2 Test-time Scaling
242
+
243
+ In contrast to earlier research on training-time scaling laws (Kaplan et al., 2020), recent literature has increasingly focused on test-time performance scaling strategies, which aim to enhance model performance by optimizing inference-time token generation (Snell et al., 2024; OpenAI, 2024a). These approaches can be categorized into several primary methodologies: parallel sampling techniques (Brown et al., 2024; Levi, 2024), which generate multiple candidate responses and select the optimal output; sequential refinement approaches (Snell et al., 2024; Lee et al., 2025), which enable iterative improvement of previous outputs; and tree-based methods (Gandhi et al., 2024; Hou et al., 2025), which combine elements of both parallel and sequential approaches. While the prevailing consensus suggests that increased token generation during inference enhances reasoning capabilities, our investigation reveals a concerning counterpoint: under certain conditions, extended responses can lead to computational inefficiency and, paradoxically, degraded performance outcomes.
244
+
245
+ # 5.3 Models' Behavior Study in Ambiguous Condition
246
+
247
+ LLMs are prone to hallucination (Huang et al., 2025; Xu et al., 2025), generating non-existent conditions that compromise trustworthiness. An essential aspect of reliability is the ability to abstain under uncertainty. Prior work (Cole et al., 2023; Amayuelas et al., 2024; Zhou et al., 2023) has proposed benchmarks assessing LLMs' recognition of knowledge limits when facing ambiguous or challenging queries. Different from theirs, our study explores reasoning models under MiP condition. Surprisingly, we find these specialized models exhibit prolonged reasoning and inferior performance.
248
+
249
+ # 6 Conclusion
250
+
251
+ We introduce the Overthinking under Missing Premise (MiP-Overthinking) issue, which is a widespread but still under-explored phenomenon for current reasoning models. In this phenomenon, when faced with ill-defined unsolvable questions with missing premises, existing models generate dramatically long responses while having very low abstain rates. With systematic investigation of this phenomenon, our findings show that while these models sometimes suspect the given MiP question is not solvable at an early stage of the thinking process, they typically fail to act on those suspicions and instead generate repetitive and redundant thinking traces, producing a final answer that does not address the missing premises, indicating a lack of critical thinking capability. This behavior highlights a pressing gap: current training recipes for reasoning models, which emphasize thorough chains of thought, do not sufficiently reward critical thinking or early exit from unsolvable tasks.
252
+
253
+ # References
254
+
255
+ Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, and etc. Phi-3 technical report: A highly capable language model locally on your phone, 2024. URL https://arxiv.org/abs/2404.14219.
256
+ Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697.
257
+ Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/.
258
+ Alfonso Amayuelas, Kyle Wong, Liangming Pan, Wenhu Chen, and William Wang. Knowledge of knowledge: Exploring known-unknowns uncertainty with large language models, 2024. URL https://arxiv.org/abs/2305.13712.
259
+ Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, Tomasz Sternal, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, Łukasz Flis, Hannes Eberhard, Hubert Niewiadomski, and Torsten Hoefler. Reasoning language models: A blueprint, 2025. URL https://arxiv.org/abs/2501.11223.
260
+ Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787.
261
+ Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wanxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models, 2025a. URL https://arxiv.org/abs/2503.09567.
262
+ Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms, 2025b. URL https://arxiv.org/abs/2412.21187.
263
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
264
+ Jeremy R. Cole, Michael J. Q. Zhang, Daniel Gillick, Julian Martin Eisenschlos, Bhuwan Dhingra, and Jacob Eisenstein. Selectively answering ambiguous questions, 2023. URL https://arxiv.org/abs/2305.14613.
265
+ Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks, 2025. URL https://arxiv.org/abs/2502.08235.
266
+ Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456.
267
+
268
+ DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, and etc. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.
269
+ Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1.
270
+ Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D. Goodman. Stream of search (sos): Learning to search in language, 2024. URL https://arxiv.org/abs/2404.03683.
271
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021.
272
+ Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling, 2025. URL https://arxiv.org/abs/2501.11651.
273
+ Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey, 2023. URL https://arxiv.org/abs/2212.10403.
274
+ Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, 43(2):1-55, January 2025. ISSN 1558-2868. doi: 10.1145/3703155. URL http://dx.doi.org/10.1145/3703155.
275
+ Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models, 2020. URL https://arxiv.org/abs/2001.08361.
276
+ Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms, 2025. URL https://arxiv.org/abs/2502.02542.
277
+ Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking, 2025. URL https://arxiv.org/abs/2501.09891.
278
+ Noam Levi. A simple model of inference scaling laws, 2024. URL https://arxiv.org/abs/2410.16377.
279
+ Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024.
280
+ Changshu Liu, Shizhuo Dylan Zhang, Ali Reza Ibrahimzada, and Reyhaneh Jabbarvand. Codemind: A framework to challenge large language models for code reasoning, 2024. URL https://arxiv.org/abs/2402.09664.
281
+ Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey, 2025. URL https://arxiv.org/abs/2503.23077.
282
+ Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning, 2025. URL https://arxiv.org/abs/2501.12570.
283
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.
284
+
285
+ OpenAI. Learning to reason with llms, 2024a. URL https://openai.com/index/learning-to-reason-with-llms/.
286
+ OpenAI. OpenAI o1 System Card, December 2024b. URL https://cdn.openai.com/o1-system-card-20241205.pdf.
287
+ OpenAI. OpenAI o1-mini System Card, September 2024c. URL https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/.
288
+ OpenAI. OpenAI o3-mini System Card, January 2025. URL https://cdn.openai.com/o3-mini-system-card-feb10.pdf.
289
+ OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, and etc. Gpt-4 technical report, 2024. URL https://arxiv.org/abs/2303.08774.
290
+ Arkil Patel, Satwik Bhattamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168.
291
+ Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, Peng Li, Wei Wei, Jing Shao, Chaochao Lu, Yue Zhang, Xian-Sheng Hua, Bowen Zhou, and Yu Cheng. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond, 2025. URL https://arxiv.org/abs/2503.21614.
292
+ Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (eds.), Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3982-3992, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1410. URL https://aclanthology.org/D19-1410/.
293
+ Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300.
294
+ Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models, 2025. URL https://arxiv.org/abs/2503.04472.
295
+ Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314.
296
+ Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419.
297
+ Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, Soroosh Mariooryad, Yifan Ding, Xinyang Geng, and etc. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024a. URL https://arxiv.org/abs/2403.05530.
298
+ Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, and etc. Gemma 2: Improving open language models at a practical size, 2024b. URL https://arxiv.org/abs/2408.00118.
299
+ Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/.
300
+
301
+ Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/.
302
+ Yaojing Wang, Shengqiong Wu, Yuecheng Zhang, Shuicheng Yan, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey, 2025. URL https://arxiv.org/abs/2503.12605.
303
+ Yuqing Wang and Yun Zhao. Gemini in reasoning: Unveiling commonsense in multimodal large language models, 2023. URL https://arxiv.org/abs/2312.17661.
304
+ Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy, 2025. URL https://arxiv.org/abs/2404.05692.
305
+ Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613.
306
+ Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116.
307
+ Ziwei Xu, Sanjay Jain, and Mohan Kankanhalli. Hallucination is inevitable: An innate limitation of large language models, 2025. URL https://arxiv.org/abs/2401.11817.
308
+ Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387.
309
+ Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models, 2023. URL https://arxiv.org/abs/2302.13439.
310
+
311
+ # Table of Contents for Appendix
312
+
313
+ A Detailed Experimental Setup 17
314
+
315
+ A.1 Models 17
316
+ A.2 Evaluation Metrics 17
317
+ A.3 Generation Setting 17
318
+
319
+ B Data Construction Details 17
320
+ C Prompt Template for Evaluation 18
321
+ D Examples of Model Response 19
322
+
323
+ # A Detailed Experimental Setup
324
+
325
+ # A.1 Models
326
+
327
+ We leverage a series of non-reasoning and reasoning models for our study, from both open-source and proprietary sources with different training recipes. The non-reasoning models we use include Qwen2.5-32B-Instruct Team (2024), Gemma-2-27B-it Team et al. (2024b), Phi-3-medium-128k Abdin et al. (2024), GPT-4o OpenAI et al. (2024) and Gemini1.5 Team et al. (2024a). The reasoning models we use are QwQ-32B Team (2025), DeepSeek-R1-Distill-Qwen-32B DeepSeek-AI et al. (2025), S1.1 Muennighoff et al. (2025), DeepSeek-R1 DeepSeek-AI et al. (2025), GPT-o1 OpenAI (2024b), GPT-o1mini OpenAI (2024c) and GPT-o3mini OpenAI (2025).
328
+
329
+ # A.2 Evaluation Metrics
330
+
331
+ In Section 3.2, we measure response length by considering both reasoning and answer components. For open-source models, we employ model-specific tokenizers to calculate token counts, while for proprietary models, we obtain generation lengths via their APIs. To determine abstain rates, we parse responses by paragraphs (delimited by $\backslash n\backslash n$ ) and analyze the final two paragraphs as the model's conclusion. These conclusions, along with reference answers when available, are evaluated by GPT-4o to assess whether the model provides a definitive answer or abstains. For datasets with reference answers (GSM8K and MATH), GPT-4o also evaluates the correctness of the response. The prompt we use for evaluation can be found in Appendix C.
332
+
333
+ # A.3 Generation Setting
334
+
335
+ For all open-source models, we employ greedy decoding and utilize the default chat template specific to each model. We deliberately omit system prompts prior to posing questions to maintain consistency across evaluations. For proprietary models, we adhere to their default parameter configurations as provided by their respective APIs. In the case of GPT-o1mini and GPT-o3mini, we configure the 'reasoning_effort' parameter to the medium setting by default.
336
+
337
+ # B Data Construction Details
338
+
339
+ To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different qualities, including SVAMP, GSM8K, and MATH 500. In addition, we also construct a synthetic dataset, rule-based Formula, for evaluation.
340
+
341
+ MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through a combination of variables and operators, with a maximum recursion depth of three. The variable set comprises numerical values, Latin letters, and Greek symbols. The operator set includes arithmetic operators $(+, -)$ , set operators $(\cup, \supset)$ , mathematical functions $(\sin, \sqrt{\pi})$ , and construct operators $(\sum, \nabla)$ . To ensure the formulas are fundamentally unsolvable, we enforce the inclusion of at least one unassigned variable in each formula, excluding commonly recognized mathematical or physical constants such as $e$ , $\pi$ , and $g$ . While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables.
342
+
343
+ MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset comprising 1,000 elementary-school-level mathematical word problems, where each instance consists of a problem body and an associated question. The MiP questions can be generated by randomly permuting the problem bodies and associated questions. To maintain dataset integrity, we manually select 300 permuted questions after a thorough human evaluation to eliminate any inadvertently solvable questions that may exist. The resulting problems contain clear logical inconsistencies between their body and question components, making their unsolvability readily apparent without additional context.
344
+
345
+ MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a grade school mathematics dataset that presents more complex challenges compared to SVAMP. The questions in GSM8K typically
346
+
347
+ contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. The MiP question can be constructed by randomly removing a necessary premise from the original solvable question. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question. Subsequently, a thorough human verification is conducted to filter out those questions that are still solvable in some way and finally obtain 582 MiP questions. Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable.
348
+
349
+ MiP-MATH. For the MATH dataset (Hendrycks et al., 2021), which comprises challenging competition-level mathematical questions, it is hard to build a rule-based filtering mechanism before human evaluation. Thus, we directly read through all the questions in MATH500 and manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts.
350
+
351
+ # C Prompt Template for Evaluation
352
+
353
+ As we need LLM-as-a-judge to evaluate the open-end generations of the models in various experiments in this study, in this section we showcase the prompt template we use for each kind of evaluation.
354
+
355
+ For the evaluation of the models' answer accuracy and abstain rate, we adopt the following prompt templates designed for 'paired' and 'non-paired' data, respectively. As we observe that some models, for example Gemma-2-27B-IT, often output an additional $\backslash n\backslash n$ at the end of response, we take the last two paragraphs segmented by $\backslash n\backslash n$ to avoid passing in an empty string.
356
+
357
+ # Prompt Template for Response Evaluation of Formula and SVAMP
358
+
359
+ You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the answer explicitly says that the condition of the question is insufficient, you should return 0. If the model provides an answer that is a number or formula with variables, you should return 1. Please only return the number, no other text.
360
+
361
+ Model answer: [model_answer_short]
362
+
363
+ Figure 7: The prompt we use to evaluate the accuracy and abstain rate of the model on Formula and SVAMP. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.
364
+
365
+ # Prompt Template for Response Evaluation of GSM8K and MATH
366
+
367
+ You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the model output says that the condition of the question is insufficient, you should return 0. Otherwise, if the model gives a clear answer and matches the reference answer, you should return 1. If the model's answer does not match the reference answer, you should return 2. Please only return the number, no other text.
368
+
369
+ Model answer: [model_answer_short]
370
+
371
+ Reference answer: [reference_answer]
372
+
373
+ Figure 8: The prompt we use to evaluate the accuracy and abstain rate of the model on GSM8K and MATH. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.
374
+
375
+ We use the prompt template in Figure 9 to find the first paragraph where the model suspected a missing premise. We pass in the response sequentially by paragraph until GPT-4o gives a positive response. In practice we find it is not very stable, so we repeat this process 3 times and use the median value.
376
+
377
+ # Prompt Template for Response Evaluation of GSM8K and MATH
378
+
379
+ You are a helpful assistant. You will be given a paragraph which is part of the answer to a question. You need to identify if the paragraph doubt that the answer depends on some other unspecified condition.
380
+
381
+ Paragraph: [paragraph]
382
+
383
+ Answer in one word, yes or no.
384
+
385
+ Figure 9: The prompt we use to judge if the model suspects there is a missing premise in the response paragraph. [paragraph] is the part of the model response split by $\backslash n\backslash n$ .
386
+
387
+ # D Examples of Model Response
388
+
389
+ In this section, we present some examples of the model responses of both non-reasoning and reasoning models on MiP data. As we can see from Figure 10 and Figure 11, the non-reasoning models soon identify the missing premise issue of the question. They either abstain from answering the question, as in Figure 10, or kindly invite the user to provide more information. However, as we can see from Figure 12 and Figure 13, reasoning models generate extremely verbose answers on these two apparently premise-missing problems. What is worse, they fail to abstain from answering the question. The response in Figure 12 arrives at an absurd answer, and the model in Figure 13 generates a hallucinated answer based on its assumption rather than provided information.
390
+
391
+ # Model Response for a MiP-Formula Question from Gemini-1.5
392
+
393
+ Question: What is the value of $((\gamma)) + \Xi$
394
+
395
+ # Model Response:
396
+
397
+ The expression $((\gamma)) + \Xi$ involves two symbols that need to be defined to determine the value of the expression.
398
+
399
+ $\gamma$ is the Euler-Mascheroni constant, approximately equal to 0.5772156649. The double parentheses notation $(\gamma)$ is not a standard mathematical notation. Without further definition of this notation, we cannot evaluate this term.
400
+
401
+ $\Xi$ is the Riemann Xi function. It is related to the Riemann zeta function, but the notation $\Xi$ itself isn't a specific numerical value. The Riemann Xi function is defined as a function of a complex variable, not a constant. So $\Xi$ by itself doesn't have a specific numerical value unless a specific value is substituted for its argument.
402
+
403
+ Therefore, we cannot determine the value of $((\gamma)) + \Xi$ without knowing what $((\gamma))$ and the argument of $\Xi$ represent. The expression is incomplete without definitions for these symbols.
404
+
405
+ Final Answer: The final answer is undefined
406
+
407
+ Figure 10: An example of model response from Gemini-1.5 on the MiP-Formula dataset. The model quickly identifies the missing premise and abstains from answering.
408
+
409
+ # Model Response for a MiP-GSM8k Question from GPT-4o
410
+
411
+ Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?
412
+
413
+ # Model Response:
414
+
415
+ To determine the original price of the book, we generally need to know the discount or markdown percentage applied to calculate its selling price of $19.50. If Kyle bought the book for $19.50 and we know it was discounted, we can use the following formula to find out the original price:
416
+
417
+ $$
418
+ S e l l i n g P r i c e = \text {O r i g i n a l P r i c e} - (\text {O r i g i n a l P r i c e} \times \text {D i s c o u n t R a t e})
419
+ $$
420
+
421
+ Without additional information about discounts, promotions, or markdown rates applied, it's not possible to precisely determine the original price solely from the amount Kyle paid.
422
+
423
+ If you provide the discount percentage or rate, I can help calculate the original price for you!
424
+
425
+ Figure 11: An example of model response from GPT-4o on the MiP-GSM8k dataset. The model quickly identifies the missing premise and asks the user for more information.
426
+
427
+ # Model Response for a MiP-Formula Question from s1.1-32B
428
+
429
+ Question: What is the value of $((\gamma)) + \Xi$
430
+
431
+ # Model Response:
432
+
433
+ Okay, so I need to figure out the value of $((\gamma)) + \Xi$ . Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $((\gamma))$ , and then the Greek letter Xi (Ξ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of.
434
+
435
+ Let me start by recalling what I know about gamma. In mathematics, $\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $((\gamma))$ . The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook? For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $(\gamma)$ . Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure.
436
+
437
+ Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context? Wait, the question is just asking for the value of $((\gamma)) + \Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants.
438
+
439
+ Let me consider if this could be a trick question. The double parentheses around gamma(( $\gamma$ ) ) maybe that's referring to the number of letters in the word "gamma" or something like that? Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So (( $\gamma$ )) could be 5. Then $\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\Xi$ represents 14? Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that.
440
+
441
+ In the Greek numeral system, letters are assigned values as follows:
442
+
443
+ # .67 more paragraphs here.
444
+
445
+ But just to check another angle: In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here.
446
+
447
+ Alternatively, in chemistry, $\gamma$ is used to denote surface tension, and $\Xi$ is not a standard symbol. In engineering, $\gamma$ is sometimes the shear strain. But again, not relevant.
448
+
449
+ Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context.
450
+
451
+ Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm.
452
+
453
+ Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, $\gamma$ is 3 and $\Xi$ is 60, so $3 + 60 = 63$ .
454
+
455
+ Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ .
456
+
457
+ # .21 more paragraphs here
458
+
459
+ Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical.
460
+
461
+ *Final Answer** $\boxed{63}$ < /think> To determine the value of $((\gamma) + \Xi)$ , we need to consider the possible meanings of the symbols $(\gamma)$ and $(\Xi)$ .
462
+
463
+ 1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $(\gamma)$ (gamma) represents the value 3, and the letter $(\Xi)$ (Xi) represents the value 60. 2. **Double Parentheses**: The notation $((\gamma))$ might be a red herring or simply emphasize that $(\gamma)$ is being used as a numeral. The parentheses do not change the numerical value of $(\gamma)$ .
464
+
465
+ Combining these values, we add the numerical values of $(\gamma)$ and $(\Xi)$ : $-(\gamma = 3) - (\Xi = 60)$
466
+
467
+ Thus, the value of $((\gamma) + \Xi)$ is $(3 + 60 = 63)$ .
468
+
469
+ 63
470
+
471
+ Figure 12: An example of response from the s1.1 model on MiP-Formula data. The model spends lots of time doing inefficient and redundant reasoning before outputting a meaningless result.
472
+
473
+ # Model Response for a MiP-GSM8k Question from DeepSeek-R1
474
+
475
+ Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?
476
+
477
+ # Model Response:
478
+
479
+ Okay, let's see. Kyle bought a book for $19.50, and the question is asking for the original price. Hmm, wait a second. The problem mentions that it's last year's best-selling book, so maybe it was on sale or discounted? The question doesn't explicitly state that there was a discount, but it does say "last year's," which might imply that the book is now being sold at a reduced price.
480
+
481
+ But hold on, the problem doesn't give any specific information about a discount percentage or a markdown. Without that, how can we calculate the original price? Maybe there's some context or common knowledge I'm missing here. For example, sometimes best-selling books are sold at a standard discount, but that's not a universal rule.
482
+
483
+ Alternatively, maybe the question is testing understanding of something else, like sales tax or profit margins. But if it were sales tax, the original price would be less than $19.50, and the total after tax would be more. But the question is asking for the original price, implying that $19.50 is the price after some change.
484
+
485
+ Wait, perhaps the problem is incomplete? The user just says "Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" If there's no additional information given, like a percentage discount or a mention that the price was reduced, then it's impossible to determine the original price.
486
+
487
+ But maybe I need to make an assumption here. Since it's last year's best-selling book, it's common for such books to be discounted when a new edition comes out or after some time. Let me think, maybe the standard discount is $25\%$ or something? But the problem doesn't specify.
488
+
489
+ Wait, perhaps the problem was part of a larger set where the discount rate was mentioned earlier, and this is a follow-up question. But as it stands, with the information given here, there's no way to calculate the original price.
490
+
491
+ Alternatively, maybe the question is a trick question, where the original price is the same as what Kyle paid. But that doesn't make sense because it specifies "last year's best-selling book," which might imply it was sold at a different price before.
492
+
493
+ Let me check the problem statement again: "Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" There's no mention of a discount, a sale, or any other financial context.
494
+
495
+ If I were to approach this mathematically, let's denote the original price as P. If there was a discount, then $19.50 is equal to P minus some discount. But without knowing the discount rate, we can't set up an equation. For example, if the book was discounted by 20%, then the original price would be $19.50 / 0.80. But since we don't have that percentage, we can't proceed.
496
+
497
+ .24 more paragraphs here
498
+
499
+ Alternatively, if the answer is expected to be $19.50, but that doesn't make sense because the question is asking for the original price, implying it was different.
500
+
501
+ Alternatively, maybe the problem is in another language where "original price" refers to the price without tax, but in the US, sales tax isn't included in the price, so if \(19.50 is including tax, the original pre-tax price would be less. But again, without tax rate, can't compute.
502
+
503
+ Alternatively, if the problem is from a country where tax is included, but the question is asking for the pre-tax price, but again, no tax rate given.
504
+
505
+ This is really confusing. Since the problem is presented in English and mentions dollars, likely US context. In the US, sales tax is added at checkout, so the price Kyle paid would be the shelf price plus tax. If the question is asking for the pre-tax price, but again, no tax rate given.
506
+
507
+ Alternatively, maybe the book was on sale, and the original price is the same as the sale price, but that contradicts the wording.
508
+
509
+ Given all this, I think the most plausible answer, even though it's based on an assumption, is $26, assuming a 25% discount. Therefore, the original price was $26.<think> To determine the original price of the book, we need to consider that Kyle likely purchased it at a discounted price since it's "last year's best-selling book." A common assumption for such scenarios is a **25% discount**.
510
+
511
+ Let $P$ be the original price. If the discounted price is
512
+
513
+ $19.50, this represents 75% of the original price (100% - 25% discount):
514
+
515
+ $[0.75P = 19.50]$
516
+
517
+ Solving for $P$ :
518
+
519
+ [ P = \frac{19.50}{0.75} = 26 ]
520
+
521
+ *Original price of the book:**
522
+
523
+ 26
524
+
525
+ Figure 13: An example of model response from DeepSeek-R1 on the MiP-GSM8k dataset. After thinking for a long time, the model hallucinates an answer based on its assumption of the discount rate.
data/2025/2504_06xxx/2504.06514/images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg ADDED

Git LFS Details

  • SHA256: ef0111cfd39d40d7310dd996d46a946742008be13311368d05275944805d0699
  • Pointer size: 130 Bytes
  • Size of remote file: 27.1 kB
data/2025/2504_06xxx/2504.06514/images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg ADDED

Git LFS Details

  • SHA256: 0dce2e709b325209eb1630986d3420f8b562ebe730d113e5e85331c1235e68ae
  • Pointer size: 129 Bytes
  • Size of remote file: 3.6 kB
data/2025/2504_06xxx/2504.06514/images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg ADDED

Git LFS Details

  • SHA256: 9cb56c7733e9ab73112c0574ce8a565d72c33dbdd4551885367c243f541a05e7
  • Pointer size: 130 Bytes
  • Size of remote file: 92.2 kB
data/2025/2504_06xxx/2504.06514/images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg ADDED

Git LFS Details

  • SHA256: a0968e8daf516c8c364cf2079ebb52e346181a20052ebe2f9d669bf0befd934b
  • Pointer size: 130 Bytes
  • Size of remote file: 32.5 kB
data/2025/2504_06xxx/2504.06514/images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg ADDED

Git LFS Details

  • SHA256: de9af4f012879d69e06efbe9538acdf220266eb81998aa2ac9fb37c99cd0525d
  • Pointer size: 131 Bytes
  • Size of remote file: 140 kB
data/2025/2504_06xxx/2504.06514/images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg ADDED

Git LFS Details

  • SHA256: 637bc1e5d87ec3c803f5d59f0704be5e01cbe2a231f60748d995a722b82d0a3d
  • Pointer size: 130 Bytes
  • Size of remote file: 69.2 kB
data/2025/2504_06xxx/2504.06514/images/51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg ADDED

Git LFS Details

  • SHA256: cd4f8a61a8b70914aa0dc0898b9fad4515c09b74219d856e7a9335f58c395201
  • Pointer size: 129 Bytes
  • Size of remote file: 5.07 kB
data/2025/2504_06xxx/2504.06514/images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg ADDED

Git LFS Details

  • SHA256: e3a213f255911f9187d84bdcd928c8deb928390538bc2e06e98d59ff3e79a96a
  • Pointer size: 129 Bytes
  • Size of remote file: 5.59 kB
data/2025/2504_06xxx/2504.06514/images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg ADDED

Git LFS Details

  • SHA256: 25cc0a6337105bfb2a27bef4e9d860da9a5145830ed88e1c41a9310fcd007f64
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
data/2025/2504_06xxx/2504.06514/images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg ADDED

Git LFS Details

  • SHA256: a201be46c0be715291eb470ad61388da2575ae71121241cfe1a967dcbe64a6f0
  • Pointer size: 130 Bytes
  • Size of remote file: 58.2 kB
data/2025/2504_06xxx/2504.06514/images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg ADDED

Git LFS Details

  • SHA256: 2e9031164c9a37a60231d85c09eb35f1ba4e168743e291dcfc37f2ab12bc293e
  • Pointer size: 130 Bytes
  • Size of remote file: 28.2 kB
data/2025/2504_06xxx/2504.06514/images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg ADDED

Git LFS Details

  • SHA256: e354632cf9cc3745b86fb71ca5a1ee183d95b69e6e71a4fc2ce428998c5e4382
  • Pointer size: 129 Bytes
  • Size of remote file: 4.24 kB
data/2025/2504_06xxx/2504.06514/images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg ADDED

Git LFS Details

  • SHA256: 26f849d217ed6cf42c8eea7ceb95dae9118eec7a455db394298cfb9a50485abf
  • Pointer size: 130 Bytes
  • Size of remote file: 13.4 kB
data/2025/2504_06xxx/2504.06514/images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg ADDED

Git LFS Details

  • SHA256: 7574e80f0c4521b316b27e151da7f3c3d75b824b21bcda19a227a9fd6dd88098
  • Pointer size: 131 Bytes
  • Size of remote file: 119 kB
data/2025/2504_06xxx/2504.06514/images/e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg ADDED

Git LFS Details

  • SHA256: b825f02315e171df6989a8e1078a6417be055057ea710f0178ad5cd5a8cc75e2
  • Pointer size: 129 Bytes
  • Size of remote file: 7.05 kB
data/2025/2504_06xxx/2504.06514/images/f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg ADDED

Git LFS Details

  • SHA256: eae1f19af1c5af7f6456a0f2f83472c1e7c783f143824d8bb0e33e31feebbd3e
  • Pointer size: 129 Bytes
  • Size of remote file: 6.57 kB
data/2025/2504_06xxx/2504.06514/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_content_list.json ADDED
@@ -0,0 +1,1180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 153,
8
+ 101,
9
+ 843,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Lingzhe Zhang† \nPeking University \nBeijing, China \nzhang.lingzhe@stu.pku.edu.cn",
17
+ "bbox": [
18
+ 127,
19
+ 162,
20
+ 334,
21
+ 224
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Yunpeng Zhai† \nAlibaba Group \nBeijing, China \nzhaiyunpeng.zyp@alibaba-inc.com",
28
+ "bbox": [
29
+ 380,
30
+ 162,
31
+ 617,
32
+ 226
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Tong Jia* \nPeking University \nBeijing, China \njia.tong@oku.edu.cn",
39
+ "bbox": [
40
+ 694,
41
+ 164,
42
+ 836,
43
+ 226
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Xiaosong Huang \nPeking University \nBeijing, China \nhxs@stu.pku.edu.cn",
50
+ "bbox": [
51
+ 161,
52
+ 239,
53
+ 299,
54
+ 300
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Chiming Duan \nPeking University \nBeijing, China \nduanchiming@stu.pku.edu.cn",
61
+ "bbox": [
62
+ 397,
63
+ 238,
64
+ 599,
65
+ 301
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Ying Li* \nPeking University \nBeijing, China \nli.ying@pku.edu.cn",
72
+ "bbox": [
73
+ 700,
74
+ 238,
75
+ 833,
76
+ 301
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "ABSTRACT",
83
+ "text_level": 1,
84
+ "bbox": [
85
+ 83,
86
+ 309,
87
+ 183,
88
+ 321
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research.",
95
+ "bbox": [
96
+ 81,
97
+ 329,
98
+ 483,
99
+ 521
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "CCS CONCEPTS",
106
+ "text_level": 1,
107
+ "bbox": [
108
+ 83,
109
+ 536,
110
+ 220,
111
+ 550
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "- Software and its engineering $\\rightarrow$ Maintaining software.",
118
+ "bbox": [
119
+ 83,
120
+ 555,
121
+ 455,
122
+ 569
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "KEYWORDS",
129
+ "text_level": 1,
130
+ "bbox": [
131
+ 83,
132
+ 583,
133
+ 189,
134
+ 597
135
+ ],
136
+ "page_idx": 0
137
+ },
138
+ {
139
+ "type": "text",
140
+ "text": "Failure Management, Distributed Databases, Multi Agents",
141
+ "bbox": [
142
+ 83,
143
+ 602,
144
+ 434,
145
+ 616
146
+ ],
147
+ "page_idx": 0
148
+ },
149
+ {
150
+ "type": "text",
151
+ "text": "ACM Reference Format:",
152
+ "text_level": 1,
153
+ "bbox": [
154
+ 83,
155
+ 625,
156
+ 230,
157
+ 637
158
+ ],
159
+ "page_idx": 0
160
+ },
161
+ {
162
+ "type": "text",
163
+ "text": "Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492",
164
+ "bbox": [
165
+ 81,
166
+ 638,
167
+ 482,
168
+ 714
169
+ ],
170
+ "page_idx": 0
171
+ },
172
+ {
173
+ "type": "list",
174
+ "sub_type": "text",
175
+ "list_items": [
176
+ "$\\dagger$ Equal contribution.",
177
+ "*Corresponding author."
178
+ ],
179
+ "bbox": [
180
+ 83,
181
+ 734,
182
+ 197,
183
+ 758
184
+ ],
185
+ "page_idx": 0
186
+ },
187
+ {
188
+ "type": "text",
189
+ "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
190
+ "bbox": [
191
+ 81,
192
+ 780,
193
+ 482,
194
+ 852
195
+ ],
196
+ "page_idx": 0
197
+ },
198
+ {
199
+ "type": "text",
200
+ "text": "FSE '25, June 23-28, 2025, Trondheim, Norway",
201
+ "bbox": [
202
+ 83,
203
+ 852,
204
+ 299,
205
+ 863
206
+ ],
207
+ "page_idx": 0
208
+ },
209
+ {
210
+ "type": "text",
211
+ "text": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.",
212
+ "bbox": [
213
+ 83,
214
+ 864,
215
+ 472,
216
+ 875
217
+ ],
218
+ "page_idx": 0
219
+ },
220
+ {
221
+ "type": "text",
222
+ "text": "ACM ISBN 979-8-4007-1276-0/25/06...$15.00",
223
+ "bbox": [
224
+ 83,
225
+ 875,
226
+ 294,
227
+ 883
228
+ ],
229
+ "page_idx": 0
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "https://doi.org/10.1145/3696630.3728492",
234
+ "bbox": [
235
+ 83,
236
+ 883,
237
+ 272,
238
+ 895
239
+ ],
240
+ "page_idx": 0
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "1 INTRODUCTION",
245
+ "text_level": 1,
246
+ "bbox": [
247
+ 514,
248
+ 309,
249
+ 687,
250
+ 323
251
+ ],
252
+ "page_idx": 0
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25].",
257
+ "bbox": [
258
+ 513,
259
+ 328,
260
+ 913,
261
+ 383
262
+ ],
263
+ "page_idx": 0
264
+ },
265
+ {
266
+ "type": "text",
267
+ "text": "However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a $1\\%$ increase in financial losses [19]. Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them.",
268
+ "bbox": [
269
+ 513,
270
+ 383,
271
+ 913,
272
+ 494
273
+ ],
274
+ "page_idx": 0
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:",
279
+ "bbox": [
280
+ 511,
281
+ 494,
282
+ 913,
283
+ 729
284
+ ],
285
+ "page_idx": 0
286
+ },
287
+ {
288
+ "type": "list",
289
+ "sub_type": "text",
290
+ "list_items": [
291
+ "- Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies.",
292
+ "- Application Limitations. Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these"
293
+ ],
294
+ "bbox": [
295
+ 540,
296
+ 742,
297
+ 913,
298
+ 896
299
+ ],
300
+ "page_idx": 0
301
+ },
302
+ {
303
+ "type": "aside_text",
304
+ "text": "arXiv:2504.06614v1 [cs.SE] 9 Apr 2025",
305
+ "bbox": [
306
+ 22,
307
+ 275,
308
+ 58,
309
+ 700
310
+ ],
311
+ "page_idx": 0
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues.",
316
+ "bbox": [
317
+ 124,
318
+ 107,
319
+ 480,
320
+ 161
321
+ ],
322
+ "page_idx": 1
323
+ },
324
+ {
325
+ "type": "text",
326
+ "text": "To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management.",
327
+ "bbox": [
328
+ 81,
329
+ 166,
330
+ 480,
331
+ 290
332
+ ],
333
+ "page_idx": 1
334
+ },
335
+ {
336
+ "type": "text",
337
+ "text": "To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources.",
338
+ "bbox": [
339
+ 81,
340
+ 291,
341
+ 480,
342
+ 414
343
+ ],
344
+ "page_idx": 1
345
+ },
346
+ {
347
+ "type": "text",
348
+ "text": "Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution.",
349
+ "bbox": [
350
+ 81,
351
+ 416,
352
+ 480,
353
+ 553
354
+ ],
355
+ "page_idx": 1
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution.",
360
+ "bbox": [
361
+ 81,
362
+ 554,
363
+ 480,
364
+ 622
365
+ ],
366
+ "page_idx": 1
367
+ },
368
+ {
369
+ "type": "text",
370
+ "text": "Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness.",
371
+ "bbox": [
372
+ 81,
373
+ 623,
374
+ 480,
375
+ 719
376
+ ],
377
+ "page_idx": 1
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "2 PRELIMINARY EMPIRCAL STUDY",
382
+ "text_level": 1,
383
+ "bbox": [
384
+ 83,
385
+ 734,
386
+ 398,
387
+ 748
388
+ ],
389
+ "page_idx": 1
390
+ },
391
+ {
392
+ "type": "text",
393
+ "text": "In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process.",
394
+ "bbox": [
395
+ 81,
396
+ 752,
397
+ 480,
398
+ 794
399
+ ],
400
+ "page_idx": 1
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "2.1 System Roles",
405
+ "text_level": 1,
406
+ "bbox": [
407
+ 83,
408
+ 808,
409
+ 235,
410
+ 823
411
+ ],
412
+ "page_idx": 1
413
+ },
414
+ {
415
+ "type": "text",
416
+ "text": "We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method.",
417
+ "bbox": [
418
+ 81,
419
+ 825,
420
+ 480,
421
+ 895
422
+ ],
423
+ "page_idx": 1
424
+ },
425
+ {
426
+ "type": "table",
427
+ "img_path": "images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg",
428
+ "table_caption": [
429
+ "Table 1: Anomaly Detection Results for Each Node"
430
+ ],
431
+ "table_footnote": [],
432
+ "table_body": "<table><tr><td></td><td>Node1</td><td>Node2</td><td>Node3</td><td>Node4</td><td>Node5</td><td>Node6</td></tr><tr><td>p</td><td>39.68%</td><td>34.35%</td><td>59.64%</td><td>31.17%</td><td>77.60%</td><td>89.42%</td></tr><tr><td>r</td><td>99.01%</td><td>100.00%</td><td>98.02%</td><td>100.00%</td><td>96.04%</td><td>92.08%</td></tr><tr><td>f1</td><td>56.66%</td><td>51.14%</td><td>74.16%</td><td>47.53%</td><td>85.84%</td><td>90.73%</td></tr></table>",
433
+ "bbox": [
434
+ 522,
435
+ 131,
436
+ 903,
437
+ 202
438
+ ],
439
+ "page_idx": 1
440
+ },
441
+ {
442
+ "type": "text",
443
+ "text": "As shown in Table 1, $\\text{Node}_6$ achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles.",
444
+ "bbox": [
445
+ 511,
446
+ 220,
447
+ 911,
448
+ 290
449
+ ],
450
+ "page_idx": 1
451
+ },
452
+ {
453
+ "type": "text",
454
+ "text": "2.2 Data Roles",
455
+ "text_level": 1,
456
+ "bbox": [
457
+ 514,
458
+ 301,
459
+ 645,
460
+ 315
461
+ ],
462
+ "page_idx": 1
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm.",
467
+ "bbox": [
468
+ 511,
469
+ 320,
470
+ 911,
471
+ 388
472
+ ],
473
+ "page_idx": 1
474
+ },
475
+ {
476
+ "type": "table",
477
+ "img_path": "images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg",
478
+ "table_caption": [
479
+ "Table 2: Anomalies that can be Classified by Metrics and Logs"
480
+ ],
481
+ "table_footnote": [],
482
+ "table_body": "<table><tr><td>Source</td><td>CPU</td><td>Memory</td><td>Export</td><td>Import</td><td>Configuration</td></tr><tr><td>Metrics</td><td>✓</td><td>✓</td><td></td><td></td><td></td></tr><tr><td>Logs</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr></table>",
483
+ "bbox": [
484
+ 517,
485
+ 430,
486
+ 911,
487
+ 491
488
+ ],
489
+ "page_idx": 1
490
+ },
491
+ {
492
+ "type": "text",
493
+ "text": "As shown in Table 2, anomalies with an F1-score above $50\\%$ are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles.",
494
+ "bbox": [
495
+ 511,
496
+ 506,
497
+ 911,
498
+ 602
499
+ ],
500
+ "page_idx": 1
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "3 METHODOLOGY",
505
+ "text_level": 1,
506
+ "bbox": [
507
+ 514,
508
+ 614,
509
+ 686,
510
+ 628
511
+ ],
512
+ "page_idx": 1
513
+ },
514
+ {
515
+ "type": "text",
516
+ "text": "Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes.",
517
+ "bbox": [
518
+ 511,
519
+ 632,
520
+ 911,
521
+ 700
522
+ ],
523
+ "page_idx": 1
524
+ },
525
+ {
526
+ "type": "text",
527
+ "text": "In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management.",
528
+ "bbox": [
529
+ 511,
530
+ 702,
531
+ 913,
532
+ 896
533
+ ],
534
+ "page_idx": 1
535
+ },
536
+ {
537
+ "type": "header",
538
+ "text": "FSE '25, June 23-28, 2025, Trondheim, Norway",
539
+ "bbox": [
540
+ 84,
541
+ 75,
542
+ 303,
543
+ 85
544
+ ],
545
+ "page_idx": 1
546
+ },
547
+ {
548
+ "type": "header",
549
+ "text": "Lingzhe Zhang et al.",
550
+ "bbox": [
551
+ 812,
552
+ 75,
553
+ 911,
554
+ 85
555
+ ],
556
+ "page_idx": 1
557
+ },
558
+ {
559
+ "type": "image",
560
+ "img_path": "images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg",
561
+ "image_caption": [
562
+ "Figure 1: AgentFM Architecture"
563
+ ],
564
+ "image_footnote": [],
565
+ "bbox": [
566
+ 84,
567
+ 104,
568
+ 911,
569
+ 402
570
+ ],
571
+ "page_idx": 2
572
+ },
573
+ {
574
+ "type": "text",
575
+ "text": "The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively.",
576
+ "bbox": [
577
+ 81,
578
+ 450,
579
+ 482,
580
+ 561
581
+ ],
582
+ "page_idx": 2
583
+ },
584
+ {
585
+ "type": "text",
586
+ "text": "3.1 System Agents",
587
+ "text_level": 1,
588
+ "bbox": [
589
+ 83,
590
+ 578,
591
+ 246,
592
+ 594
593
+ ],
594
+ "page_idx": 2
595
+ },
596
+ {
597
+ "type": "text",
598
+ "text": "Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent.",
599
+ "bbox": [
600
+ 81,
601
+ 595,
602
+ 482,
603
+ 651
604
+ ],
605
+ "page_idx": 2
606
+ },
607
+ {
608
+ "type": "text",
609
+ "text": "As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly.",
610
+ "bbox": [
611
+ 81,
612
+ 652,
613
+ 482,
614
+ 720
615
+ ],
616
+ "page_idx": 2
617
+ },
618
+ {
619
+ "type": "text",
620
+ "text": "Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks.",
621
+ "bbox": [
622
+ 81,
623
+ 720,
624
+ 482,
625
+ 775
626
+ ],
627
+ "page_idx": 2
628
+ },
629
+ {
630
+ "type": "text",
631
+ "text": "3.2 Data Agents",
632
+ "text_level": 1,
633
+ "bbox": [
634
+ 83,
635
+ 794,
636
+ 227,
637
+ 809
638
+ ],
639
+ "page_idx": 2
640
+ },
641
+ {
642
+ "type": "text",
643
+ "text": "In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively.",
644
+ "bbox": [
645
+ 81,
646
+ 811,
647
+ 480,
648
+ 854
649
+ ],
650
+ "page_idx": 2
651
+ },
652
+ {
653
+ "type": "text",
654
+ "text": "Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate",
655
+ "bbox": [
656
+ 81,
657
+ 854,
658
+ 480,
659
+ 896
660
+ ],
661
+ "page_idx": 2
662
+ },
663
+ {
664
+ "type": "image",
665
+ "img_path": "images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg",
666
+ "image_caption": [
667
+ "Figure 2: System Agents Adaptation Workflow"
668
+ ],
669
+ "image_footnote": [],
670
+ "bbox": [
671
+ 516,
672
+ 444,
673
+ 915,
674
+ 664
675
+ ],
676
+ "page_idx": 2
677
+ },
678
+ {
679
+ "type": "text",
680
+ "text": "time-series data is converted into natural language descriptions using a large model. The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points.",
681
+ "bbox": [
682
+ 511,
683
+ 710,
684
+ 913,
685
+ 767
686
+ ],
687
+ "page_idx": 2
688
+ },
689
+ {
690
+ "type": "text",
691
+ "text": "Formally, let $\\mathbf{M} = \\{m_1, m_2, \\dots, m_n\\}$ represent the raw multidimensional metrics data, where each $m_i$ corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where $\\mathbf{M}_{\\mathrm{p}}$ is the denoised and imputed version of $M$ .",
692
+ "bbox": [
693
+ 511,
694
+ 767,
695
+ 913,
696
+ 835
697
+ ],
698
+ "page_idx": 2
699
+ },
700
+ {
701
+ "type": "equation",
702
+ "text": "\n$$\n\\mathbf {M} _ {\\mathrm {p}} = \\text {P r e p r o c e s s} (\\mathbf {M}) = \\left\\{m _ {1} ^ {\\prime}, m _ {2} ^ {\\prime}, \\dots , m _ {n} ^ {\\prime} \\right\\} \\tag {1}\n$$\n",
703
+ "text_format": "latex",
704
+ "bbox": [
705
+ 591,
706
+ 849,
707
+ 911,
708
+ 866
709
+ ],
710
+ "page_idx": 2
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "Next, the processed data is converted into natural language descriptions using a large language model $\\mathcal{L}$ , as shown in Equation 2,",
715
+ "bbox": [
716
+ 513,
717
+ 867,
718
+ 913,
719
+ 896
720
+ ],
721
+ "page_idx": 2
722
+ },
723
+ {
724
+ "type": "header",
725
+ "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents",
726
+ "bbox": [
727
+ 83,
728
+ 75,
729
+ 565,
730
+ 87
731
+ ],
732
+ "page_idx": 2
733
+ },
734
+ {
735
+ "type": "header",
736
+ "text": "FSE '25, June 23-28, 2025, Trondheim, Norway",
737
+ "bbox": [
738
+ 691,
739
+ 75,
740
+ 913,
741
+ 87
742
+ ],
743
+ "page_idx": 2
744
+ },
745
+ {
746
+ "type": "text",
747
+ "text": "where $\\mathbf{D}_{\\mathrm{nl}}$ represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data.",
748
+ "bbox": [
749
+ 81,
750
+ 106,
751
+ 480,
752
+ 148
753
+ ],
754
+ "page_idx": 3
755
+ },
756
+ {
757
+ "type": "equation",
758
+ "text": "\n$$\n\\mathbf {D} _ {\\mathrm {n l}} = \\mathcal {L} \\left(\\mathbf {M} _ {\\mathrm {p}}\\right) \\tag {2}\n$$\n",
759
+ "text_format": "latex",
760
+ "bbox": [
761
+ 236,
762
+ 162,
763
+ 480,
764
+ 176
765
+ ],
766
+ "page_idx": 3
767
+ },
768
+ {
769
+ "type": "text",
770
+ "text": "Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies.",
771
+ "bbox": [
772
+ 81,
773
+ 179,
774
+ 480,
775
+ 247
776
+ ],
777
+ "page_idx": 3
778
+ },
779
+ {
780
+ "type": "text",
781
+ "text": "To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs.",
782
+ "bbox": [
783
+ 81,
784
+ 247,
785
+ 480,
786
+ 316
787
+ ],
788
+ "page_idx": 3
789
+ },
790
+ {
791
+ "type": "text",
792
+ "text": "Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. Formally, for a given raw log sequence $L = \\{l_1, l_2, \\dots, l_N\\}$ , where $l_n$ represents an individual log entry, the parsing process transforms $L$ into $L_e = \\{e_1, e_2, \\dots, e_N\\}$ , where $e_i$ represents an event template. Consecutive identical events are merged, yielding $G' = \\{e_1', e_2', \\dots, e_C'\\}$ , where $C \\ll N$ and $e_i' = e_i \\times c$ represents the event $e_i$ with a count $c$ .",
793
+ "bbox": [
794
+ 81,
795
+ 316,
796
+ 480,
797
+ 455
798
+ ],
799
+ "page_idx": 3
800
+ },
801
+ {
802
+ "type": "text",
803
+ "text": "Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. Formally, given a sequence of raw log entries $L = \\{l_1, l_2, \\dots, l_N\\}$ , the LLM-based summarization transforms and compresses $L$ into a smaller set of key operational elements $O = \\{o_1, o_2, \\dots, o_M\\}$ , where $M \\ll N$ .",
804
+ "bbox": [
805
+ 81,
806
+ 455,
807
+ 483,
808
+ 539
809
+ ],
810
+ "page_idx": 3
811
+ },
812
+ {
813
+ "type": "text",
814
+ "text": "3.3 Task Agents",
815
+ "text_level": 1,
816
+ "bbox": [
817
+ 83,
818
+ 550,
819
+ 228,
820
+ 565
821
+ ],
822
+ "page_idx": 3
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis.",
827
+ "bbox": [
828
+ 81,
829
+ 568,
830
+ 480,
831
+ 665
832
+ ],
833
+ "page_idx": 3
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying.",
838
+ "bbox": [
839
+ 81,
840
+ 666,
841
+ 483,
842
+ 722
843
+ ],
844
+ "page_idx": 3
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "4 PRELIMINARY EVALUATION",
849
+ "text_level": 1,
850
+ "bbox": [
851
+ 83,
852
+ 732,
853
+ 357,
854
+ 747
855
+ ],
856
+ "page_idx": 3
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "4.1 Design",
861
+ "text_level": 1,
862
+ "bbox": [
863
+ 83,
864
+ 752,
865
+ 184,
866
+ 767
867
+ ],
868
+ "page_idx": 3
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times.",
873
+ "bbox": [
874
+ 81,
875
+ 770,
876
+ 480,
877
+ 866
878
+ ],
879
+ "page_idx": 3
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is",
884
+ "bbox": [
885
+ 81,
886
+ 867,
887
+ 480,
888
+ 896
889
+ ],
890
+ "page_idx": 3
891
+ },
892
+ {
893
+ "type": "text",
894
+ "text": "measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness.",
895
+ "bbox": [
896
+ 513,
897
+ 106,
898
+ 915,
899
+ 148
900
+ ],
901
+ "page_idx": 3
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "4.2 Results",
906
+ "text_level": 1,
907
+ "bbox": [
908
+ 514,
909
+ 161,
910
+ 620,
911
+ 175
912
+ ],
913
+ "page_idx": 3
914
+ },
915
+ {
916
+ "type": "text",
917
+ "text": "We utilize the function call feature to guide the model in generating \"yes\" or \"no\" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly.",
918
+ "bbox": [
919
+ 513,
920
+ 179,
921
+ 915,
922
+ 250
923
+ ],
924
+ "page_idx": 3
925
+ },
926
+ {
927
+ "type": "table",
928
+ "img_path": "images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg",
929
+ "table_caption": [
930
+ "Table 3: Anomaly Detection & Diagnosis Results"
931
+ ],
932
+ "table_footnote": [],
933
+ "table_body": "<table><tr><td>Task</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>Anomaly Detection</td><td>95.14%</td><td>97.03%</td><td>95.76%</td></tr><tr><td>Failure Diagnosis</td><td>89.61%</td><td>87.04%</td><td>87.62%</td></tr></table>",
934
+ "bbox": [
935
+ 547,
936
+ 292,
937
+ 880,
938
+ 349
939
+ ],
940
+ "page_idx": 3
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work.",
945
+ "bbox": [
946
+ 513,
947
+ 369,
948
+ 915,
949
+ 453
950
+ ],
951
+ "page_idx": 3
952
+ },
953
+ {
954
+ "type": "text",
955
+ "text": "Increase CPU Resources:",
956
+ "text_level": 1,
957
+ "bbox": [
958
+ 522,
959
+ 479,
960
+ 692,
961
+ 491
962
+ ],
963
+ "page_idx": 3
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit.",
968
+ "bbox": [
969
+ 522,
970
+ 493,
971
+ 905,
972
+ 532
973
+ ],
974
+ "page_idx": 3
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "Load Balancing:",
979
+ "text_level": 1,
980
+ "bbox": [
981
+ 522,
982
+ 535,
983
+ 635,
984
+ 547
985
+ ],
986
+ "page_idx": 3
987
+ },
988
+ {
989
+ "type": "text",
990
+ "text": "If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing.",
991
+ "bbox": [
992
+ 522,
993
+ 549,
994
+ 903,
995
+ 575
996
+ ],
997
+ "page_idx": 3
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "text": "Figure 3: Sample Mitigation Solutions from AgentFM",
1002
+ "bbox": [
1003
+ 535,
1004
+ 603,
1005
+ 890,
1006
+ 618
1007
+ ],
1008
+ "page_idx": 3
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "text": "We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation.",
1013
+ "bbox": [
1014
+ 513,
1015
+ 638,
1016
+ 915,
1017
+ 709
1018
+ ],
1019
+ "page_idx": 3
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "text": "5 CONCLUSION",
1024
+ "text_level": 1,
1025
+ "bbox": [
1026
+ 514,
1027
+ 720,
1028
+ 663,
1029
+ 734
1030
+ ],
1031
+ "page_idx": 3
1032
+ },
1033
+ {
1034
+ "type": "text",
1035
+ "text": "This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution.",
1036
+ "bbox": [
1037
+ 513,
1038
+ 739,
1039
+ 915,
1040
+ 835
1041
+ ],
1042
+ "page_idx": 3
1043
+ },
1044
+ {
1045
+ "type": "text",
1046
+ "text": "ACKNOWLEDGEMENT",
1047
+ "text_level": 1,
1048
+ "bbox": [
1049
+ 514,
1050
+ 849,
1051
+ 714,
1052
+ 862
1053
+ ],
1054
+ "page_idx": 3
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "text": "This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003).",
1059
+ "bbox": [
1060
+ 513,
1061
+ 867,
1062
+ 913,
1063
+ 896
1064
+ ],
1065
+ "page_idx": 3
1066
+ },
1067
+ {
1068
+ "type": "header",
1069
+ "text": "FSE '25, June 23-28, 2025, Trondheim, Norway",
1070
+ "bbox": [
1071
+ 83,
1072
+ 75,
1073
+ 307,
1074
+ 87
1075
+ ],
1076
+ "page_idx": 3
1077
+ },
1078
+ {
1079
+ "type": "header",
1080
+ "text": "Lingzhe Zhang et al.",
1081
+ "bbox": [
1082
+ 812,
1083
+ 75,
1084
+ 911,
1085
+ 87
1086
+ ],
1087
+ "page_idx": 3
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "text": "REFERENCES",
1092
+ "text_level": 1,
1093
+ "bbox": [
1094
+ 84,
1095
+ 104,
1096
+ 202,
1097
+ 119
1098
+ ],
1099
+ "page_idx": 4
1100
+ },
1101
+ {
1102
+ "type": "list",
1103
+ "sub_type": "ref_text",
1104
+ "list_items": [
1105
+ "[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749.",
1106
+ "[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22.",
1107
+ "[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428.",
1108
+ "[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57.",
1109
+ "[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084.",
1110
+ "[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78.",
1111
+ "[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352.",
1112
+ "[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762.",
1113
+ "[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736.",
1114
+ "[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213.",
1115
+ "[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189.",
1116
+ "[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring IIm-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219.",
1117
+ "[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110.",
1118
+ "[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904.",
1119
+ "[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on"
1120
+ ],
1121
+ "bbox": [
1122
+ 86,
1123
+ 122,
1124
+ 480,
1125
+ 727
1126
+ ],
1127
+ "page_idx": 4
1128
+ },
1129
+ {
1130
+ "type": "list",
1131
+ "sub_type": "ref_text",
1132
+ "list_items": [
1133
+ "Software Engineering (ICSE). IEEE, 1448-1460.",
1134
+ "[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397.",
1135
+ "[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. Deepralog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634.",
1136
+ "[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398.",
1137
+ "[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267.",
1138
+ "[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024).",
1139
+ "[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024).",
1140
+ "[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5.",
1141
+ "[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM).",
1142
+ "[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548.",
1143
+ "[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224.",
1144
+ "[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864.",
1145
+ "[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024).",
1146
+ "[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277.",
1147
+ "[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539.",
1148
+ "[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024)."
1149
+ ],
1150
+ "bbox": [
1151
+ 516,
1152
+ 109,
1153
+ 913,
1154
+ 731
1155
+ ],
1156
+ "page_idx": 4
1157
+ },
1158
+ {
1159
+ "type": "header",
1160
+ "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents",
1161
+ "bbox": [
1162
+ 83,
1163
+ 75,
1164
+ 563,
1165
+ 87
1166
+ ],
1167
+ "page_idx": 4
1168
+ },
1169
+ {
1170
+ "type": "header",
1171
+ "text": "FSE '25, June 23-28, 2025, Trondheim, Norway",
1172
+ "bbox": [
1173
+ 692,
1174
+ 75,
1175
+ 911,
1176
+ 87
1177
+ ],
1178
+ "page_idx": 4
1179
+ }
1180
+ ]
data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_model.json ADDED
@@ -0,0 +1,1541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.154,
7
+ 0.102,
8
+ 0.844,
9
+ 0.152
10
+ ],
11
+ "angle": 0,
12
+ "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.129,
18
+ 0.164,
19
+ 0.335,
20
+ 0.226
21
+ ],
22
+ "angle": 0,
23
+ "content": "Lingzhe Zhang† \nPeking University \nBeijing, China \nzhang.lingzhe@stu.pku.edu.cn"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.381,
29
+ 0.164,
30
+ 0.619,
31
+ 0.227
32
+ ],
33
+ "angle": 0,
34
+ "content": "Yunpeng Zhai† \nAlibaba Group \nBeijing, China \nzhaiyunpeng.zyp@alibaba-inc.com"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.696,
40
+ 0.165,
41
+ 0.838,
42
+ 0.227
43
+ ],
44
+ "angle": 0,
45
+ "content": "Tong Jia* \nPeking University \nBeijing, China \njia.tong@oku.edu.cn"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.162,
51
+ 0.24,
52
+ 0.3,
53
+ 0.301
54
+ ],
55
+ "angle": 0,
56
+ "content": "Xiaosong Huang \nPeking University \nBeijing, China \nhxs@stu.pku.edu.cn"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.398,
62
+ 0.239,
63
+ 0.6,
64
+ 0.302
65
+ ],
66
+ "angle": 0,
67
+ "content": "Chiming Duan \nPeking University \nBeijing, China \nduanchiming@stu.pku.edu.cn"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.701,
73
+ 0.239,
74
+ 0.834,
75
+ 0.302
76
+ ],
77
+ "angle": 0,
78
+ "content": "Ying Li* \nPeking University \nBeijing, China \nli.ying@pku.edu.cn"
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.084,
84
+ 0.31,
85
+ 0.184,
86
+ 0.323
87
+ ],
88
+ "angle": 0,
89
+ "content": "ABSTRACT"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.082,
95
+ 0.33,
96
+ 0.484,
97
+ 0.522
98
+ ],
99
+ "angle": 0,
100
+ "content": "Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research."
101
+ },
102
+ {
103
+ "type": "title",
104
+ "bbox": [
105
+ 0.084,
106
+ 0.537,
107
+ 0.221,
108
+ 0.551
109
+ ],
110
+ "angle": 0,
111
+ "content": "CCS CONCEPTS"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.084,
117
+ 0.556,
118
+ 0.456,
119
+ 0.57
120
+ ],
121
+ "angle": 0,
122
+ "content": "- Software and its engineering \\(\\rightarrow\\) Maintaining software."
123
+ },
124
+ {
125
+ "type": "title",
126
+ "bbox": [
127
+ 0.084,
128
+ 0.584,
129
+ 0.191,
130
+ 0.598
131
+ ],
132
+ "angle": 0,
133
+ "content": "KEYWORDS"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.084,
139
+ 0.603,
140
+ 0.436,
141
+ 0.617
142
+ ],
143
+ "angle": 0,
144
+ "content": "Failure Management, Distributed Databases, Multi Agents"
145
+ },
146
+ {
147
+ "type": "title",
148
+ "bbox": [
149
+ 0.084,
150
+ 0.626,
151
+ 0.231,
152
+ 0.638
153
+ ],
154
+ "angle": 0,
155
+ "content": "ACM Reference Format:"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.082,
161
+ 0.639,
162
+ 0.483,
163
+ 0.715
164
+ ],
165
+ "angle": 0,
166
+ "content": "Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.084,
172
+ 0.736,
173
+ 0.184,
174
+ 0.747
175
+ ],
176
+ "angle": 0,
177
+ "content": "\\(\\dagger\\) Equal contribution."
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.084,
183
+ 0.747,
184
+ 0.199,
185
+ 0.759
186
+ ],
187
+ "angle": 0,
188
+ "content": "*Corresponding author."
189
+ },
190
+ {
191
+ "type": "list",
192
+ "bbox": [
193
+ 0.084,
194
+ 0.736,
195
+ 0.199,
196
+ 0.759
197
+ ],
198
+ "angle": 0,
199
+ "content": null
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.082,
205
+ 0.781,
206
+ 0.483,
207
+ 0.853
208
+ ],
209
+ "angle": 0,
210
+ "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org."
211
+ },
212
+ {
213
+ "type": "text",
214
+ "bbox": [
215
+ 0.084,
216
+ 0.853,
217
+ 0.3,
218
+ 0.864
219
+ ],
220
+ "angle": 0,
221
+ "content": "FSE '25, June 23-28, 2025, Trondheim, Norway"
222
+ },
223
+ {
224
+ "type": "text",
225
+ "bbox": [
226
+ 0.084,
227
+ 0.865,
228
+ 0.473,
229
+ 0.875
230
+ ],
231
+ "angle": 0,
232
+ "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM."
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.084,
238
+ 0.875,
239
+ 0.295,
240
+ 0.884
241
+ ],
242
+ "angle": 0,
243
+ "content": "ACM ISBN 979-8-4007-1276-0/25/06...$15.00"
244
+ },
245
+ {
246
+ "type": "text",
247
+ "bbox": [
248
+ 0.084,
249
+ 0.885,
250
+ 0.273,
251
+ 0.896
252
+ ],
253
+ "angle": 0,
254
+ "content": "https://doi.org/10.1145/3696630.3728492"
255
+ },
256
+ {
257
+ "type": "title",
258
+ "bbox": [
259
+ 0.516,
260
+ 0.31,
261
+ 0.688,
262
+ 0.324
263
+ ],
264
+ "angle": 0,
265
+ "content": "1 INTRODUCTION"
266
+ },
267
+ {
268
+ "type": "text",
269
+ "bbox": [
270
+ 0.514,
271
+ 0.329,
272
+ 0.915,
273
+ 0.384
274
+ ],
275
+ "angle": 0,
276
+ "content": "The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25]."
277
+ },
278
+ {
279
+ "type": "text",
280
+ "bbox": [
281
+ 0.514,
282
+ 0.385,
283
+ 0.915,
284
+ 0.495
285
+ ],
286
+ "angle": 0,
287
+ "content": "However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a \\(1\\%\\) increase in financial losses [19]. Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them."
288
+ },
289
+ {
290
+ "type": "text",
291
+ "bbox": [
292
+ 0.513,
293
+ 0.496,
294
+ 0.915,
295
+ 0.731
296
+ ],
297
+ "angle": 0,
298
+ "content": "System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:"
299
+ },
300
+ {
301
+ "type": "text",
302
+ "bbox": [
303
+ 0.542,
304
+ 0.743,
305
+ 0.914,
306
+ 0.841
307
+ ],
308
+ "angle": 0,
309
+ "content": "- Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies."
310
+ },
311
+ {
312
+ "type": "text",
313
+ "bbox": [
314
+ 0.542,
315
+ 0.841,
316
+ 0.914,
317
+ 0.897
318
+ ],
319
+ "angle": 0,
320
+ "content": "- Application Limitations. Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these"
321
+ },
322
+ {
323
+ "type": "list",
324
+ "bbox": [
325
+ 0.542,
326
+ 0.743,
327
+ 0.914,
328
+ 0.897
329
+ ],
330
+ "angle": 0,
331
+ "content": null
332
+ },
333
+ {
334
+ "type": "aside_text",
335
+ "bbox": [
336
+ 0.023,
337
+ 0.276,
338
+ 0.059,
339
+ 0.701
340
+ ],
341
+ "angle": 270,
342
+ "content": "arXiv:2504.06614v1 [cs.SE] 9 Apr 2025"
343
+ }
344
+ ],
345
+ [
346
+ {
347
+ "type": "header",
348
+ "bbox": [
349
+ 0.085,
350
+ 0.076,
351
+ 0.305,
352
+ 0.087
353
+ ],
354
+ "angle": 0,
355
+ "content": "FSE '25, June 23-28, 2025, Trondheim, Norway"
356
+ },
357
+ {
358
+ "type": "header",
359
+ "bbox": [
360
+ 0.813,
361
+ 0.076,
362
+ 0.912,
363
+ 0.087
364
+ ],
365
+ "angle": 0,
366
+ "content": "Lingzhe Zhang et al."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.125,
372
+ 0.108,
373
+ 0.481,
374
+ 0.162
375
+ ],
376
+ "angle": 0,
377
+ "content": "methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.082,
383
+ 0.167,
384
+ 0.482,
385
+ 0.291
386
+ ],
387
+ "angle": 0,
388
+ "content": "To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management."
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.082,
394
+ 0.292,
395
+ 0.482,
396
+ 0.415
397
+ ],
398
+ "angle": 0,
399
+ "content": "To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources."
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.082,
405
+ 0.417,
406
+ 0.482,
407
+ 0.554
408
+ ],
409
+ "angle": 0,
410
+ "content": "Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.082,
416
+ 0.555,
417
+ 0.482,
418
+ 0.623
419
+ ],
420
+ "angle": 0,
421
+ "content": "We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution."
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.082,
427
+ 0.624,
428
+ 0.482,
429
+ 0.72
430
+ ],
431
+ "angle": 0,
432
+ "content": "Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness."
433
+ },
434
+ {
435
+ "type": "title",
436
+ "bbox": [
437
+ 0.084,
438
+ 0.735,
439
+ 0.4,
440
+ 0.749
441
+ ],
442
+ "angle": 0,
443
+ "content": "2 PRELIMINARY EMPIRCAL STUDY"
444
+ },
445
+ {
446
+ "type": "text",
447
+ "bbox": [
448
+ 0.082,
449
+ 0.753,
450
+ 0.482,
451
+ 0.795
452
+ ],
453
+ "angle": 0,
454
+ "content": "In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process."
455
+ },
456
+ {
457
+ "type": "title",
458
+ "bbox": [
459
+ 0.084,
460
+ 0.809,
461
+ 0.236,
462
+ 0.824
463
+ ],
464
+ "angle": 0,
465
+ "content": "2.1 System Roles"
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.082,
471
+ 0.827,
472
+ 0.482,
473
+ 0.896
474
+ ],
475
+ "angle": 0,
476
+ "content": "We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method."
477
+ },
478
+ {
479
+ "type": "table_caption",
480
+ "bbox": [
481
+ 0.545,
482
+ 0.105,
483
+ 0.884,
484
+ 0.119
485
+ ],
486
+ "angle": 0,
487
+ "content": "Table 1: Anomaly Detection Results for Each Node"
488
+ },
489
+ {
490
+ "type": "table",
491
+ "bbox": [
492
+ 0.524,
493
+ 0.132,
494
+ 0.905,
495
+ 0.203
496
+ ],
497
+ "angle": 0,
498
+ "content": "<table><tr><td></td><td>Node1</td><td>Node2</td><td>Node3</td><td>Node4</td><td>Node5</td><td>Node6</td></tr><tr><td>p</td><td>39.68%</td><td>34.35%</td><td>59.64%</td><td>31.17%</td><td>77.60%</td><td>89.42%</td></tr><tr><td>r</td><td>99.01%</td><td>100.00%</td><td>98.02%</td><td>100.00%</td><td>96.04%</td><td>92.08%</td></tr><tr><td>f1</td><td>56.66%</td><td>51.14%</td><td>74.16%</td><td>47.53%</td><td>85.84%</td><td>90.73%</td></tr></table>"
499
+ },
500
+ {
501
+ "type": "text",
502
+ "bbox": [
503
+ 0.513,
504
+ 0.221,
505
+ 0.913,
506
+ 0.291
507
+ ],
508
+ "angle": 0,
509
+ "content": "As shown in Table 1, \\( \\text{Node}_6 \\) achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles."
510
+ },
511
+ {
512
+ "type": "title",
513
+ "bbox": [
514
+ 0.515,
515
+ 0.303,
516
+ 0.647,
517
+ 0.316
518
+ ],
519
+ "angle": 0,
520
+ "content": "2.2 Data Roles"
521
+ },
522
+ {
523
+ "type": "text",
524
+ "bbox": [
525
+ 0.513,
526
+ 0.321,
527
+ 0.913,
528
+ 0.39
529
+ ],
530
+ "angle": 0,
531
+ "content": "We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm."
532
+ },
533
+ {
534
+ "type": "table_caption",
535
+ "bbox": [
536
+ 0.515,
537
+ 0.403,
538
+ 0.913,
539
+ 0.417
540
+ ],
541
+ "angle": 0,
542
+ "content": "Table 2: Anomalies that can be Classified by Metrics and Logs"
543
+ },
544
+ {
545
+ "type": "table",
546
+ "bbox": [
547
+ 0.518,
548
+ 0.431,
549
+ 0.912,
550
+ 0.492
551
+ ],
552
+ "angle": 0,
553
+ "content": "<table><tr><td>Source</td><td>CPU</td><td>Memory</td><td>Export</td><td>Import</td><td>Configuration</td></tr><tr><td>Metrics</td><td>✓</td><td>✓</td><td></td><td></td><td></td></tr><tr><td>Logs</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr></table>"
554
+ },
555
+ {
556
+ "type": "text",
557
+ "bbox": [
558
+ 0.513,
559
+ 0.507,
560
+ 0.913,
561
+ 0.603
562
+ ],
563
+ "angle": 0,
564
+ "content": "As shown in Table 2, anomalies with an F1-score above \\(50\\%\\) are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles."
565
+ },
566
+ {
567
+ "type": "title",
568
+ "bbox": [
569
+ 0.515,
570
+ 0.615,
571
+ 0.687,
572
+ 0.629
573
+ ],
574
+ "angle": 0,
575
+ "content": "3 METHODOLOGY"
576
+ },
577
+ {
578
+ "type": "text",
579
+ "bbox": [
580
+ 0.513,
581
+ 0.633,
582
+ 0.913,
583
+ 0.702
584
+ ],
585
+ "angle": 0,
586
+ "content": "Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes."
587
+ },
588
+ {
589
+ "type": "text",
590
+ "bbox": [
591
+ 0.513,
592
+ 0.703,
593
+ 0.915,
594
+ 0.897
595
+ ],
596
+ "angle": 0,
597
+ "content": "In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management."
598
+ }
599
+ ],
600
+ [
601
+ {
602
+ "type": "header",
603
+ "bbox": [
604
+ 0.084,
605
+ 0.076,
606
+ 0.566,
607
+ 0.088
608
+ ],
609
+ "angle": 0,
610
+ "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents"
611
+ },
612
+ {
613
+ "type": "header",
614
+ "bbox": [
615
+ 0.692,
616
+ 0.076,
617
+ 0.914,
618
+ 0.088
619
+ ],
620
+ "angle": 0,
621
+ "content": "FSE '25, June 23-28, 2025, Trondheim, Norway"
622
+ },
623
+ {
624
+ "type": "image",
625
+ "bbox": [
626
+ 0.086,
627
+ 0.105,
628
+ 0.913,
629
+ 0.403
630
+ ],
631
+ "angle": 0,
632
+ "content": null
633
+ },
634
+ {
635
+ "type": "image_caption",
636
+ "bbox": [
637
+ 0.39,
638
+ 0.415,
639
+ 0.607,
640
+ 0.43
641
+ ],
642
+ "angle": 0,
643
+ "content": "Figure 1: AgentFM Architecture"
644
+ },
645
+ {
646
+ "type": "text",
647
+ "bbox": [
648
+ 0.082,
649
+ 0.451,
650
+ 0.483,
651
+ 0.562
652
+ ],
653
+ "angle": 0,
654
+ "content": "The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively."
655
+ },
656
+ {
657
+ "type": "title",
658
+ "bbox": [
659
+ 0.084,
660
+ 0.579,
661
+ 0.248,
662
+ 0.595
663
+ ],
664
+ "angle": 0,
665
+ "content": "3.1 System Agents"
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.082,
671
+ 0.597,
672
+ 0.483,
673
+ 0.652
674
+ ],
675
+ "angle": 0,
676
+ "content": "Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent."
677
+ },
678
+ {
679
+ "type": "text",
680
+ "bbox": [
681
+ 0.082,
682
+ 0.653,
683
+ 0.483,
684
+ 0.722
685
+ ],
686
+ "angle": 0,
687
+ "content": "As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly."
688
+ },
689
+ {
690
+ "type": "text",
691
+ "bbox": [
692
+ 0.082,
693
+ 0.722,
694
+ 0.483,
695
+ 0.776
696
+ ],
697
+ "angle": 0,
698
+ "content": "Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks."
699
+ },
700
+ {
701
+ "type": "title",
702
+ "bbox": [
703
+ 0.084,
704
+ 0.795,
705
+ 0.228,
706
+ 0.81
707
+ ],
708
+ "angle": 0,
709
+ "content": "3.2 Data Agents"
710
+ },
711
+ {
712
+ "type": "text",
713
+ "bbox": [
714
+ 0.082,
715
+ 0.813,
716
+ 0.482,
717
+ 0.855
718
+ ],
719
+ "angle": 0,
720
+ "content": "In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively."
721
+ },
722
+ {
723
+ "type": "text",
724
+ "bbox": [
725
+ 0.082,
726
+ 0.855,
727
+ 0.482,
728
+ 0.897
729
+ ],
730
+ "angle": 0,
731
+ "content": "Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate"
732
+ },
733
+ {
734
+ "type": "image",
735
+ "bbox": [
736
+ 0.517,
737
+ 0.445,
738
+ 0.916,
739
+ 0.665
740
+ ],
741
+ "angle": 0,
742
+ "content": null
743
+ },
744
+ {
745
+ "type": "image_caption",
746
+ "bbox": [
747
+ 0.557,
748
+ 0.675,
749
+ 0.871,
750
+ 0.69
751
+ ],
752
+ "angle": 0,
753
+ "content": "Figure 2: System Agents Adaptation Workflow"
754
+ },
755
+ {
756
+ "type": "text",
757
+ "bbox": [
758
+ 0.513,
759
+ 0.712,
760
+ 0.914,
761
+ 0.768
762
+ ],
763
+ "angle": 0,
764
+ "content": "time-series data is converted into natural language descriptions using a large model. The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points."
765
+ },
766
+ {
767
+ "type": "text",
768
+ "bbox": [
769
+ 0.513,
770
+ 0.768,
771
+ 0.915,
772
+ 0.836
773
+ ],
774
+ "angle": 0,
775
+ "content": "Formally, let \\(\\mathbf{M} = \\{m_1, m_2, \\dots, m_n\\}\\) represent the raw multidimensional metrics data, where each \\(m_i\\) corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where \\(\\mathbf{M}_{\\mathrm{p}}\\) is the denoised and imputed version of \\(M\\)."
776
+ },
777
+ {
778
+ "type": "equation",
779
+ "bbox": [
780
+ 0.592,
781
+ 0.851,
782
+ 0.913,
783
+ 0.867
784
+ ],
785
+ "angle": 0,
786
+ "content": "\\[\n\\mathbf {M} _ {\\mathrm {p}} = \\text {P r e p r o c e s s} (\\mathbf {M}) = \\left\\{m _ {1} ^ {\\prime}, m _ {2} ^ {\\prime}, \\dots , m _ {n} ^ {\\prime} \\right\\} \\tag {1}\n\\]"
787
+ },
788
+ {
789
+ "type": "text",
790
+ "bbox": [
791
+ 0.514,
792
+ 0.868,
793
+ 0.915,
794
+ 0.897
795
+ ],
796
+ "angle": 0,
797
+ "content": "Next, the processed data is converted into natural language descriptions using a large language model \\(\\mathcal{L}\\), as shown in Equation 2,"
798
+ }
799
+ ],
800
+ [
801
+ {
802
+ "type": "header",
803
+ "bbox": [
804
+ 0.084,
805
+ 0.076,
806
+ 0.308,
807
+ 0.088
808
+ ],
809
+ "angle": 0,
810
+ "content": "FSE '25, June 23-28, 2025, Trondheim, Norway"
811
+ },
812
+ {
813
+ "type": "header",
814
+ "bbox": [
815
+ 0.813,
816
+ 0.076,
817
+ 0.913,
818
+ 0.088
819
+ ],
820
+ "angle": 0,
821
+ "content": "Lingzhe Zhang et al."
822
+ },
823
+ {
824
+ "type": "text",
825
+ "bbox": [
826
+ 0.082,
827
+ 0.107,
828
+ 0.482,
829
+ 0.149
830
+ ],
831
+ "angle": 0,
832
+ "content": "where \\(\\mathbf{D}_{\\mathrm{nl}}\\) represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data."
833
+ },
834
+ {
835
+ "type": "equation",
836
+ "bbox": [
837
+ 0.238,
838
+ 0.163,
839
+ 0.482,
840
+ 0.178
841
+ ],
842
+ "angle": 0,
843
+ "content": "\\[\n\\mathbf {D} _ {\\mathrm {n l}} = \\mathcal {L} \\left(\\mathbf {M} _ {\\mathrm {p}}\\right) \\tag {2}\n\\]"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.082,
849
+ 0.18,
850
+ 0.482,
851
+ 0.248
852
+ ],
853
+ "angle": 0,
854
+ "content": "Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies."
855
+ },
856
+ {
857
+ "type": "text",
858
+ "bbox": [
859
+ 0.082,
860
+ 0.248,
861
+ 0.482,
862
+ 0.317
863
+ ],
864
+ "angle": 0,
865
+ "content": "To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs."
866
+ },
867
+ {
868
+ "type": "text",
869
+ "bbox": [
870
+ 0.082,
871
+ 0.318,
872
+ 0.482,
873
+ 0.456
874
+ ],
875
+ "angle": 0,
876
+ "content": "Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. Formally, for a given raw log sequence \\( L = \\{l_1, l_2, \\dots, l_N\\} \\), where \\( l_n \\) represents an individual log entry, the parsing process transforms \\( L \\) into \\( L_e = \\{e_1, e_2, \\dots, e_N\\} \\), where \\( e_i \\) represents an event template. Consecutive identical events are merged, yielding \\( G' = \\{e_1', e_2', \\dots, e_C'\\} \\), where \\( C \\ll N \\) and \\( e_i' = e_i \\times c \\) represents the event \\( e_i \\) with a count \\( c \\)."
877
+ },
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.082,
882
+ 0.456,
883
+ 0.485,
884
+ 0.54
885
+ ],
886
+ "angle": 0,
887
+ "content": "Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. Formally, given a sequence of raw log entries \\( L = \\{l_1, l_2, \\dots, l_N\\} \\), the LLM-based summarization transforms and compresses \\( L \\) into a smaller set of key operational elements \\( O = \\{o_1, o_2, \\dots, o_M\\} \\), where \\( M \\ll N \\)."
888
+ },
889
+ {
890
+ "type": "title",
891
+ "bbox": [
892
+ 0.084,
893
+ 0.551,
894
+ 0.229,
895
+ 0.566
896
+ ],
897
+ "angle": 0,
898
+ "content": "3.3 Task Agents"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.082,
904
+ 0.569,
905
+ 0.482,
906
+ 0.666
907
+ ],
908
+ "angle": 0,
909
+ "content": "After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis."
910
+ },
911
+ {
912
+ "type": "text",
913
+ "bbox": [
914
+ 0.082,
915
+ 0.667,
916
+ 0.485,
917
+ 0.723
918
+ ],
919
+ "angle": 0,
920
+ "content": "Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying."
921
+ },
922
+ {
923
+ "type": "title",
924
+ "bbox": [
925
+ 0.084,
926
+ 0.733,
927
+ 0.358,
928
+ 0.748
929
+ ],
930
+ "angle": 0,
931
+ "content": "4 PRELIMINARY EVALUATION"
932
+ },
933
+ {
934
+ "type": "title",
935
+ "bbox": [
936
+ 0.084,
937
+ 0.753,
938
+ 0.185,
939
+ 0.768
940
+ ],
941
+ "angle": 0,
942
+ "content": "4.1 Design"
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.082,
948
+ 0.771,
949
+ 0.482,
950
+ 0.867
951
+ ],
952
+ "angle": 0,
953
+ "content": "To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times."
954
+ },
955
+ {
956
+ "type": "text",
957
+ "bbox": [
958
+ 0.082,
959
+ 0.868,
960
+ 0.482,
961
+ 0.897
962
+ ],
963
+ "angle": 0,
964
+ "content": "The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is"
965
+ },
966
+ {
967
+ "type": "text",
968
+ "bbox": [
969
+ 0.514,
970
+ 0.107,
971
+ 0.916,
972
+ 0.149
973
+ ],
974
+ "angle": 0,
975
+ "content": "measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness."
976
+ },
977
+ {
978
+ "type": "title",
979
+ "bbox": [
980
+ 0.515,
981
+ 0.162,
982
+ 0.622,
983
+ 0.176
984
+ ],
985
+ "angle": 0,
986
+ "content": "4.2 Results"
987
+ },
988
+ {
989
+ "type": "text",
990
+ "bbox": [
991
+ 0.514,
992
+ 0.18,
993
+ 0.916,
994
+ 0.25
995
+ ],
996
+ "angle": 0,
997
+ "content": "We utilize the function call feature to guide the model in generating \"yes\" or \"no\" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly."
998
+ },
999
+ {
1000
+ "type": "table_caption",
1001
+ "bbox": [
1002
+ 0.551,
1003
+ 0.266,
1004
+ 0.877,
1005
+ 0.28
1006
+ ],
1007
+ "angle": 0,
1008
+ "content": "Table 3: Anomaly Detection & Diagnosis Results"
1009
+ },
1010
+ {
1011
+ "type": "table",
1012
+ "bbox": [
1013
+ 0.548,
1014
+ 0.293,
1015
+ 0.882,
1016
+ 0.351
1017
+ ],
1018
+ "angle": 0,
1019
+ "content": "<table><tr><td>Task</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>Anomaly Detection</td><td>95.14%</td><td>97.03%</td><td>95.76%</td></tr><tr><td>Failure Diagnosis</td><td>89.61%</td><td>87.04%</td><td>87.62%</td></tr></table>"
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "bbox": [
1024
+ 0.514,
1025
+ 0.371,
1026
+ 0.916,
1027
+ 0.454
1028
+ ],
1029
+ "angle": 0,
1030
+ "content": "As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work."
1031
+ },
1032
+ {
1033
+ "type": "title",
1034
+ "bbox": [
1035
+ 0.523,
1036
+ 0.48,
1037
+ 0.694,
1038
+ 0.492
1039
+ ],
1040
+ "angle": 0,
1041
+ "content": "Increase CPU Resources:"
1042
+ },
1043
+ {
1044
+ "type": "text",
1045
+ "bbox": [
1046
+ 0.523,
1047
+ 0.494,
1048
+ 0.906,
1049
+ 0.534
1050
+ ],
1051
+ "angle": 0,
1052
+ "content": "Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit."
1053
+ },
1054
+ {
1055
+ "type": "title",
1056
+ "bbox": [
1057
+ 0.524,
1058
+ 0.536,
1059
+ 0.636,
1060
+ 0.549
1061
+ ],
1062
+ "angle": 0,
1063
+ "content": "Load Balancing:"
1064
+ },
1065
+ {
1066
+ "type": "text",
1067
+ "bbox": [
1068
+ 0.524,
1069
+ 0.55,
1070
+ 0.905,
1071
+ 0.577
1072
+ ],
1073
+ "angle": 0,
1074
+ "content": "If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing."
1075
+ },
1076
+ {
1077
+ "type": "image_caption",
1078
+ "bbox": [
1079
+ 0.536,
1080
+ 0.604,
1081
+ 0.892,
1082
+ 0.619
1083
+ ],
1084
+ "angle": 0,
1085
+ "content": "Figure 3: Sample Mitigation Solutions from AgentFM"
1086
+ },
1087
+ {
1088
+ "type": "text",
1089
+ "bbox": [
1090
+ 0.514,
1091
+ 0.64,
1092
+ 0.916,
1093
+ 0.71
1094
+ ],
1095
+ "angle": 0,
1096
+ "content": "We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation."
1097
+ },
1098
+ {
1099
+ "type": "title",
1100
+ "bbox": [
1101
+ 0.515,
1102
+ 0.722,
1103
+ 0.664,
1104
+ 0.736
1105
+ ],
1106
+ "angle": 0,
1107
+ "content": "5 CONCLUSION"
1108
+ },
1109
+ {
1110
+ "type": "text",
1111
+ "bbox": [
1112
+ 0.514,
1113
+ 0.74,
1114
+ 0.916,
1115
+ 0.837
1116
+ ],
1117
+ "angle": 0,
1118
+ "content": "This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution."
1119
+ },
1120
+ {
1121
+ "type": "title",
1122
+ "bbox": [
1123
+ 0.515,
1124
+ 0.85,
1125
+ 0.715,
1126
+ 0.863
1127
+ ],
1128
+ "angle": 0,
1129
+ "content": "ACKNOWLEDGEMENT"
1130
+ },
1131
+ {
1132
+ "type": "text",
1133
+ "bbox": [
1134
+ 0.514,
1135
+ 0.868,
1136
+ 0.915,
1137
+ 0.897
1138
+ ],
1139
+ "angle": 0,
1140
+ "content": "This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003)."
1141
+ }
1142
+ ],
1143
+ [
1144
+ {
1145
+ "type": "header",
1146
+ "bbox": [
1147
+ 0.084,
1148
+ 0.076,
1149
+ 0.565,
1150
+ 0.088
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents"
1154
+ },
1155
+ {
1156
+ "type": "header",
1157
+ "bbox": [
1158
+ 0.693,
1159
+ 0.076,
1160
+ 0.913,
1161
+ 0.088
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "FSE '25, June 23-28, 2025, Trondheim, Norway"
1165
+ },
1166
+ {
1167
+ "type": "title",
1168
+ "bbox": [
1169
+ 0.085,
1170
+ 0.106,
1171
+ 0.203,
1172
+ 0.12
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "REFERENCES"
1176
+ },
1177
+ {
1178
+ "type": "ref_text",
1179
+ "bbox": [
1180
+ 0.092,
1181
+ 0.123,
1182
+ 0.482,
1183
+ 0.164
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749."
1187
+ },
1188
+ {
1189
+ "type": "ref_text",
1190
+ "bbox": [
1191
+ 0.091,
1192
+ 0.164,
1193
+ 0.482,
1194
+ 0.204
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22."
1198
+ },
1199
+ {
1200
+ "type": "ref_text",
1201
+ "bbox": [
1202
+ 0.092,
1203
+ 0.205,
1204
+ 0.482,
1205
+ 0.254
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": "[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428."
1209
+ },
1210
+ {
1211
+ "type": "ref_text",
1212
+ "bbox": [
1213
+ 0.092,
1214
+ 0.255,
1215
+ 0.482,
1216
+ 0.295
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57."
1220
+ },
1221
+ {
1222
+ "type": "ref_text",
1223
+ "bbox": [
1224
+ 0.092,
1225
+ 0.295,
1226
+ 0.482,
1227
+ 0.326
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": "[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084."
1231
+ },
1232
+ {
1233
+ "type": "ref_text",
1234
+ "bbox": [
1235
+ 0.092,
1236
+ 0.326,
1237
+ 0.482,
1238
+ 0.365
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78."
1242
+ },
1243
+ {
1244
+ "type": "ref_text",
1245
+ "bbox": [
1246
+ 0.092,
1247
+ 0.365,
1248
+ 0.482,
1249
+ 0.406
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": "[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352."
1253
+ },
1254
+ {
1255
+ "type": "ref_text",
1256
+ "bbox": [
1257
+ 0.092,
1258
+ 0.406,
1259
+ 0.482,
1260
+ 0.446
1261
+ ],
1262
+ "angle": 0,
1263
+ "content": "[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762."
1264
+ },
1265
+ {
1266
+ "type": "ref_text",
1267
+ "bbox": [
1268
+ 0.092,
1269
+ 0.446,
1270
+ 0.482,
1271
+ 0.487
1272
+ ],
1273
+ "angle": 0,
1274
+ "content": "[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736."
1275
+ },
1276
+ {
1277
+ "type": "ref_text",
1278
+ "bbox": [
1279
+ 0.087,
1280
+ 0.487,
1281
+ 0.482,
1282
+ 0.525
1283
+ ],
1284
+ "angle": 0,
1285
+ "content": "[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213."
1286
+ },
1287
+ {
1288
+ "type": "ref_text",
1289
+ "bbox": [
1290
+ 0.087,
1291
+ 0.526,
1292
+ 0.482,
1293
+ 0.567
1294
+ ],
1295
+ "angle": 0,
1296
+ "content": "[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189."
1297
+ },
1298
+ {
1299
+ "type": "ref_text",
1300
+ "bbox": [
1301
+ 0.087,
1302
+ 0.567,
1303
+ 0.482,
1304
+ 0.607
1305
+ ],
1306
+ "angle": 0,
1307
+ "content": "[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring IIm-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219."
1308
+ },
1309
+ {
1310
+ "type": "ref_text",
1311
+ "bbox": [
1312
+ 0.087,
1313
+ 0.607,
1314
+ 0.482,
1315
+ 0.657
1316
+ ],
1317
+ "angle": 0,
1318
+ "content": "[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110."
1319
+ },
1320
+ {
1321
+ "type": "ref_text",
1322
+ "bbox": [
1323
+ 0.087,
1324
+ 0.657,
1325
+ 0.482,
1326
+ 0.697
1327
+ ],
1328
+ "angle": 0,
1329
+ "content": "[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904."
1330
+ },
1331
+ {
1332
+ "type": "ref_text",
1333
+ "bbox": [
1334
+ 0.087,
1335
+ 0.697,
1336
+ 0.482,
1337
+ 0.728
1338
+ ],
1339
+ "angle": 0,
1340
+ "content": "[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on"
1341
+ },
1342
+ {
1343
+ "type": "list",
1344
+ "bbox": [
1345
+ 0.087,
1346
+ 0.123,
1347
+ 0.482,
1348
+ 0.728
1349
+ ],
1350
+ "angle": 0,
1351
+ "content": null
1352
+ },
1353
+ {
1354
+ "type": "ref_text",
1355
+ "bbox": [
1356
+ 0.541,
1357
+ 0.11,
1358
+ 0.758,
1359
+ 0.12
1360
+ ],
1361
+ "angle": 0,
1362
+ "content": "Software Engineering (ICSE). IEEE, 1448-1460."
1363
+ },
1364
+ {
1365
+ "type": "ref_text",
1366
+ "bbox": [
1367
+ 0.518,
1368
+ 0.12,
1369
+ 0.914,
1370
+ 0.16
1371
+ ],
1372
+ "angle": 0,
1373
+ "content": "[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397."
1374
+ },
1375
+ {
1376
+ "type": "ref_text",
1377
+ "bbox": [
1378
+ 0.518,
1379
+ 0.161,
1380
+ 0.914,
1381
+ 0.201
1382
+ ],
1383
+ "angle": 0,
1384
+ "content": "[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. Deepralog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634."
1385
+ },
1386
+ {
1387
+ "type": "ref_text",
1388
+ "bbox": [
1389
+ 0.518,
1390
+ 0.201,
1391
+ 0.914,
1392
+ 0.25
1393
+ ],
1394
+ "angle": 0,
1395
+ "content": "[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398."
1396
+ },
1397
+ {
1398
+ "type": "ref_text",
1399
+ "bbox": [
1400
+ 0.517,
1401
+ 0.251,
1402
+ 0.914,
1403
+ 0.289
1404
+ ],
1405
+ "angle": 0,
1406
+ "content": "[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267."
1407
+ },
1408
+ {
1409
+ "type": "ref_text",
1410
+ "bbox": [
1411
+ 0.517,
1412
+ 0.289,
1413
+ 0.914,
1414
+ 0.329
1415
+ ],
1416
+ "angle": 0,
1417
+ "content": "[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024)."
1418
+ },
1419
+ {
1420
+ "type": "ref_text",
1421
+ "bbox": [
1422
+ 0.517,
1423
+ 0.33,
1424
+ 0.914,
1425
+ 0.369
1426
+ ],
1427
+ "angle": 0,
1428
+ "content": "[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024)."
1429
+ },
1430
+ {
1431
+ "type": "ref_text",
1432
+ "bbox": [
1433
+ 0.517,
1434
+ 0.37,
1435
+ 0.914,
1436
+ 0.41
1437
+ ],
1438
+ "angle": 0,
1439
+ "content": "[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5."
1440
+ },
1441
+ {
1442
+ "type": "ref_text",
1443
+ "bbox": [
1444
+ 0.517,
1445
+ 0.41,
1446
+ 0.914,
1447
+ 0.45
1448
+ ],
1449
+ "angle": 0,
1450
+ "content": "[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM)."
1451
+ },
1452
+ {
1453
+ "type": "ref_text",
1454
+ "bbox": [
1455
+ 0.517,
1456
+ 0.45,
1457
+ 0.914,
1458
+ 0.491
1459
+ ],
1460
+ "angle": 0,
1461
+ "content": "[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548."
1462
+ },
1463
+ {
1464
+ "type": "ref_text",
1465
+ "bbox": [
1466
+ 0.517,
1467
+ 0.491,
1468
+ 0.914,
1469
+ 0.531
1470
+ ],
1471
+ "angle": 0,
1472
+ "content": "[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224."
1473
+ },
1474
+ {
1475
+ "type": "ref_text",
1476
+ "bbox": [
1477
+ 0.517,
1478
+ 0.531,
1479
+ 0.914,
1480
+ 0.571
1481
+ ],
1482
+ "angle": 0,
1483
+ "content": "[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864."
1484
+ },
1485
+ {
1486
+ "type": "ref_text",
1487
+ "bbox": [
1488
+ 0.517,
1489
+ 0.571,
1490
+ 0.914,
1491
+ 0.611
1492
+ ],
1493
+ "angle": 0,
1494
+ "content": "[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024)."
1495
+ },
1496
+ {
1497
+ "type": "ref_text",
1498
+ "bbox": [
1499
+ 0.518,
1500
+ 0.611,
1501
+ 0.914,
1502
+ 0.652
1503
+ ],
1504
+ "angle": 0,
1505
+ "content": "[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277."
1506
+ },
1507
+ {
1508
+ "type": "ref_text",
1509
+ "bbox": [
1510
+ 0.518,
1511
+ 0.652,
1512
+ 0.914,
1513
+ 0.702
1514
+ ],
1515
+ "angle": 0,
1516
+ "content": "[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539."
1517
+ },
1518
+ {
1519
+ "type": "ref_text",
1520
+ "bbox": [
1521
+ 0.518,
1522
+ 0.702,
1523
+ 0.914,
1524
+ 0.732
1525
+ ],
1526
+ "angle": 0,
1527
+ "content": "[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024)."
1528
+ },
1529
+ {
1530
+ "type": "list",
1531
+ "bbox": [
1532
+ 0.517,
1533
+ 0.11,
1534
+ 0.914,
1535
+ 0.732
1536
+ ],
1537
+ "angle": 0,
1538
+ "content": null
1539
+ }
1540
+ ]
1541
+ ]
data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a29ef41117fdd55543e3b209d72b5620a51245217616f3edf4689e1480b6b7c
3
+ size 1710651
data/2025/2504_06xxx/2504.06614/full.md ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents
2
+
3
+ Lingzhe Zhang†
4
+ Peking University
5
+ Beijing, China
6
+ zhang.lingzhe@stu.pku.edu.cn
7
+
8
+ Yunpeng Zhai†
9
+ Alibaba Group
10
+ Beijing, China
11
+ zhaiyunpeng.zyp@alibaba-inc.com
12
+
13
+ Tong Jia*
14
+ Peking University
15
+ Beijing, China
16
+ jia.tong@pku.edu.cn
17
+
18
+ Xiaosong Huang
19
+ Peking University
20
+ Beijing, China
21
+ hxs@stu.pku.edu.cn
22
+
23
+ Chiming Duan
24
+ Peking University
25
+ Beijing, China
26
+ duanchiming@stu.pku.edu.cn
27
+
28
+ Ying Li*
29
+ Peking University
30
+ Beijing, China
31
+ li.ying@pku.edu.cn
32
+
33
+ # ABSTRACT
34
+
35
+ Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research.
36
+
37
+ # CCS CONCEPTS
38
+
39
+ - Software and its engineering $\rightarrow$ Maintaining software.
40
+
41
+ # KEYWORDS
42
+
43
+ Failure Management, Distributed Databases, Multi Agents
44
+
45
+ # ACM Reference Format:
46
+
47
+ Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492
48
+
49
+ $\dagger$ Equal contribution.
50
+ *Corresponding author.
51
+
52
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
53
+
54
+ FSE '25, June 23-28, 2025, Trondheim, Norway
55
+
56
+ © 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.
57
+
58
+ ACM ISBN 979-8-4007-1276-0/25/06...$15.00
59
+
60
+ https://doi.org/10.1145/3696630.3728492
61
+
62
+ # 1 INTRODUCTION
63
+
64
+ The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25].
65
+
66
+ However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a $1\%$ increase in financial losses [19]. Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them.
67
+
68
+ System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:
69
+
70
+ - Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies.
71
+ - Application Limitations. Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these
72
+
73
+ methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues.
74
+
75
+ To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management.
76
+
77
+ To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources.
78
+
79
+ Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution.
80
+
81
+ We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution.
82
+
83
+ Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness.
84
+
85
+ # 2 PRELIMINARY EMPIRICAL STUDY
86
+
87
+ In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process.
88
+
89
+ # 2.1 System Roles
90
+
91
+ We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method.
92
+
93
+ Table 1: Anomaly Detection Results for Each Node
94
+
95
+ <table><tr><td></td><td>Node1</td><td>Node2</td><td>Node3</td><td>Node4</td><td>Node5</td><td>Node6</td></tr><tr><td>p</td><td>39.68%</td><td>34.35%</td><td>59.64%</td><td>31.17%</td><td>77.60%</td><td>89.42%</td></tr><tr><td>r</td><td>99.01%</td><td>100.00%</td><td>98.02%</td><td>100.00%</td><td>96.04%</td><td>92.08%</td></tr><tr><td>f1</td><td>56.66%</td><td>51.14%</td><td>74.16%</td><td>47.53%</td><td>85.84%</td><td>90.73%</td></tr></table>
96
+
97
+ As shown in Table 1, $\text{Node}_6$ achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles.
98
+
99
+ # 2.2 Data Roles
100
+
101
+ We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm.
102
+
103
+ Table 2: Anomalies that can be Classified by Metrics and Logs
104
+
105
+ <table><tr><td>Source</td><td>CPU</td><td>Memory</td><td>Export</td><td>Import</td><td>Configuration</td></tr><tr><td>Metrics</td><td>✓</td><td>✓</td><td></td><td></td><td></td></tr><tr><td>Logs</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr></table>
106
+
107
+ As shown in Table 2, anomalies with an F1-score above $50\%$ are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles.
108
+
109
+ # 3 METHODOLOGY
110
+
111
+ Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes.
112
+
113
+ In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management.
114
+
115
+ ![](images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg)
116
+ Figure 1: AgentFM Architecture
117
+
118
+ The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively.
119
+
120
+ # 3.1 System Agents
121
+
122
+ Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent.
123
+
124
+ As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly.
125
+
126
+ Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks.
127
+
128
+ # 3.2 Data Agents
129
+
130
+ In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively.
131
+
132
+ Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate
133
+
134
+ ![](images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg)
135
+ Figure 2: System Agents Adaptation Workflow
136
+
137
+ time-series data is converted into natural language descriptions using a large model. The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points.
138
+
139
+ Formally, let $\mathbf{M} = \{m_1, m_2, \dots, m_n\}$ represent the raw multidimensional metrics data, where each $m_i$ corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where $\mathbf{M}_{\mathrm{p}}$ is the denoised and imputed version of $M$ .
140
+
141
+ $$
142
+ \mathbf {M} _ {\mathrm {p}} = \text {P r e p r o c e s s} (\mathbf {M}) = \left\{m _ {1} ^ {\prime}, m _ {2} ^ {\prime}, \dots , m _ {n} ^ {\prime} \right\} \tag {1}
143
+ $$
144
+
145
+ Next, the processed data is converted into natural language descriptions using a large language model $\mathcal{L}$ , as shown in Equation 2,
146
+
147
+ where $\mathbf{D}_{\mathrm{nl}}$ represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data.
148
+
149
+ $$
150
+ \mathbf {D} _ {\mathrm {n l}} = \mathcal {L} \left(\mathbf {M} _ {\mathrm {p}}\right) \tag {2}
151
+ $$
152
+
153
+ Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies.
154
+
155
+ To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs.
156
+
157
+ Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. Formally, for a given raw log sequence $L = \{l_1, l_2, \dots, l_N\}$ , where $l_n$ represents an individual log entry, the parsing process transforms $L$ into $L_e = \{e_1, e_2, \dots, e_N\}$ , where $e_i$ represents an event template. Consecutive identical events are merged, yielding $G' = \{e_1', e_2', \dots, e_C'\}$ , where $C \ll N$ and $e_i' = e_i \times c$ represents the event $e_i$ with a count $c$ .
158
+
159
+ Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. Formally, given a sequence of raw log entries $L = \{l_1, l_2, \dots, l_N\}$ , the LLM-based summarization transforms and compresses $L$ into a smaller set of key operational elements $O = \{o_1, o_2, \dots, o_M\}$ , where $M \ll N$ .
160
+
161
+ # 3.3 Task Agents
162
+
163
+ After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis.
164
+
165
+ Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying.
166
+
167
+ # 4 PRELIMINARY EVALUATION
168
+
169
+ # 4.1 Design
170
+
171
+ To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times.
172
+
173
+ The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is
174
+
175
+ measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness.
176
+
177
+ # 4.2 Results
178
+
179
+ We utilize the function call feature to guide the model in generating "yes" or "no" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly.
180
+
181
+ Table 3: Anomaly Detection & Diagnosis Results
182
+
183
+ <table><tr><td>Task</td><td>Precision</td><td>Recall</td><td>F1-Score</td></tr><tr><td>Anomaly Detection</td><td>95.14%</td><td>97.03%</td><td>95.76%</td></tr><tr><td>Failure Diagnosis</td><td>89.61%</td><td>87.04%</td><td>87.62%</td></tr></table>
184
+
185
+ As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work.
186
+
187
+ # Increase CPU Resources:
188
+
189
+ Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit.
190
+
191
+ # Load Balancing:
192
+
193
+ If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing.
194
+
195
+ Figure 3: Sample Mitigation Solutions from AgentFM
196
+
197
+ We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation.
198
+
199
+ # 5 CONCLUSION
200
+
201
+ This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution.
202
+
203
+ # ACKNOWLEDGEMENT
204
+
205
+ This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003).
206
+
207
+ # REFERENCES
208
+
209
+ [1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749.
210
+ [2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22.
211
+ [3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428.
212
+ [4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57.
213
+ [5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084.
214
+ [6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78.
215
+ [7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352.
216
+ [8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762.
217
+ [9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736.
218
+ [10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213.
219
+ [11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189.
220
+ [12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring IIm-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219.
221
+ [13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110.
222
+ [14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904.
223
+ [15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on
224
+
225
+ Software Engineering (ICSE). IEEE, 1448-1460.
226
+ [16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397.
227
+ [17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. DeepTraLog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634.
228
+ [18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398.
229
+ [19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267.
230
+ [20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024).
231
+ [21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024).
232
+ [22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5.
233
+ [23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM).
234
+ [24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548.
235
+ [25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224.
236
+ [26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864.
237
+ [27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024).
238
+ [28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277.
239
+ [29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539.
240
+ [30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024).
data/2025/2504_06xxx/2504.06614/images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg ADDED

Git LFS Details

  • SHA256: e313747d948d9b0d11c3287e796d2f3eb3a11ee68e6d1e6cf5a796cf40eb980a
  • Pointer size: 130 Bytes
  • Size of remote file: 23.2 kB
data/2025/2504_06xxx/2504.06614/images/48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg ADDED

Git LFS Details

  • SHA256: 32c66f9f2cbb82c692f78f8464f5ed39d3ca65f5823014f230f62b7d9d98ae77
  • Pointer size: 129 Bytes
  • Size of remote file: 2.41 kB
data/2025/2504_06xxx/2504.06614/images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg ADDED

Git LFS Details

  • SHA256: 4a902ae92d692d5662626a713f891f8c7341dd07e764ddc1fa367931b16b2cc5
  • Pointer size: 130 Bytes
  • Size of remote file: 15.5 kB
data/2025/2504_06xxx/2504.06614/images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg ADDED

Git LFS Details

  • SHA256: 9ef90d430313f9f3c670f62f5a9fecc30f73ad562f01e170d327c3e71a6e449c
  • Pointer size: 131 Bytes
  • Size of remote file: 123 kB
data/2025/2504_06xxx/2504.06614/images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg ADDED

Git LFS Details

  • SHA256: eb35c998cce958af012871a8d517661251c8c07b62ecc0eb797422961832c16d
  • Pointer size: 130 Bytes
  • Size of remote file: 16.2 kB
data/2025/2504_06xxx/2504.06614/images/c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg ADDED

Git LFS Details

  • SHA256: 72fbeb9d66053611d24a34ee60774ec4f27e0146a78a565dfcd23e065ea0bcb7
  • Pointer size: 129 Bytes
  • Size of remote file: 4.82 kB
data/2025/2504_06xxx/2504.06614/images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg ADDED

Git LFS Details

  • SHA256: 3b55cff4865e11857a07ad6cfc91f2f3bd98da5b70c42290bc52d44e832cc9ca
  • Pointer size: 130 Bytes
  • Size of remote file: 48.6 kB
data/2025/2504_06xxx/2504.06614/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fbadd2b320fab25fa7302d3ba18df2a216ebf0819d83ec3f457f7117e103d0a
3
+ size 3485421
data/2025/2504_06xxx/2504.06821/full.md ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inducing Programmatic Skills for Agentic Tasks
2
+
3
+ Zora Zhiruo Wang Apurva Gandhi Graham Neubig Daniel Fried
4
+
5
+ Carnegie Mellon University
6
+
7
+ {zhiruow,apurvag,gneubig,dfried}@cs.cmu.edu
8
+
9
+ # Abstract
10
+
11
+ To succeed in common digital tasks such as web navigation, agents must carry out a variety of specialized tasks such as searching for products or planning a travel route. To tackle these tasks, agents can bootstrap themselves by learning task-specific skills online through interaction with the web environment. In this work, we demonstrate that programs are an effective representation for skills. We propose agent skill induction (ASI), which allows agents to adapt themselves by inducing, verifying, and utilizing program-based skills on the fly. We start with an evaluation on the WebArena agent benchmark and show that ASI outperforms the static baseline agent and its text-skill counterpart by $23.5\%$ and $11.3\%$ in success rate, mainly thanks to the programmatic verification guarantee during the induction phase. ASI also improves efficiency by reducing $10.7 - 15.3\%$ of the steps over baselines, by composing primitive actions (e.g., click) into higher-level skills (e.g., search_product). We then highlight the efficacy of ASI in remaining efficient and accurate under scaled-up web activities. Finally, we examine the generalizability of induced skills when transferring between websites, and find that ASI can effectively reuse common skills, while also updating incompatible skills to versatile website changes. $^{1}$
12
+
13
+ # 1 Introduction
14
+
15
+ To achieve success in common digital tasks such as web navigation, it is essential for agents to be able to perform a variety of specialized tasks such as searching for products on a shopping website (Yao et al., 2022; Deng et al., 2024) or finding a driving route on the map (Zhou et al., 2024a; Xie et al., 2024a). While one source for agents to learn such tasks is demonstrations annotated by humans (Deng et al., 2024) or synthesized with large language models (LMs) on websites of interest (Murty et al., 2024b;a), this can be a challenging offline learning procedure given the broad range of website domains and functionalities, especially for the collected demonstrations to match or cover the distribution of tasks queried at inference time (Zhou et al., 2024b); not to mention the limitations in resources to collect abundant high-quality data at ease (Pan et al., 2024).
16
+
17
+ Instead of learning from demonstrations offline, an alternative way is to learn these tasks directly online from test queries to prevent potential distribution mismatch between demonstration and downstream tasks (Levine et al., 2020). Some works propose to have agents induce casual abstractions (Majumder et al., 2024), single-state guidelines (Fu et al., 2024), or multi-step procedural workflows (Sarch et al., 2024; Wang et al., 2024b) as a form of intermediate knowledge to augment agent memory via non-parametric approaches (Brown et al., 2020). Nonetheless, most existing approaches represent this knowledge in text, offering limited quality and verification guarantees. In this work, we propose that executable programs are effective representations for intermediate skill acquisition, given their verifiability and composability advantages (Setlur et al., 2025).
18
+
19
+ We present ASI, namely agent skill induction (§2), that induces and applies programmatic skills along the process of solving user web navigation queries. More concretely, given a natural language (NL) query, the agent first generates an action trajectory attempting to solve the task using built-in, primitive actions such as click and scroll. The agent then
20
+
21
+ induces higher-level skills (e.g., search_product(name)) that wrap primitive actions or prior skills as executable programs, accompanied with corresponding test trajectories to verify their quality. Verified skills are then incorporated into the agent action space and can be directly called to solve future tasks with similar procedures, as depicted in Figure 1 (bottom).
22
+
23
+ We first evaluate ASI on the WebArena benchmark (Zhou et al., 2024a) (§3) and demonstrate that our online, adaptive ASI surpasses its static agent baseline without adaptive components by $23.5\%$ in success rate. To validate the advantage of using programmatic representations for skills, we further compare to an adaptive agent, AWM (Wang et al., 2024b), that represents skills in memory as non-executable texts (Figure 1 top); we find ASI scores $11.3\%$ higher success rate by employing verifiable, programmatic skills (Figure 1 bottom). Beyond the correctness aspect, the task-solving procedures by ASI-supported agents are $10.7 - 15.3\%$ more efficient than the baseline approaches, mainly because of the action space abstraction and composition enabled by the programmatic skill representation.
24
+
25
+ ![](images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg)
26
+ Figure 1: Online adaptive agent that induces and reuses programmatic skills as actions (bottom), as opposed to adding textual skills in memory (top).
27
+
28
+ We further stress test ASI on scaled-up scenarios (§4) that require substantially longer-horizon trajectories to complete the task. Across various domains such as shopping and social forums, we find the efficiency advantage offered by ASI is more pronounced, reducing action trajectories by 9.5 and 5.6 average steps compared to static and text-form agents. Facilitated by this improved procedural efficiency and planning, we find that ASI agent adheres to the optimal trajectory better and achieves tasks $20.7 - 38.9\%$ more correctly.
29
+
30
+ Finally, we study agent behaviors on generalizing induced skills to other websites (§5), particularly from sandboxed, simulated websites to real-world websites of similar domains. While ASI agents effectively transfer common skills (e.g., search_product) to new websites, some skills may be incompatible with alternative website designs hence less useful. Nonetheless, ASI can quickly refine its prior skills or create new skills on new websites, indicating it allows agents to adapt online while maintaining verifiability via programs.
31
+
32
+ In short, ASI enhances web agent success and efficiency by inducing and applying verifiable programmatic skills, in general and longer-horizon tasks, even across varied websites.
33
+
34
+ # 2 Agent Skill Induction
35
+
36
+ In this section, we first lay out the web agent problem setup (§2.1) and introduce online, self-adaptive agents (§2.2). We then describe the core component of ASI—programmatic skill induction and verification (§2.3).
37
+
38
+ # 2.1 Problem Statement: Online Adaptive Agent
39
+
40
+ For the scope of this work, we focus on language model (LM) based agents, where each agent policy consists of an LM backbone $\mathcal{L}$ , a memory $\mathcal{M}$ , and a skill library $\mathcal{A}$ , as illustrated in Figure 1 top and bottom. In the implementation, the memory $\mathcal{M}$ and the skill library $\mathcal{A}$ are provided as input context to the LM backbone. We denote the agent policy as $\pi_{\mathcal{L}}(\cdot|\mathcal{M},\mathcal{A})$ and $\pi_{\mathcal{L}}$ for short. We focus on the web browser environment defined by a transition function $\mathcal{T}(s'|s,a)$ that models the change in the webpage after an action.
41
+
42
+ We focus on an online adaptation scenario where we have access to a sequence of NL queries $Q = \{q_{1}, q_{2}, \dots, q_{N}\}$ specifying the tasks, and no other information such as demonstration
43
+
44
+ trajectories or ground-truth rewards are available (Wang et al., 2024a,b). For each task specified by a natural language (NL) query $q$ , the agent generates a trajectory of actions $\tau = (s_0, a_0, s_1, a_1, \dots, s_{H-1}, a_{H-1}, s_H)$ for a finite number of $H$ steps. At each time step $h$ in the horizon, the agent receives observation $o_h$ from the current state $s_h$ , and generates an action $a_h \in \mathcal{A}$ based on the observations and actions so far, via $\pi_{\mathcal{L}}(o_{0:h}, a_{0:h-1}; \mathcal{M}, \mathcal{A}) \to a_h$ . The generated action will be executed on the environment and incurs a state change $\mathcal{T}(s_h, a_h) \to s_{h+1}$ . This observe-act loop continues for $H$ steps until the task reaches a task-terminating condition, such as the agent generating a termination action (e.g., send msg_to_user) or the horizon reaches a pre-determined maximum number of steps $h = H_{max}$ . We denote each pair of query and trajectory $(q, \tau) := e$ as an episode $e$ . Agents can update the content in $\mathcal{M}$ and $\mathcal{A}$ and reuse them across episodes.
45
+
46
+ # 2.2 Inducing Reusable Skills
47
+
48
+ To realize online adaptive agents, one common approach is to induce skills from correct trajectories to update the agent (Wang et al., 2024b). But since ground-truth rewards are unavailable, an LLM-based evaluator $V_{\mathcal{L}}$ is often used to judge the correctness of episodes. Formally, from the total of $N$ episodes throughout the online process $\{e^1,\dots ,e^N\} \coloneqq \mathcal{E}$ , we employ an LM-based evaluator $V_{\mathcal{L}}(e)\to 0 / 1$ to filter out the episodes predicted as correct $\mathcal{E}_V = \{e_i\in \mathcal{E}|V_{\mathcal{L}}(e_i) = 1,i\in \{1,\dots ,N\} \}$ and perform skill induction only on $\mathcal{E}_V$ .
49
+
50
+ Central to our adaptive agents is an induction component $I$ that enables the adaptivity of agents, which can be rule-based (Ellis et al., 2023; Grand et al., 2024) or instantiated by an LM $I(\cdot |LM)$ (Wang et al., 2024b); we follow the latter for its better performance and use $I$ to represent the module for simplicity. For online adaptive agents $\pi_{\mathcal{L}}$ , to induce skills, $I$ is instructed to take in one filtered episode $e$ and output one or more pieces of desired skills $D = \{d\}$ , denoted as $I(e)\to \mathcal{D}$ . Following AWM (Wang et al., 2024b), we update the agent in non-parametric ways that incorporate the induction outcome $I(e_t)\rightarrow d_t$ into the agent, instead of updating the parameters of the underlying LM backbone $\mathcal{L}$ for agent policy $\pi_{\mathcal{L}}$ .
51
+
52
+ Unlike AWM which represents skills in free-form text representations and can only augment agent memory via $\mathcal{M}_t \cup \{d_t\} \to \mathcal{M}_{t+1}$ (Figure 1 top), we introduce ASI that represents skills as executable python programs, and directly integrate skills into the agent action space instead, via $\mathcal{A}_t \cup \{d_t\} \to \mathcal{A}_{t+1}$ (Figure 1 bottom).
53
+
54
+ # 2.3 Inducing and Verifying Programmatic Skills
55
+
56
+ To improve the induction quality, we propose a change in representation from free-form text to executable programs, which offers advantages in correctness and efficiency. For one, the program format enables ready verification on skill correctness by executing them; for another, skill programs abstract multiple lower-level actions into a higher-level function call, thus agents can solve tasks in fewer steps without tackling tricky low-level details.
57
+
58
+ Inducing Programmatic Skills We first clean the input episodes to ensure the induction quality. We remove all the steps that cause execution errors such as invalid argument format, to keep these invalid actions from distracting agent predictions. Furthermore, noticing the long and possibly redundant thought process generated by agents along with each action, we simplify each thought text paragraph into a short one-sentence description (e.g., "Clicked the directions button to access the route planning feature") using LM, effectively reducing the thought content from 87.9 to 13.4 tokens per step.
59
+
60
+ Given a clean input episode $e$ , we now prompt the induction module $I$ to produce one or more program functions to represent reusable skills $\mathcal{D} = \{d\}$ as executable programs. As exemplified in Figure 2, given the input episode on the left side, the induction module first produces two skills open_marketreviews() and searchreviews(search_box_id, search_button_id, search_term) in the form of callable program functions.
61
+
62
+ Skill Verification With the programmatic nature of ASI's skills, we can readily verify their correctness by executing them and checking if tasks can be solved successfully. While a naive way is to query the agent with the same NL query and allow it to use newly induced skill actions, we find agents may not always use new skills due to the large search space of
63
+
64
+ ![](images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg)
65
+ Figure 2: Inducing programmatic skills and rewriting the trajectory from an episode.
66
+
67
+ possible action trajectories. To have agents more efficiently generate trajectories that test skills in a more targeted way, we curate a rewritten trajectory prefix $\tau_{D}$ to constrain the first few steps executed in the environment, by rewriting and truncating the input action trajectory $\tau$ , and subsequently asking the agent to complete the prefix to get a full, checkable trajectory $\tau_{f}$ . Concretely, we first take the original action trajectory in the input episode $\tau$ (consisting of primitive actions or previously learned skills), and ask the induction module $I$ to transform it to a skill-using trajectory (Figure 2 bottom right), by replacing sub-trajectories in $\tau$ with calls to the newly induced skill programs $\mathcal{D}$ , if possible. Zooming into the Figure 2 example, this procedure merges click('Marketing') $\rightarrow$ click('All Reviews') to an open_marketreviews() call; transforms fill(757, 'satisfied') $\rightarrow$ click('Search') to a call of the second skill searchreviews('satisfied') with the specified term 'satisfied'; and adopted the last send msg_to_user('2') step directly. Note that we follow Wang et al. (2024b) and induce skills according to each website, so some skills could be tailored to particular webpage contexts such as the 'Marketing' and 'All Reviews' link constants in open_marketreviews, while other skills apply to more versatile setups such as searching for different reviews in searchreviews.
68
+
69
+ Next, to avoid spurious successes in skill verification, we truncate the trajectory yielded above by removing any trailing primitive actions after the last call to a skill program. Taking Figure 2 as an example, in the original input trajectory, the last send_msg_to_user('2') already sends over the correct answer '2' to the user. However, if we directly adopt this last step into the skill-using trajectory $\tau_{D}$ , then executing it will always return the correct message to the user, regardless of whether the previous skill calls are valid. We thus remove such trailing actions to make sure verification attends to the induced skills we are testing.
70
+
71
+ After rewriting and truncation, we get this skill-using trajectory $\tau_{D}$ as the prefix to test skills. We now query the agent $\pi$ again with the same NL query $q$ and first execute $\tau_{D}$ on the environment. We then allow agents to continue generating up to $H_{max} - |\tau_{D}|$ actions to finish the task. In the Figure 2 example, to successfully solve query $q$ , we expect the agent to generate another step of send_msg_to_user(...) with the correct answer '2' in the message. We take the concatenation of the trajectory prefix $\tau_{D}$ and the later additionally produced new steps $\tau_{A}$ (e.g., [send_msg_to_user('2')]) as the full trajectory $\tau_{f}$ . We then decide whether to add the induced programs $\mathcal{D}$ into the agent skill library as $\mathcal{A}$ by examining $\tau_{f}$ .
72
+
73
+ Specifically, we check $\tau_{f}$ from three dimensions: (1) Correctness: if executing $\tau_{f}$ successfully solves the task $q$ as judged by the neural model evaluator $V_{\mathcal{L}}$ ; (2) Skill Usage: if the trajectory contains at least one call to at least one new skill in $\mathcal{D}$ ; and (3) Skill Validity: if all skill-calling actions cause environment changes. If all three boxes are checked, we add the skills being called in the trajectory $\tau_{f}$ to the agent skill library $\mathcal{A}_{t} \cup \mathcal{D}_{\text{called}} \rightarrow \mathcal{A}_{t+1}$ . By adding $\mathcal{D}_{\text{called}}$ , the agent can now generate actions that call these skill programs to solve subsequent tasks.
74
+
75
+ # 3 General Web Navigation Performance
76
+
77
+ # 3.1 Experiment Setup
78
+
79
+ Benchmark and Evaluation To evaluate ASI on general web navigation scenarios, we adopt the WebArena benchmark (Zhou et al., 2024a) that contains 812 test examples covering
80
+
81
+ <table><tr><td>Model</td><td>Method</td><td># Steps</td><td>SR</td><td>Shop</td><td>Admin</td><td>Reddit</td><td>GitLab</td><td>Maps</td><td>Multi</td></tr><tr><td rowspan="2">GPT</td><td>Vanilla</td><td>-</td><td>12.3</td><td>13.9</td><td>10.4</td><td>6.6</td><td>15.0</td><td>15.6</td><td>8.3</td></tr><tr><td>AWM</td><td>5.9</td><td>35.5</td><td>32.1</td><td>29.1</td><td>54.7</td><td>35.0</td><td>42.2</td><td>18.8</td></tr><tr><td rowspan="3">Claude</td><td>Vanilla</td><td>5.6</td><td>32.7</td><td>32.6</td><td>36.8</td><td>36.8</td><td>26.1</td><td>38.5</td><td>20.8</td></tr><tr><td>AWM</td><td>5.9</td><td>36.3</td><td>34.8</td><td>39.0</td><td>51.9</td><td>28.9</td><td>39.4</td><td>18.8</td></tr><tr><td>ASI (ours)</td><td>5.0</td><td>40.4</td><td>40.1</td><td>44.0</td><td>54.7</td><td>32.2</td><td>43.1</td><td>20.8</td></tr></table>
82
+
83
+ Table 1: WebArena success rate by adaptive agents with programmatic skills, in comparison to a static vanilla agent baseline, and a text-skill learning adaptive agent.
84
+
85
+ five major web activity domains: e-commerce, social forum, software development, content management, and travel. Each example in WebArena has an NL query $q$ for the task, and a program-based evaluator that provides a binary $0/1$ score for any given trajectory $\tau$ to judge if it successfully solves the task $q$ . This program-based evaluator enables relatively rigorous evaluation based on the functional correctness of the action trajectory. We report the average score across all WebArena examples, if not specified otherwise.
86
+
87
+ Backbone LM and Agent Architecture We use the top-performing claude-3.5-sonnet model as the LM backbone for all components, including the agent policy $\pi$ , the neural evaluator $V$ , and the skill induction modules $I$ . For experimentation, we use the BrowserGym (Chezelles et al., 2024) framework, which takes the webpage accessibility tree as observation, and instantiates the skill library $\mathcal{A}$ with the WebArena default action space listed in $\S A$ .
88
+
89
+ Baselines We take the vanilla Claude model with the BrowserGym framework (Drouin et al., 2024) as the non-adaptive agent baseline. Additionally, we compare ASI to AWM (Wang et al., 2024b), the current top-performing online adaptive web agent method. Because AWM was originally developed with the gpt-4o model, for a fairer comparison, we also experiment with AWM with claude-3.5-sonnet model as its LM backbone and also apply the episode cleaning procedure to enhance induction quality. We compare the two baseline methods with our ASI approach. We provide the complete prompts for each agent component: task-solving, episode evaluation, episode cleaning, and skill induction, in §A.
90
+
91
+ # 3.2 Results and Analysis
92
+
93
+ In Table 1, compared to the vanilla static-agent baseline, adaptive agents (AWM and ASI) generally achieve $11.0 - 23.5\%$ higher success rates overall. Among adaptive agents, our ASI with programmatic skills, achieves another $11.3\%$ success rate gain across websites, compared to its AWM counterpart that induces and uses textual skills. Meanwhile, ASI offers additional efficiency benefits by reducing the number of steps in solutions by $15.3\%$ and $10.6\%$ than vanilla and AWM agents, as one skill-call action can often execute multiple steps written in primitive actions used by vanilla and AWM agents. These advantages in correctness and efficiency are exhibited prominently across different websites and tasks, as shown by the website breakdown on Table 1 (right). Refer to $\S B$ for more analysis.
94
+
95
+ # 3.3 Why are Programmatic Skills Better?
96
+
97
+ To more concretely answer why programmatic skills are more effective than textual skills, we take a closer look on the two main differences between AWM and ASI: [1] whether the induction outcome is verified via execution, and [2] whether the induced skills are provided in memory for reference purpose only, or in the action space that allows execution.
98
+
99
+ Better Induction Quality We take the shopping website as a representative, and analyze the textual and program skills induced by AWM and ASI agents. We group textual and program skills by their functionality and show one representative example in Table 2. Compared to the clear functional boundary and highly-reusable granularity of the search_product skill, we find that the textual skills often have (1) more redundant steps, (2) example-specific context: e.g., the last text skill aims to find 'game accessories' while the steps generally apply to any product, and (3) fuzzier boundaries between separable tasks, e.g., the first skill mixes product-search and add-to-wishlist procedures, thus may not offer optimal guidance when asked to, e.g., search product and add it to cart instead.
100
+
101
+ <table><tr><td>Programmatic Skills</td><td>Textual Skills</td></tr><tr><td rowspan="3">def search_product(search_box_id: str, query: str):
102
+ &quot;&quot;&quot;Search for a product using the search box.
103
+ Args:
104
+ search_box_id: ID of the search input field
105
+ query: Search query string to enter
106
+ Returns:
107
+ None
108
+ Examples:
109
+ search_product(&#x27;595&#x27;, &#x27;sony bluetooth headphones&#x27;)
110
+ &quot;&quot;&quot;
111
+ click(search_box_id)
112
+ fill(search_box_id, query)
113
+ keyboard_press(&#x27;Enter&#x27;)</td><td>Task: Search for a product and add it to wish list
114
+ Action Trajectory:
115
+ __fill(621, {product_name})__ # Enter the product
116
+ name in the search box
117
+ __click(478)__ # Click the search button to execute the
118
+ search
119
+ __click({product_link})__ # Click the product to check
120
+ more details
121
+ __click(1769)__ # Click the &quot;Add to Wish List&quot; link</td></tr><tr><td>Task: Search for a product&#x27;s price range in the store
122
+ Action Trajectory:
123
+ __fill(565, {product_name})__ # Enter the product
124
+ name in the search box
125
+ __click(570)__ # Click the search button to execute the
126
+ search
127
+ __noop(1000)__ # Wait for search results to load
128
+ __send_msg_to_user({price_range_info})__ #
129
+ Analyze and report the price range findings from the
130
+ search results</td></tr><tr><td>Task: Search for gaming accessories within a date range
131
+ Action Trajectory:
132
+ __click(1274)__ # Navigate to the Video Games category
133
+ __fill(473, {search_terms})__ # Enter search terms
134
+ including product name and year
135
+ __click(478)__ # Execute the search</td></tr></table>
136
+
137
+ Verified Induction Improves End Success Rate From qualitative examination of the induction outcomes, we find roughly similar numbers of episodes evaluated as correct and used for induction (70 and 58 examples for AWM and ASI), ASI produced programs pass verification for only $15.6\%$ of the turns, whereas AWM adds new skills for $31.4\%$ of the time (replace or add none otherwise). While skill usage (in memory or as action, [2]) is designated for AWM and ASI, we hypothesize that verification [1] affects induction quality and thus end success. We thus experiment with another setting that induces programs (such that verification is enabled), and only use the induced skills in memory, to study the importance of induction quality. As shown in Table 3, inducing skills with execution-based verification (i.e., (unverified, text) $\rightarrow$ (verified, program)), while always present skills in memory, improves end success rate by 4.2 points, indicating the importance of higher-quality induction via verification. Yet it is still 3.7 points lower than ASI, suggesting the incompatibility of program format to agent memory. Indeed, we observe many cases where the agent tries to call the skill programs but unsuccessfully, since they are not supported in the action space.
138
+
139
+ Textual Representations Suit Memory Better To prevent the agent from trying to call these plausible programs, we ablate another setting that transforms program skills to textual format (as Table 2 right) and provide them in agent memory, dubbed (verified, text). This format transformation effectively improves the overall success rate by another 2.6 points, getting a little closer to ASI. Given the different downstream usage,
140
+
141
+ Table 2: Example textual and program skills induced on the shopping website.
142
+
143
+ <table><tr><td colspan="2">Method</td><td>SR</td></tr><tr><td rowspan="3">Add to Memory</td><td>unverified, text</td><td>32.6</td></tr><tr><td>verified, program</td><td>36.4</td></tr><tr><td>verified, text</td><td>39.0</td></tr><tr><td>Add as Actions</td><td>verified, program</td><td>40.1</td></tr></table>
144
+
145
+ Table 3: Ablation study on induction verification and format on the shopping website.
146
+
147
+ i.e., memory or actuation, textual and program formats may suit individual scenarios better.
148
+
149
+ Beyond basic web navigation tasks, in the next two sections, we examine agents in two other important scenarios, scaled-up activities (§4) and cross-website generalization (§5).
150
+
151
+ # 4 Scaled-Up Browsing Activities
152
+
153
+ The WebArena benchmark mainly features isolated, single-task scenarios, such as adding a single product to the shopping cart. However, in real-world practices, people need to do a series of such tasks together, such as adding multiple related products (e.g., coffee and
154
+
155
+ mug) to the cart before finally checking out. This browsing request can lead to extremely long-horizon tasks, sometimes with repetitive intermediate procedures. We identify this to be a scenario to further demonstrate the efficacy of program skills, as opposed to textual skills, as programs lend themselves naturally to repeated invocation and composition.
156
+
157
+ Therefore, we curate several case scenarios where the user asks for action-dense instructions, such as the tasks listed in Figure 3. Because the tasks are long-horizon and involve multiple sub-tasks, we follow Xu et al. (2024) and set up intermediate checkpoints to better track the intermediate progress of agents. Refer to §C.1 to see the full list of tasks and their evaluation checkpoints. We measure the success rate of each example by the percentage of checkpoints achieved by the agent. We report the average success rate of all examples, as well as the average number of steps taken to solve the tasks, in Table 4.
158
+
159
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Shopping</td><td colspan="2">Admin</td><td colspan="2">Reddit</td><td colspan="2">GitLab</td><td colspan="2">Map</td></tr><tr><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td></tr><tr><td>VANILLA</td><td>41.7</td><td>23.5</td><td>58.0</td><td>20.8</td><td>33.3</td><td>23.0</td><td>33.3</td><td>40.0</td><td>40.0</td><td>15.2</td></tr><tr><td>AWM</td><td>68.3</td><td>21.5</td><td>74.0</td><td>18.2</td><td>40.0</td><td>16.8</td><td>50.0</td><td>33.8</td><td>65.0</td><td>12.6</td></tr><tr><td>ASI (ours)</td><td>100.0</td><td>16.3</td><td>91.0</td><td>14.2</td><td>55.0</td><td>12.8</td><td>55.0</td><td>25.4</td><td>100.0</td><td>6.2</td></tr></table>
160
+
161
+ Table 4: Performance of vanilla, AWM, and ASI agents in scaled-up browsing scenarios. We perform statistical testing between ASI and each baseline and verify all improvements are statistically significant with t-statistics $|t| > 2$ and $p < 0.05$ ; see §C.3 for more details.
162
+
163
+ ASI Features Improved Efficiency Across all websites, ASI-produced trajectories have 6.6-14.6 and 4.0-8.4 fewer steps, compared to vanilla and AWM baselines, respectively. As the task horizon continues to grow when involving more intermediate checkpoints, this margin between ASI and baselines will predictably be more prominent.
164
+
165
+ Subsequent Benefits in Success Rate ASI also achieves higher success rates with more efficient trajectories, outperforming vanilla and AWM baselines by $38.9\%$ and $20.7\%$ on average. From manual analysis, we find this improvement comes from easier, better agent planning when using higher-level skills, without the need to tackle more complex procedures if only low-level primitive actions are available, as with vanilla and AWM agents.
166
+
167
+ Case Study: Changing Multiple Addresses We present a representative case on the shopping website: changing billing and shipping addresses after moving. As depicted in the top row in Figure 3, the vanilla agent without adaptive skills often roams into some irrelevant exploration steps, instead of sticking to the optimal route to solve the required task. It runs for minutes and exhausts the maximum steps (i.e., 50) before finishing the task.
168
+
169
+ ![](images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg)
170
+ Figure 3: Example scaled-up task of updating multiple addresses on shopping website.
171
+
172
+ With AWM, adding textual skills in memory provides soft guidelines for agents to follow, the agent thus better sticks to the goal and finishes each part of the task (i.e., navigate to the address page, update billing/shipping address) step by step. Although successful, the trajectory is long, i.e., 27 steps, and still takes a few minutes to finish.
173
+
174
+ In comparison, ASI (in Figure 3 bottom row) showcases its efficiency: using the learned skills navigate_to_address_settings and update_address_details, it can solve each part in one step (vs. the 3-6 steps used by AWM for these parts). Overall, ASI correctly finishes all required actions in only 4 steps, shortening the horizon by $85.2\%$ compared to AWM.
175
+
176
+ # 5 Adapting Across Websites
177
+
178
+ To examine whether agents can generalize with learned skills, we test agents on real-world website counterparts for some of the domains in WebArena as listed in Table 5. This experiment setup can reflect on (1) transfer across different websites of the same domain,
179
+
180
+ and (2) transfer from simulated, sandboxed to real-world websites.
181
+
182
+ <table><tr><td>Domain</td><td>WebArena Sandboxed</td><td>Real-World</td></tr><tr><td>shopping</td><td>OneStopMarket</td><td>Target</td></tr><tr><td>online forum</td><td>PostMill</td><td>Reddit</td></tr><tr><td>travel</td><td>OpenStreetMap</td><td>Google Maps</td></tr></table>
183
+
184
+ Table 5: Real-world in-domain website counterparts to each WebArena sandboxed website.
185
+
186
+ For each sandbox-real website pair, we take ten information-seeking style queries (He et al., 2024) in WebArena that do not involve potential privacy leakage or unrecoverable risky actions, such as making a purchase or changing user password. We provide the task details in §C.2. We compare ASI and AWM with their programmatic and textual skills as learned in §3, as well as comparing to the vanilla static agent baseline.
187
+
188
+ Transferring Common Skills In Figure 4, we can see how ASI can effectively reuse common skills such as search_product in the first step on the Target website.
189
+
190
+ Incompatible Skills One challenge faced by ASI is that some prior skills become incompatible on the new website. For example, the sort_by_listings() skill induced on OneStopMarket selects options from a dropdown menu, yet sorting on
191
+
192
+ the Target website opens a sidebar; despite their semantic similarity, the concrete actions in skill programs are no longer applicable. Still, we find that agents can often spot this incompatibility and rarely attempt to use these deprecated skills.
193
+
194
+ ![](images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg)
195
+ Figure 4: ASI can generalize the search_product skill but face incompatibility when sorting items.
196
+
197
+ Adapting Skills to New Environment Although some skills induced on previous websites
198
+
199
+ cannot be directly used on arbitrary new websites, we hypothesize that these skills can still serve as informative references on solving procedurally similar tasks or composing new skills targeted for the new website design.
200
+
201
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Shopping</td><td colspan="2">Reddit</td><td colspan="2">Map</td></tr><tr><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td><td>sr ↑</td><td># steps ↓</td></tr><tr><td>Vanilla</td><td>80.0</td><td>5.4</td><td>40.0</td><td>4.8</td><td>63.3</td><td>7.4</td></tr><tr><td>AWM</td><td>80.0</td><td>5.0</td><td>56.7</td><td>4.8</td><td>100.0</td><td>6.2</td></tr><tr><td>ASI</td><td>90.0</td><td>3.4</td><td>76.7</td><td>4.4</td><td>93.3</td><td>4.4</td></tr><tr><td>AWM + update</td><td>80.0</td><td>5.4</td><td>63.3</td><td>5.8</td><td>100.0</td><td>7.2</td></tr><tr><td>ASI + update</td><td>90.0</td><td>3.2</td><td>76.7</td><td>4.0</td><td>93.3</td><td>4.2</td></tr></table>
202
+
203
+ Table 6: Cross-website results. ASI significantly surpasses baselines in sr and # steps (with $|t| > 2$ and $p < 0.05$ ) from our analysis in §C.3.
204
+
205
+ We thus allow agents to induce new skills or update previously acquired skills from experiences on the new website, denoted as +update entries in Table 6. We find that enabling skill update in both textual and program formats helps agent performance on new websites. Within the short online learning process (tens of examples), AWM adapts faster to the new websites, while ASI sees a more pronounced improvement in efficiency.
206
+
207
+ # 6 Related Work
208
+
209
+ Adaptive Digital Agents An important thread of agent-improving methods is to build adaptive agents that can autonomously self-improve from experiences. Most works focus on integrating past experiences into agent memory by collecting human annotation (Deng et al., 2024) or LM-based synthesis (Ou et al., 2024; Xu et al., 2025), especially via agent-driven exploration with instruction- (Murty et al., 2024b) or trajectory-driven (Murty et al., 2024a) approaches, offering warm starts on the websites of interest. Other works gather experiences (Wang et al., 2024b) or feedback (Qu et al., 2024) during test time, and augment them into memory through parametric channels such as supervised fine-tuning (Murty et al., 2024a), contrastive learning (Song et al., 2024), or reinforcement learning (Zhou et al., 2024b). Meanwhile, non-parametric approaches can directly augment reflections (Shinn et al., 2023), raw past experiences (Wang et al., 2023; Zheng et al., 2023), or further induced reusable workflows (Wang et al., 2024b). While these adaptive agents learn textual skills stored in memory, our ASI stores skills as verifiable and composable programs in the agent action space (i.e., skill library), thus enabling better quality and efficiency.
210
+
211
+ Skill Discovery and Learning Learning specialized skills for tasks in programmatic (Shin et al., 2019; Ellis et al., 2023; Cai et al., 2024; Wang et al., 2024a; Grand et al., 2024), embodied (Sharma et al., 2022; Wang et al., 2023; Liang et al., 2023; Sarch et al., 2024; Wong et al., 2024), and physical (Yu et al., 2023) environments has shown to success in agent performance. Particularly for digital agents built for web navigation tasks, most works focus on exploring skills offline with RL roll-outs (Gur et al., 2018; Liu et al., 2018; Putta et al., 2024; Qi et al., 2024) or LM-based prompting (Zhou et al., 2024b; Murty et al., 2024a; Patel et al., 2024). While this exploration stage could offer some supervised data to update the agent policy either parametric (Murty et al., 2024a; Patel et al., 2024) or non-parametrically (Zheng et al., 2023; Murty et al., 2024b), it often costs enormous extra computation and may suffer from the lack or mismatch in distribution with the downstream tasks at hand (Wang et al., 2024b). In contrast, our ASI does not rely on supervised data and can directly learn skills online without prior exploration.
212
+
213
+ Web Navigation Benchmarks Digital agents have been explored across a wide range of tasks (Yao et al., 2024; Kapoor et al., 2025; Xie et al., 2024b), among which one of the most popular application being browsing and navigating through versatile websites such as shopping (Yao et al., 2022), social media communication (Zhou et al., 2024a; Koh et al., 2024), knowledge work tasks (Drouin et al., 2024), and more (Deng et al., 2024). Our work focuses on general web navigation tasks using the WebArena (Zhou et al., 2024a) benchmark, meanwhile exploring other challenging scenarios such as scaled-up activities (Yoran et al., 2024) and cross-domain generalization (Deng et al., 2024).
214
+
215
+ # 7 Conclusion and Future Discussions
216
+
217
+ In this work, we present ASI to support web navigation agents to autonomously induce, verify, learn, and apply programmatic skills during online inference. Beyond achieving $23.5\%$ success rate and $15.3\%$ efficiency increases in general web tasks, we also showcase ASI's strengths for scaled-up web activities, thanks to the high-level action interface offered by the programmatic abstraction. Moreover, we examine skill generalizability to new, real-world websites, and find ASI still offers great efficiency while flexibly updating skills to new environments. While our work aims to offer insights on the optimal representation in agent skill acquisition, we still find multiple pieces in ASI worthy of further investigation, such as the conceptually or empirically suitable granularity of skills, the stability of the online evolving process, and the skill quality in comparison to human expert desiderata.
218
+
219
+ # Acknowledgments
220
+
221
+ We would like to thank Jiayuan Mao, Yueqi Song, Boyuan Zheng, and Yu Su for the insightful discussions. We thank Yiqing Xie, Xinran Zhao, and Mingqian Zheng for their helpful comments on the paper draft. Zora is supported by the CMU Presidential Fellowship and Fujitsu Research. Apurva is supported by Amazon.
222
+
223
+ # References
224
+
225
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf.
226
+ Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qV83K9d5WB.
227
+ De Chezelles, Thibault Le Sellier, Maxime Gasse, Alexandre Lacoste, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, et al. The browsergym ecosystem for web agent research. arXiv preprint arXiv:2412.05467, 2024.
228
+ Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36, 2024.
229
+ Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, et al. Workarena: How capable are web agents at solving common knowledge work tasks? arXiv preprint arXiv:2403.07718, 2024.
230
+ Kevin Ellis, Lionel Wong, Maxwell Nye, Mathias Sable-Meyer, Luc Cary, Lore Anaya Pozo, Luke Hewitt, Armando Solar-Lezama, and Joshua B Tenenbaum. Dreamcoder: growing generalizable, interpretable knowledge with wake-sleep bayesian program learning. Philosophical Transactions of the Royal Society A, 381(2251):20220050, 2023.
231
+ Yao Fu, Dong-Ki Kim, Jaekyeom Kim, Sungryull Sohn, Lajanugen Logeswaran, Kyunghoon Bae, and Honglak Lee. Autoguide: Automated generation and selection of state-aware guidelines for large language model agents. CoRR, abs/2403.08978, 2024. URL https://doi.org/10.48550/arXiv.2403.08978.
232
+ Gabriel Grand, Lionel Wong, Matthew Bowers, Theo X. Olausson, Muxin Liu, Joshua B. Tenenbaum, and Jacob Andreas. LILO: Learning interpretable libraries by compressing and documenting code. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=TqYbAWKMJe.
233
+ Izzeddin Gur, Ulrich Rückert, Aleksandra Faust, and Dilek Hakkani-Tür. Learning to navigate the web. CoRR, abs/1812.09195, 2018. URL http://arxiv.org/abs/1812.09195.
234
+ Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, 2024. URL https://aclanthology.org/2024.acl-long.371/.
235
+
236
+ Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem AlShikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In European Conference on Computer Vision, pp. 161-178. Springer, 2025.
237
+ Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. Visualwebarena: Evaluating multimodal agents on realistic visual web tasks. arXiv preprint arXiv:2401.13649, 2024.
238
+ Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020.
239
+ Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng. Code as policies: Language model programs for embodied control. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493-9500. IEEE, 2023.
240
+ Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, and Percy Liang. Reinforcement learning on web interfaces using workflow-guided exploration. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=ryTp3f-0-.
241
+ Bodhisattwa Prasad Majumder, Bhavana Dalvi, Peter Jansen, Oyvind Tafjord, Niket Tandon, Li Zhang, Chris Callison-Burch, and Peter Clark. CLIN: A continually learning language agent for rapid task adaptation and generalization, 2024. URL https://openreview.net/forum?id=d5DGVHMsC.
242
+ Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator, 2024a. URL https://arxiv.org/abs/2410.02907.
243
+ Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. Bagel: Bootstrapping agents by guiding exploration with language, 2024b. URL https://arxiv.org/abs/2403.08140.
244
+ Tianyue Ou, Frank F. Xu, Aman Madaan, Jiarui Liu, Robert Lo, Abishek Sridhar, Sudipta Sengupta, Dan Roth, Graham Neubig, and Shuyan Zhou. Synatra: Turning indirect knowledge into direct demonstrations for digital agents at scale. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=KjNEzWRIqn.
245
+ Yichen Pan, Dehan Kong, Sida Zhou, Cheng Cui, Yifei Leng, Bing Jiang, Hangyu Liu, Yanyi Shang, Shuyan Zhou, Tongshuang Wu, and Zhengyang Wu. Webcanvas: Benchmarking web agents in online environments. In Agentic Markets Workshop at ICML 2024, 2024. URL https://openreview.net/forum?id=O1FaGasJob.
246
+ Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024.
247
+ Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024.
248
+ Zehan Qi, Xiao Liu, Iat Long Iong, Hanyu Lai, Xueqiao Sun, Wenyi Zhao, Yu Yang, Xinyue Yang, Jiadai Sun, Shuntian Yao, et al. Webrl: Training llm web agents via self-evolving online curriculum reinforcement learning. arXiv preprint arXiv:2411.02337, 2024.
249
+ Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve, 2024. URL https://arxiv.org/abs/2407.18219.
250
+
251
+ Gabriel Sarch, Lawrence Jang, Michael Tarr, William W Cohen, Kenneth Marino, and Katerina Fragkiadaki. Vlm agents generate their own memories: Distilling experience into embodied programs of thought. Advances in Neural Information Processing Systems, 37: 75942-75985, 2024.
252
+ Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or rl is suboptimal. arXiv preprint arXiv:2502.12118, 2025.
253
+ Pratyusha Sharma, Antonio Torralba, and Jacob Andreas. Skill induction and planning with latent language. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, May 2022. URL https://aclanthology.org/2022.acl-long.120/.
254
+ Eui Chul Shin, Miltiadis Allamanis, Marc Brockschmidt, and Alex Polozov. Program synthesis and semantic parsing with learned code idioms. Advances in Neural Information Processing Systems, 32, 2019.
255
+ Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning, 2023. URL https://arxiv.org/abs/2303.11366.
256
+ Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for llm agents, 2024. URL https://arxiv.org/abs/2403.02502.
257
+ Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent with large language models, 2023. URL https://arxiv.org/abs/2305.16291.
258
+ Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=DCNCwamJjI.
259
+ Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024b.
260
+ Lionel Wong, Jiayuan Mao, Pratyusha Sharma, Zachary S Siegel, Jiahai Feng, Noa Korneev, Joshua B. Tenenbaum, and Jacob Andreas. Learning grounded action abstractions from language. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qJ0Cfj4Ex9.
261
+ Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. Travelplanner: A benchmark for real-world planning with language agents. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=15XQzNkAOe.
262
+ Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, et al. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. arXiv preprint arXiv:2404.07972, 2024b.
263
+ Frank F Xu, Yufan Song, Boxuan Li, Yuxuan Tang, Kritanjali Jain, Mengxue Bao, Zora Z Wang, Xuhui Zhou, Zhitong Guo, Murong Cao, et al. Theagentcompany: benchmarking llm agents on consequential real world tasks. arXiv preprint arXiv:2412.14161, 2024.
264
+ Yiheng Xu, Dunjie Lu, Zhennan Shen, Junli Wang, Zekun Wang, Yuchen Mao, Caiming Xiong, and Tao Yu. Agenttrek: Agent trajectory synthesis via guiding replay with web tutorials. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=EEgYUccwsV.
265
+ Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35:20744-20757, 2022.
266
+
267
+ Shunyu Yao, Noah Shinn, Pedram Razavi, and Karthik Narasimhan. $\tau$ -bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024.
268
+ Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? arXiv preprint arXiv:2407.15711, 2024.
269
+ Wenhao Yu, Nimrod Gileadi, Chuyuan Fu, Sean Kirmani, Kuang-Huei Lee, Montserrat Gonzalez Arenas, Hao-Tien Lewis Chiang, Tom Erez, Leonard Hasenclever, Jan Humplik, brian richter, Ted Xiao, Peng Xu, Andy Zeng, Tingnan Zhang, Nicolas Heess, Dorsa Sadigh, Jie Tan, Yuval Tassa, and Fei Xia. Language to rewards for robotic skill synthesis. In 7th Annual Conference on Robot Learning, 2023. URL https://openreview.net/forum?id=SgTPdyehXMA.
270
+ Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. Synapse: Trajectory-as-exemplar prompting with memory for computer control. In The Twelfth International Conference on Learning Representations, 2023.
271
+ Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx.
272
+ Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Erran Li. Proposer-agent-evaluator (pae): Autonomous skill discovery for foundation model internet agents. arXiv preprint arXiv:2412.13194, 2024b.
273
+
274
+ # A Experiment Details
275
+
276
+ # A.1 Agent Action Space
277
+
278
+ Table 7 shows the default action space of the web navigation agents we employed in all the experiments. This action space remains the same for both (i) the static, vanilla agent, as well as (ii) the adaptive agent that learns textual skills in memory, i.e., AWM.
279
+
280
+ <table><tr><td>Action Type</td><td>Description</td></tr><tr><td>noop(wait_ms)</td><td>Do nothing for specified time.</td></tr><tr><td>click(elem)</td><td>Click at an element.</td></tr><tr><td>hover(elem)</td><td>Hover on an element.</td></tr><tr><td>fill(elem, value)</td><td>Type into an element.</td></tr><tr><td>keyboard_press(key_comb)</td><td>Press a key combination.</td></tr><tr><td>scroll(x, y)</td><td>Scroll horizontally or vertically.</td></tr><tr><td>select_option(elem, options)</td><td>Select one or multiple options.</td></tr><tr><td>goto(url)</td><td>Navigate to a url.</td></tr><tr><td>go_back()</td><td>Navigate to the previous page.</td></tr><tr><td>go_forward()</td><td>Navigate to the next page.</td></tr><tr><td>new_tab()</td><td>Open a new tab.</td></tr><tr><td>tab_close()</td><td>Close the current tab.</td></tr><tr><td>tab_focus(index)</td><td>Bring tab to front.</td></tr><tr><td>send_msg_to_user(text)</td><td>Send a message to the user.</td></tr><tr><td>report_infeasible(reason)</td><td>Notify user that instructions are infeasible.</td></tr></table>
281
+
282
+ Table 7: Initial action space for agents throughout our experiments.
283
+
284
+ # A.2 Prompts for LLM-Based Components
285
+
286
+ # Prompt for Episode Evaluation
287
+
288
+ You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not. Please be careful of each detail and strict about the evaluation process.
289
+
290
+ There are three types of tasks: 1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail. 2. Site navigation: The user wants to navigate to a specific page. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. 3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.
291
+
292
+ *IMPORTANT* Please be strict about the evaluation process. Format your response into two lines as shown below:
293
+
294
+ Thoughts: <your thoughts and reasoning process>
295
+
296
+ Status: "success" or "failure"
297
+
298
+ # Prompt for Episode Cleaning
299
+
300
+ You are a helpful assistant in summarizing web browsing actions. Your task is to summarize the main action taken and the corresponding state change, from the input thought process and environmental description.
301
+
302
+ Example input: "Let me think about this step by step: 1) I've clicked the Columns button and based on the accessibility tree, I can now see a list of available columns that can be shown/hidden. 2) Looking through the available columns in the accessibility tree, I don't see any direct column for "Number of Orders" or similar metric. 3) Since I don't see the information I need in the available columns, this suggests that: - Either this information is available through a different path - Or we need to access the Sales/Orders section to correlate this information 4) The most logical path would be to: - Go to the Sales section where we can see all orders - And then analyze which customers have the most orders 5) From the accessibility tree, I can see the "SALES" link with bid "183" in the main navigation.
303
+
304
+ Let me click on the SALES link: This should open the Sales menu where we can access the Orders section to analyze customer order histories."
305
+
306
+ Example output: "Clicked the SALES link to access the Orders section for customer order histories."
307
+
308
+ # Prompt for Skill Induction
309
+
310
+ You are a proficient software engineer. Your task is to (1) summarize reusable functions as APIs from the provided action trajectories, and (2) rewrite the trajectories using the reusable functions you generated in (1).
311
+
312
+ For (1), from the provided examples about the same task, you job is to generate Python functions that can be reused to solve (part of) these tasks. The functions should have mediocre complexity: (i) containing at least three actions and not too simple (e.g., a single line of code), (ii) not too complex (e.g., more than 10 lines of code), and should be general enough to be applied to other similar tasks. The arguments to these functions should be common variables (such as strings and lists), avoid using complex inputs such as another function.
313
+
314
+ The actions you can use are: 'click', 'fill', 'hover', 'keyboard_press', 'scroll'; 'tab_focus', 'new_tab', 'tab_close', 'go_back', 'go_forward'; 'send_msg_to_user', 'report_infeasible', 'select_option'. Do not use other undefined actions. Do not include any try-except blocks in the functions.
315
+
316
+ Please include 'Args', 'Returns', and 'Examples' in the function documentation. For (2), write the instruction and rewritten code of each example. Do not include the answer response or example-specific information in the rewritten code. Pay attention to whether all link IDs are available before specifying them in the generated functions. If you use 'send_msg_to_user', make sure the message is decided within the function, instead of provided as an argument.
317
+
318
+ Make sure each function contains no less than 2 steps, and no more than 5 steps; to keep the functions simple and task-oriented. You can generate zero, one, or multiple functions depending on the provided examples.
319
+
320
+ # B Skill Induction: Analysis
321
+
322
+ We provide more details about the skill induction process, in skill curation and reuse frequency (§B.1) and representative skill case studies (§B.2)
323
+
324
+ # B.1 Skill Induction and Reusability
325
+
326
+ To provide more insights on how agents curate and reuse programmatic skills, for the main experiments on WebArena, we calculate the number of examples that (i) attempt to induce a new skill, (ii) successfully induce a new skill, and (iii) reuse a previously induced skill.
327
+
328
+ As shown in Table 8, the agent reuses at least 1 skill for $42.5\%$ of the cases, which is pretty frequent across websites. Moreover, these skills are created using $7.6\%$ of the tasks, demonstrating the high scalability of our skill learning approach.
329
+
330
+ <table><tr><td rowspan="2">Domain</td><td rowspan="2">Attempted Induction</td><td colspan="3">Number of Examples</td></tr><tr><td>Successful Induction</td><td>Reuse Examples</td><td>Total</td></tr><tr><td>shopping</td><td>21</td><td>8</td><td>96</td><td>180</td></tr><tr><td>admin</td><td>38</td><td>15</td><td>108</td><td>156</td></tr><tr><td>reddit</td><td>24</td><td>11</td><td>14</td><td>105</td></tr><tr><td>map</td><td>13</td><td>10</td><td>80</td><td>109</td></tr><tr><td>gitlab</td><td>25</td><td>11</td><td>10</td><td>175</td></tr></table>
331
+
332
+ Table 8: Analyzing the number of examples that attempt, induce, and reuse skills.
333
+
334
+ # B.2 Representative Skills
335
+
336
+ We demonstrate two representative types of skills, which (i) chain frequently-used-together actions, and (ii) involve advanced programming primitives.
337
+
338
+ Chaining Primitive Actions The most common type of skills chains multiple frequently-used-together primitive actions (e.g., click, fill) in a certain order, to reflect a particular common usage, such as the search_product tool illustrated below. This type of skill provides an easy portal for frequent workflows.
339
+
340
+ ```python
341
+ def search_product(name: str):
342
+ click("Search textbox...")
343
+ if has_pop_window():
344
+ click("Close")
345
+ fill("Search textbox", name)
346
+ click("Search button")
347
+ ```
348
+
349
+ Advanced Programming Primitives Besides a sequential chaining of actions, some skills contain more advanced logics, such as if-else or for/while loops, as the two examples below.
350
+
351
+ ```python
352
+ def navigate_and_sort_category(category_id: str, subcategory_id: str | None = None):
353
+ '''Navigate to a product category and sort items by price.
354
+ Args:
355
+ category_id: ID of the main category link
356
+ subcategory_id: Optional ID of the subcategory link, if needed
357
+ Returns:
358
+ None
359
+ Examples:
360
+ navigate_and_sort_category('1241', '1873') # PS4 under Video Games
361
+ navigate_and_sort_category('1245') # For main category only
362
+ '''
+ click(category_id)  # Click main category
363
+ if subcategory_id:
364
+ click(subcategory_id) # Click subcategory if provided
365
+ select_option("1553", "Price") # Sort by price ascending
366
+ ```
367
+
368
+ ```python
369
+ def browse_category_by_navigation(menu_id_sequence: list):
370
+ """Browse products by navigating through a sequence of menu IDs.
371
+ This function allows navigation through a series of menu interactions.
372
+ Args:
373
+ menu_id_sequence(list): A list of menu IDs to interact
374
+ sequentially, using hover actions followed by a click.
375
+ ```
376
+
377
+ ```txt
378
+ Example usage: browse_category_by_navigation(['735', '786', '797'])  # Navigates Home & Kitchen -> Storage & Organization -> Racks, Shelves & Drawers
379
+ """
+ for idx, menu_id in enumerate(menu_id_sequence[:-1]):
+     hover(menu_id)
+ click(menu_id_sequence[-1])  # Click the final id to land in the predefined category
380
+ ```
381
+
382
+ # C Scaled-Up and Cross-Website Tests
383
+
384
+ We provide the full list of tasks used in scaled-up (§4) and cross-website (§5) analyses in §C.1 and §C.2, respectively. In §C.3, we further perform significance testing to validate the findings from Table 4 and Table 6.
385
+
386
+ # C.1 Scaled-Up Tasks
387
+
388
+ Table 9, Table 10, Table 11, Table 12 and Table 13 shows example scaled-up tasks studied on the shopping, admin, social forum, software development, and map websites.
389
+
390
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Add a wireless headphone, a water bottle, a notebook, a ground coffee, and a mug to my shopping cart.</td><td>Add a wireless headphone to cart; Add a water bottle to cart; Add a notebook to cart; Add a ground coffee to cart; Add a mug to cart.</td><td>5</td></tr><tr><td>Add the most expensive item from the video games category, the cheapest item from the Office Products category, and the most relevant coffee mug to my shopping cart.</td><td>Add the most expensive item from the video games category to cart; Add the cheapest item from the Office Products category to cart; the most relevant coffee mug to my shopping cart.</td><td>3</td></tr><tr><td>Add the cheapest wireless headphone, a water bottle, the most expensive notebook, a ground coffee, and a mug to my shopping cart.</td><td>Add the cheapest wireless headphone to cart; Add a water bottle to cart; Add the most expensive notebook to cart; Add a ground coffee to cart; Add a mug to cart.</td><td>5</td></tr><tr><td>Show me the ordered items for each cancelled order from Feb to May in 2023.</td><td>Show me the 5/17/23 order; Show me the 2/24/23 order; Show me the 2/11/23 order.</td><td>3</td></tr><tr><td>Iterative update my billing address to 231 Willow Way, Suite 100, Chicago, IL, 60601. Then, update my shipping address to 987 Sycamore Circle, Philadelphia, PA, 19102.</td><td>Successfully update my billing address; Successfully update my shipping address.</td><td>2</td></tr></table>
391
+
392
+ Table 9: Exemplar scaled-up browsing tasks on the shopping website.
393
+
394
+ # C.2 Cross-Website Tasks
395
+
396
+ Table 14, Table 15, and Table 16 list example tasks to test agent generalization abilities on shopping (OneStopMarket to Target), social forum (Postmill to Reddit), and software development (GitLab to GitHub) domains.
397
+
398
+ # C.3 Significance Testing
399
+
400
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Tell me the number of reviews that our store received by far that mention terms &#x27;disappointed&#x27;, &#x27;satisfied&#x27;, &#x27;decent&#x27;, &#x27;not useful&#x27;, and &#x27;best&#x27;.</td><td>Return the correct number for terms &#x27;disappointed&#x27;, &#x27;satisfied&#x27;, &#x27;decent&#x27;, &#x27;not useful&#x27;, and &#x27;best&#x27;.</td><td>5</td></tr><tr><td>I need to contact a list of customers. Find the customer name and email with phone number 2058812302, 2137418080, 2065555555, 8015551212, and 555-229-3326.</td><td>Return the correct name and email information for customers with each of the five phone numbers.</td><td>5</td></tr><tr><td>I will need to update our webpage to create a more energetic vibe. Change the page title of &#x27;404 Not Found&#x27; to &#x27;Bruh bro you clicked the wrong page&#x27;, the page title of &#x27;Enable Cookies&#x27; to &#x27;Cookie monster coming to your place&#x27;, the page title of &#x27;Home Page&#x27; page to &#x27;This is the home page!!&#x27;, the page with title &#x27;Privacy Policy&#x27; to &#x27;No privacy policy is needed is this dystopian world&#x27;, and lastly, change the page &#x27;About Us&#x27; to &#x27;Secret&#x27;.</td><td>Change the page title correctly for each of the five pages.</td><td>5</td></tr><tr><td>I need to generate a bunch of report to show to the store manager in an hour. Could you help me generate a sales order report for the last month, over the last 45 days, and for Q1? I&#x27;ll also need a refund report for last year, and a tax report for this year. 
Today is 3/15/2023.</td><td>Generate a sales report for 2/1/2023-2/29/2023; generate a sales report for 1/29/2023-3/15/2023; generate a sales report for 1/1/2023-3/15/2023; Generate a refund report for 1/1/2022-12/31/2022; Generate a tax report for 1/1/2023-3/15/2023.</td><td>5</td></tr><tr><td>Tell me the SKU of products that have 10 units, 3 units, and 0 units left. Also, give me the product names that have 2-3 units left.</td><td>Return the correct SKU for the first three questions; return the correct product names for the last question.</td><td>4</td></tr></table>
401
+
402
+ Scaled-Up Tasks We conduct t-tests between (i) ASI and AWM, (ii) ASI and VANILLA agent. From the results in Table 17, we find the advantage of ASI in success rate and efficiency improvements, when comparing to both AWM and VANILLA agents, are statistically significant, as indicated by all
403
+
404
+ t-statistics with absolute values over 2 and p-value below 0.05.
405
+
406
+ Table 10: Exemplar scaled-up browsing tasks on the shopping admin website.
407
+
408
+ <table><tr><td rowspan="2">Method Pair</td><td colspan="2">Success Rate</td><td colspan="2"># Steps</td></tr><tr><td>t-stat</td><td>p-value</td><td>t-stat</td><td>p-value</td></tr><tr><td>ASI vs. AWM</td><td>-2.3601</td><td>0.0226</td><td>2.7664</td><td>0.0068</td></tr><tr><td>ASI vs. VANILLA</td><td>-4.0922</td><td>0.0002</td><td>2.1983</td><td>0.0296</td></tr></table>
409
+
410
+ Cross-Web Tasks We conduct similar significance testing on cross-web tasks and report the results in Table 18. While ASI does not significantly outperform AWM in success rate, given the presumably greater flexibility of textual workflows, ASI still exhibits significant advantages on
411
+
412
+ the efficiency side. Furthermore, comparing ASI to static VANILLA agents, ASI achieves significant improvements in both success rates and efficiency (i.e., reduced number of steps), suggested by $|t| > 2$ and $p < 0.05$ .
413
+
414
+ Table 17: Results of significance testing on ASI's advantages for scaled-up web tasks.
415
+
416
+ <table><tr><td rowspan="2">Method Pair</td><td colspan="2">Success Rate</td><td colspan="2"># Steps</td></tr><tr><td>t-stat</td><td>p-value</td><td>t-stat</td><td>p-value</td></tr><tr><td>ASI vs. AWM</td><td>-1.3980</td><td>0.1673</td><td>2.1238</td><td>0.0378</td></tr><tr><td>ASI vs. VANILLA</td><td>-3.5984</td><td>0.0007</td><td>2.5792</td><td>0.0125</td></tr></table>
417
+
418
+ Table 18: Results of significance testing on ASI's advantages for cross-web tasks.
419
+
420
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>I&#x27;m planning to organize multiple meetings in the next few months. Help me post notices on virtual meetings for the little women on Apr 10th, for Harry Potter in May 15th, and for Jane Eyre in Jan 30th, in the most suitable forums in PostMill.</td><td>Post Apr 10th meetup; Post about May 15th meetup; Post Jan 30th meetup. All in book-related forums.</td><td>3</td></tr><tr><td>Could you tell me all forums with names related to computer science?</td><td>must include: deeplearning (1 pt), Machine-Learning (1 pt); optionally (get 1 score if include any): science, askscience, technology.</td><td>3</td></tr><tr><td>Find the most relevant posts about jerseycity, newjersey, and nyc; and tell me how different they are.</td><td>Correctly find post about jerseycity; Correctly find post about newjersey; Correctly find post about nyc; Answer how different they are.</td><td>4</td></tr><tr><td>Thumbs down the top-2 posts in jerseycity, newjersey, and nyc forums, I don&#x27;t like them.</td><td>Thumbs down the top-2 posts in the jerseycity forum; Thumbs down the top-2 posts in the newjersey forum; Thumbs down the top-2 posts in the nyc forum.</td><td>3</td></tr><tr><td>Reply &quot;Thank you! This is super helpful!&quot; to three posts about long-distance relationship advice.</td><td>Reply to three posts with the correct message. Need to be relevant to long-distance relationship advice.</td><td>3</td></tr></table>
421
+
422
+ Table 11: Exemplar scaled-up tasks on the Postmill website.
423
+
424
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Display the list of issues in the a11yproject/a11yproject.com repository that have labels related to &#x27;help needed&#x27;, and assign the most recent one to the top contributor of this repository.</td><td>Display the help-wanted issues; find the top contributor; assign him to the most recent help-needed issue.</td><td>3</td></tr><tr><td>Set up a new, empty repository with the name agent_skill_induction, and create a MIT license file. Then, invite Abishek and Vinta as collaborators.</td><td>Create a new repository with given name; Create a MIT license inside; Invite both collaborators.</td><td>3</td></tr><tr><td>Start a private project web_agent.android xl with Android template and add primer, convexegg, abishek as members.</td><td>Create the repository private and with Android template; Invite all three people as members.</td><td>2</td></tr><tr><td>Add the following users to repo a11y-webring.club as developer: [abisubramanya27, lahwaacz], and [yjlou, a11yproject] as maintainer.</td><td>Add abisubramanya27 and lahwaacz as developers; Add yjlou and a11yproject as maintainers.</td><td>2</td></tr><tr><td>Add the following users [abisubramanya27, lahwaacz, yjlou, a11yproject] to repo a11y-webring.club, make sure to assign them different roles.</td><td>Add abisubramanya27 with role 1; Add lahwaacz with role 2; Add yjlou with role 3; Add a11yproject as role 4. Role 1-4 need to be all different.</td><td>4</td></tr></table>
425
+
426
+ Table 12: Exemplar scaled-up tasks on the GitLab website.
427
+
428
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Search for the closest restaurants, cafes, parking, and banks to Carnegie Mellon University on the map.</td><td>Return the closest restaurants; Return the closest cafes; Return the closest parking; Return the closest banks.</td><td>4</td></tr><tr><td>I will need to go to multiple places from Carnegie Mellon University today, including the Univ of Pittsburgh, UPMC shady-side, the Schenley park, and Squirrel Hill. Could you should me the driving route to all those places?</td><td>Show me driving route from CMU to UPitt; Show me driving route from CMU to UPMC; Show me driving route from CMU to Schenley Park; Show me driving route from CMU to Squirrel Hill.</td><td>4</td></tr><tr><td>Show me the route of driving from CMU to University of Pittsburgh, then walking to the Schenley Park; next, bike to UPMC shadyside, and walk to Squirrel Hill after that.</td><td>Show me CMU → Upitt route by car; Show me Upitt → Schenley Park route by foot; Show me Schenley Park → UPMC route by bike; Show me UPMC → Squirrel Hill route by foot.</td><td>4</td></tr><tr><td>Check if the Univ of Pittsburgh, UPMC shadyside, schenley park, and squirrel hill can be reached within one hour by walking, if departing from Carnegie Mellon University.</td><td>Return yes to route 1, route 2, route 3, and route 4.</td><td>4</td></tr><tr><td>Tell me the coordinates of Univ of Pittsburgh, UPMC shadyside, schenley park, squirrel hill, and CMU in DD format.</td><td>Return the coordinates of each of the four places.</td><td>4</td></tr></table>
429
+
430
+ Table 13: Exemplar scaled-up tasks on the Map website.
431
+
432
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Show me the options for Canon photo printer?</td><td>Return the correct search result.</td><td>1</td></tr><tr><td>I have a lot of Nintendo Switch game cards now, help me find the best storage option to fit all 11 cards.</td><td>Return one valid product.</td><td>1</td></tr><tr><td>What is the price range for beauty products?</td><td>Return the correct price range.</td><td>1</td></tr><tr><td>Show me products under $25 for woman shoes</td><td>Display correct products.</td><td>1</td></tr><tr><td>Show the least expensive shoe storage with a minimum storage capacity of 12 pairs.</td><td>Display correct products.</td><td>1</td></tr></table>
433
+
434
+ Table 14: Exemplar shopping tasks on the Target website.
435
+
436
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Tell me the names of books recommended in the latest five posts in the books forum</td><td>Find the r/books forum; Find the most recent 5 posts; Give the correct answer.</td><td>3</td></tr><tr><td>Tell me the titles of the 5 most recent posts about little women in the books forum</td><td>Find the r/books forum; Find little women related posts; Sort the posts by newest.</td><td>3</td></tr><tr><td>What are the recommended products for noise-canceling headphones within a bud-get of $200 in r/headphones</td><td>Find the r/headphones forum; Correctly search with noise-canceling, under $200 re-requirements; Return a valid headphone rec-ommendation.</td><td>3</td></tr><tr><td>Find 3 pieces of advices about deal with long-distance relationships in a subreddit for relations.</td><td>Navigate to a forum about relations; find at least 3 pieces of advice from relevant posts.</td><td>2</td></tr><tr><td>Find if there are any jeep wrangler meetups. If so, when and where?</td><td>Search in jeep wrangler related forums; Re-turn a valid answer based on the search result.</td><td>2</td></tr></table>
437
+
438
+ Table 15: Exemplar social forum tasks on the reddit website.
439
+
440
+ <table><tr><td>Instruction</td><td>Checkpoints</td><td>Score</td></tr><tr><td>Tell me the full address of all international airports that are within a driving distance of 30 miles to Carnegie Mellon University</td><td>Return Pittsburgh International Airport.</td><td>1</td></tr><tr><td>I will arrive Pittsburgh Airport soon. Provide the name of a Hilton hotel in the vicinity, if available. Then, tell me the the walking distance to the nearest supermarket own by a local company from the hotel.</td><td>Show me the hotels; Find a nearby supermarket; Show me the walking route from the hotel to the supermarket.</td><td>3</td></tr><tr><td>Show me the walking route from nearby hotels to CMU, Pittsburgh that take at most 5 minutes?</td><td>Find a hotel that meets the walking time requirement; Show me the walking route.</td><td>2</td></tr><tr><td>I am at CMU Pittsburgh, how long it takes to the nearest USPS postal office with different transportation methods?</td><td>Return travel time by car, by foot, by bus, and by bike.</td><td>4</td></tr><tr><td>Tell me the coordinates of Carnegie Mellon Cafe in DD format.</td><td>Return the correct coordinates.</td><td>1</td></tr></table>
441
+
442
+ Table 16: Exemplar map tasks on the Google Maps website.
data/2025/2504_06xxx/2504.06821/images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg ADDED

Git LFS Details

  • SHA256: 8a0e29e2d29bd2349fbbb6791416f6f48d3ba8ec2360e55690fac45baf0bff73
  • Pointer size: 130 Bytes
  • Size of remote file: 40.6 kB
data/2025/2504_06xxx/2504.06821/images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg ADDED

Git LFS Details

  • SHA256: 401efc68ab871c34efdf23bce2712619275061f4d3678793af71599f4bd61b8e
  • Pointer size: 130 Bytes
  • Size of remote file: 87.5 kB
data/2025/2504_06xxx/2504.06821/images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg ADDED

Git LFS Details

  • SHA256: 6b02b67abfb46f694d135abeda9696d86a6b968616fa22d1a6d6d4e3335bb0f4
  • Pointer size: 131 Bytes
  • Size of remote file: 178 kB
data/2025/2504_06xxx/2504.06821/images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg ADDED

Git LFS Details

  • SHA256: 74a79b8af8cdceade6f1cfbe64e530982fa09001c66f6e2403dc752264fe0d0a
  • Pointer size: 130 Bytes
  • Size of remote file: 53.2 kB
data/2025/2504_06xxx/2504.06821/images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg ADDED

Git LFS Details

  • SHA256: 9ed95a0c28f890f0df44604f980d37ebfa0ea1c89b6ab3a2ede70e3ccf5ef26c
  • Pointer size: 131 Bytes
  • Size of remote file: 166 kB
data/2025/2504_06xxx/2504.06821/images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg ADDED

Git LFS Details

  • SHA256: 9cfbc8501393636ec2d8a2a9594d65c3e5a2b95f5245b1fb5f7fed21ac7eb3b7
  • Pointer size: 130 Bytes
  • Size of remote file: 34.2 kB
data/2025/2504_06xxx/2504.06821/images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg ADDED

Git LFS Details

  • SHA256: 20302cd6e4e476964f47efe59d859354b3afd1beee859f06cb9d11c0b7e87589
  • Pointer size: 130 Bytes
  • Size of remote file: 60.1 kB
data/2025/2504_06xxx/2504.06821/images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg ADDED

Git LFS Details

  • SHA256: 64f664cd39358e3502692bb2fc315908da6b437b664fd67f826eb53874225122
  • Pointer size: 131 Bytes
  • Size of remote file: 108 kB
data/2025/2504_06xxx/2504.06821/images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg ADDED

Git LFS Details

  • SHA256: 203c11be61a9c11eaf075f0dfb8140750aadf0d2c8a4c42742dd4654d3b0519b
  • Pointer size: 130 Bytes
  • Size of remote file: 47.5 kB
data/2025/2504_06xxx/2504.06821/images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg ADDED

Git LFS Details

  • SHA256: 166b845c14c3d78694a0392d875785012a061abfba0a7e1b0cf0327f94b8de8f
  • Pointer size: 130 Bytes
  • Size of remote file: 77.7 kB
data/2025/2504_06xxx/2504.06821/images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg ADDED

Git LFS Details

  • SHA256: 97282c876c298ba56fe4a44982fed80ef3290b847b8ae791105eadb76b0a9ac5
  • Pointer size: 131 Bytes
  • Size of remote file: 174 kB
data/2025/2504_06xxx/2504.06821/images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg ADDED

Git LFS Details

  • SHA256: 35bf33f3495fa6abf858c4377c55b641adf09286345b83b03a3e2b69150d6ee0
  • Pointer size: 131 Bytes
  • Size of remote file: 223 kB