DIVYA-NSHU99 commited on
Commit
9e99205
·
verified ·
1 Parent(s): 801aa6b

Upload 4 files

Browse files
agents/fix_generator.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+
4
+ from llm.model_loader import ask_llm
5
+
6
+
7
+ # ---------------------------------
8
+ # Load System Prompt
9
+ # ---------------------------------
10
+
11
PROMPT_PATH = Path("prompts/fix_prompt.txt")


def load_system_prompt():
    """Return the system-prompt text for the fix-generator agent.

    The file is re-read on every call, so prompt edits take effect
    without restarting the process.

    Returns:
        str: contents of ``prompts/fix_prompt.txt``.

    Raises:
        FileNotFoundError: if the prompt file is missing.
    """
    # EAFP: attempt the read directly instead of exists()+read, which is
    # race-prone (the file could vanish between the two calls).
    try:
        # Explicit encoding — read_text() otherwise uses the platform
        # default, which can mangle non-ASCII prompt text on some systems.
        return PROMPT_PATH.read_text(encoding="utf-8")
    except FileNotFoundError:
        raise FileNotFoundError(
            "fix_prompt.txt not found in prompts folder"
        ) from None
22
+
23
+
24
+ # ---------------------------------
25
+ # Build Chat Messages
26
+ # ---------------------------------
27
+
28
def build_messages(payload):
    """Assemble the system/user chat messages for the fix-generator call.

    Args:
        payload: dict with keys ``report_json``, ``identified_risks``,
            ``predicted_refusals`` and ``strategy_plan``; missing keys
            are serialized as JSON ``null``.

    Returns:
        list[dict]: OpenAI-style chat messages (system + user).
    """
    # Serialize each input section up front so the prompt template below
    # stays a single readable literal.
    report_blob = json.dumps(payload.get("report_json"), indent=2)
    risks_blob = json.dumps(payload.get("identified_risks"), indent=2)
    refusals_blob = json.dumps(payload.get("predicted_refusals"), indent=2)
    plan_blob = json.dumps(payload.get("strategy_plan"), indent=2)

    user_prompt = f"""
INPUT DATA

report_json:
{report_blob}

identified_risks:
{risks_blob}

predicted_refusals:
{refusals_blob}

strategy_plan:
{plan_blob}

Return ONLY valid JSON.
"""

    return [
        {"role": "system", "content": load_system_prompt()},
        {"role": "user", "content": user_prompt},
    ]
67
+
68
+
69
+ # ---------------------------------
70
+ # Agent Entry Function
71
+ # ---------------------------------
72
+
73
def run_fix_generator(payload):
    """Entry point for the Application Fix Generator agent.

    Expected payload:
        {
            "report_json": {...},
            "identified_risks": [...],
            "predicted_refusals": [...],
            "strategy_plan": [...]
        }

    Returns:
        The raw LLM response (expected to be a JSON string).
    """
    # Thin orchestration: build the chat transcript, hand it to the LLM.
    return ask_llm(build_messages(payload))
agents/refusal_predictor.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+
4
+ from llm.model_loader import ask_llm
5
+
6
+
7
+ # ---------------------------------
8
+ # Load System Prompt
9
+ # ---------------------------------
10
+
11
PROMPT_PATH = Path("prompts/refusal_prompt.txt")


def load_system_prompt():
    """Return the system-prompt text for the refusal-predictor agent.

    The file is re-read on every call, so prompt edits take effect
    without restarting the process.

    Returns:
        str: contents of ``prompts/refusal_prompt.txt``.

    Raises:
        FileNotFoundError: if the prompt file is missing.
    """
    # EAFP: attempt the read directly instead of exists()+read, which is
    # race-prone (the file could vanish between the two calls).
    try:
        # Explicit encoding — read_text() otherwise uses the platform
        # default, which can mangle non-ASCII prompt text on some systems.
        return PROMPT_PATH.read_text(encoding="utf-8")
    except FileNotFoundError:
        raise FileNotFoundError(
            "refusal_prompt.txt not found in prompts folder"
        ) from None
22
+
23
+
24
+ # ---------------------------------
25
+ # Build Chat Messages
26
+ # ---------------------------------
27
+
28
def build_messages(payload):
    """Assemble the system/user chat messages for the refusal predictor.

    Args:
        payload: dict with keys ``report_json`` and ``identified_risks``;
            missing keys are serialized as JSON ``null``.

    Returns:
        list[dict]: OpenAI-style chat messages (system + user).
    """
    # Serialize the two input sections up front so the template below
    # stays a single readable literal.
    report_blob = json.dumps(payload.get("report_json"), indent=2)
    risks_blob = json.dumps(payload.get("identified_risks"), indent=2)

    user_prompt = f"""
INPUT DATA

report_json:
{report_blob}

identified_risks:
{risks_blob}

Return ONLY valid JSON.
"""

    return [
        {"role": "system", "content": load_system_prompt()},
        {"role": "user", "content": user_prompt},
    ]
59
+
60
+
61
+ # ---------------------------------
62
+ # Agent Entry Function
63
+ # ---------------------------------
64
+
65
def run_refusal_predictor(payload):
    """Entry point for the Refusal Predictor agent.

    Expected payload:
        {
            "report_json": {...},
            "identified_risks": [...]
        }

    Returns:
        The raw LLM response (expected to be a JSON string).
    """
    # Thin orchestration: build the chat transcript, hand it to the LLM.
    return ask_llm(build_messages(payload))
agents/risk_analyzer.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+
4
+ from llm.model_loader import ask_llm
5
+
6
+
7
+ # ---------------------------------
8
+ # Load Prompt
9
+ # ---------------------------------
10
+
11
PROMPT_PATH = Path("prompts/risk_prompt.txt")


def load_system_prompt():
    """Return the system-prompt text for the risk-analyzer agent.

    The file is re-read on every call, so prompt edits take effect
    without restarting the process.

    Returns:
        str: contents of ``prompts/risk_prompt.txt``.

    Raises:
        FileNotFoundError: if the prompt file is missing.
    """
    # EAFP: attempt the read directly instead of exists()+read, which is
    # race-prone (the file could vanish between the two calls).
    try:
        # Explicit encoding — read_text() otherwise uses the platform
        # default, which can mangle non-ASCII prompt text on some systems.
        return PROMPT_PATH.read_text(encoding="utf-8")
    except FileNotFoundError:
        # Message brought in line with the other agent modules, which all
        # mention the prompts folder.
        raise FileNotFoundError(
            "risk_prompt.txt not found in prompts folder"
        ) from None
20
+
21
+
22
+ # ---------------------------------
23
+ # Build Messages
24
+ # ---------------------------------
25
+
26
def build_messages(payload):
    """Assemble the system/user chat messages for the risk analyzer.

    Args:
        payload: dict with key ``report_json``; a missing key is
            serialized as JSON ``null``.

    Returns:
        list[dict]: OpenAI-style chat messages (system + user).
    """
    # Serialize the report up front so the template below stays a single
    # readable literal.
    report_blob = json.dumps(payload.get("report_json"), indent=2)

    user_prompt = f"""
INPUT DATA

report_json:
{report_blob}

Return ONLY valid JSON.
"""

    return [
        {"role": "system", "content": load_system_prompt()},
        {"role": "user", "content": user_prompt},
    ]
47
+
48
+
49
+ # ---------------------------------
50
+ # Agent Entry Function
51
+ # ---------------------------------
52
+
53
def run_risk_analyzer(payload):
    """Entry point for the Risk Analyzer agent.

    Expected payload:
        {
            "report_json": {...}
        }

    Returns:
        The raw LLM response (expected to be a JSON string).
    """
    # Thin orchestration: build the chat transcript, hand it to the LLM.
    return ask_llm(build_messages(payload))
agents/strategy_planner.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from pathlib import Path
3
+
4
+ from llm.model_loader import ask_llm
5
+
6
+
7
+ # ---------------------------------
8
+ # Load System Prompt
9
+ # ---------------------------------
10
+
11
PROMPT_PATH = Path("prompts/strategy_prompt.txt")


def load_system_prompt():
    """Return the system-prompt text for the strategy-planner agent.

    The file is re-read on every call, so prompt edits take effect
    without restarting the process.

    Returns:
        str: contents of ``prompts/strategy_prompt.txt``.

    Raises:
        FileNotFoundError: if the prompt file is missing.
    """
    # EAFP: attempt the read directly instead of exists()+read, which is
    # race-prone (the file could vanish between the two calls).
    try:
        # Explicit encoding — read_text() otherwise uses the platform
        # default, which can mangle non-ASCII prompt text on some systems.
        return PROMPT_PATH.read_text(encoding="utf-8")
    except FileNotFoundError:
        raise FileNotFoundError(
            "strategy_prompt.txt not found in prompts folder"
        ) from None
22
+
23
+
24
+ # ---------------------------------
25
+ # Build Chat Messages
26
+ # ---------------------------------
27
+
28
def build_messages(payload):
    """Assemble the system/user chat messages for the strategy planner.

    Args:
        payload: dict with keys ``report_json``, ``identified_risks`` and
            ``predicted_refusals``; missing keys are serialized as JSON
            ``null``.

    Returns:
        list[dict]: OpenAI-style chat messages (system + user).
    """
    # Serialize each input section up front so the template below stays
    # a single readable literal.
    report_blob = json.dumps(payload.get("report_json"), indent=2)
    risks_blob = json.dumps(payload.get("identified_risks"), indent=2)
    refusals_blob = json.dumps(payload.get("predicted_refusals"), indent=2)

    user_prompt = f"""
INPUT DATA

report_json:
{report_blob}

identified_risks:
{risks_blob}

predicted_refusals:
{refusals_blob}

Return ONLY valid JSON.
"""

    return [
        {"role": "system", "content": load_system_prompt()},
        {"role": "user", "content": user_prompt},
    ]
63
+
64
+
65
+ # ---------------------------------
66
+ # Agent Entry Function
67
+ # ---------------------------------
68
+
69
def run_strategy_planner(payload):
    """Entry point for the Strategy Planner agent.

    Expected payload:
        {
            "report_json": {...},
            "identified_risks": [...],
            "predicted_refusals": [...]
        }

    Returns:
        The raw LLM response (expected to be a JSON string).
    """
    # Thin orchestration: build the chat transcript, hand it to the LLM.
    return ask_llm(build_messages(payload))