Navya-Sree committed on
Commit
02eae86
·
verified ·
1 Parent(s): b766743

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+
4
+ from macg.llm_hf import HuggingFaceInferenceLLM
5
+ from macg.agents.coder import CoderAgent
6
+ from macg.agents.reviewer import ReviewerAgent
7
+ from macg.agents.tester import TesterAgent
8
+ from macg.orchestrator import Orchestrator
9
+
10
+ st.set_page_config(page_title="Multi-Agent Codegen (HF)", layout="wide")
11
+
12
+ st.title("πŸ€– Multi-Agent Codegen + Review + Testing (Hugging Face)")
13
+ st.caption("Coder β†’ Reviewer β†’ Tester loop with pytest verification.")
14
+
15
+ # --- Secrets / token handling (Streamlit Cloud uses st.secrets) ---
16
+ hf_token = None
17
+ if "HF_TOKEN" in st.secrets:
18
+ hf_token = st.secrets["HF_TOKEN"]
19
+ else:
20
+ hf_token = os.getenv("HF_TOKEN")
21
+
# Sidebar: model selection, loop/temperature knobs, and token status.
with st.sidebar:
    st.header("Settings")

    model = st.text_input(
        "HF model (Inference API)",
        value="Qwen/Qwen2.5-Coder-7B-Instruct",
        help="You can change to another hosted model if you want.",
    )

    # Agent-loop controls: how many coder/reviewer/tester rounds, and
    # how adventurous the generations are.
    max_iters = st.slider("Max iterations", 1, 6, 3)
    temperature = st.slider("Temperature", 0.0, 1.0, 0.2, 0.05)

    st.divider()
    st.subheader("HF Token")

    # Surface token status so a missing credential is obvious up front.
    if not hf_token:
        st.warning("HF_TOKEN not found. Add it in Streamlit Secrets or environment.")
        st.info("Streamlit Cloud: Settings β†’ Secrets β†’ add HF_TOKEN='...'")
    else:
        st.success("HF_TOKEN found (env or secrets).")
# Pre-filled example task so the app is runnable with a single click.
default_task = (
    "Implement a function fizzbuzz(n: int) -> list[str] that returns strings for 1..n.\n"
    "- Multiples of 3 -> 'Fizz'\n"
    "- Multiples of 5 -> 'Buzz'\n"
    "- Multiples of both -> 'FizzBuzz'\n"
    "Return the list of length n.\n"
    "Edge cases: n <= 0 should return an empty list."
)

task = st.text_area("Task", value=default_task, height=180)

# Two side-by-side action buttons: kick off the agent loop / reset output.
run_col, clear_col = st.columns([1, 1])
run_btn = run_col.button("Run Agents", type="primary", use_container_width=True)
clear_btn = clear_col.button("Clear Output", use_container_width=True)

if clear_btn:
    # Drop any previous run; pop() tolerates there being no result yet.
    st.session_state.pop("result", None)
def build_orchestrator() -> Orchestrator:
    """Wire the Coder/Reviewer/Tester agents around one shared HF LLM.

    Raises:
        RuntimeError: if no HF token was found in secrets or environment.
    """
    if not hf_token:
        raise RuntimeError("HF_TOKEN missing. Add it to environment or Streamlit secrets.")

    # One LLM client is shared by all three agents.
    shared_llm = HuggingFaceInferenceLLM(
        model=model,
        token=hf_token,
        temperature=float(temperature),
        max_new_tokens=900,
    )
    return Orchestrator(
        coder=CoderAgent(shared_llm),
        reviewer=ReviewerAgent(shared_llm),
        tester=TesterAgent(shared_llm),
    )
if run_btn:
    # Build the pipeline and run it; any failure (missing token, HF API
    # error, orchestration bug) is surfaced in the UI rather than raised.
    try:
        orchestrator = build_orchestrator()
        with st.spinner("Running Coder β†’ Reviewer β†’ Tester..."):
            outcome = orchestrator.run(task=task, max_iters=int(max_iters))
        st.session_state["result"] = outcome
    except Exception as exc:
        st.error(str(exc))
# Render the most recent run (survives Streamlit reruns via session_state).
result = st.session_state.get("result")

if result:
    # Summary metrics across the top.
    passed_col, iters_col, module_col = st.columns([1, 1, 1])
    passed_col.metric("Passed", "βœ… Yes" if result.passed else "❌ No")
    iters_col.metric("Iterations", str(result.iteration))
    module_col.metric("Module", result.module_name)

    st.divider()

    # Code + review on the left, tests + report on the right.
    code_col, tests_col = st.columns([1, 1])

    with code_col:
        st.subheader("Generated Code")
        st.code(result.code or "", language="python")

        st.subheader("Review Notes")
        st.text(result.review_notes or "")

    with tests_col:
        st.subheader("Generated Tests")
        st.code(result.tests or "", language="python")

        st.subheader("Test Report")
        st.text(result.test_report or "")