# app.py — Streamlit front-end for the multi-agent codegen demo.
# NOTE(review): the Hugging Face Hub page chrome that was captured here
# ("Update app.py", commit 9c283b0, "raw / history blame / 3.6 kB") was not
# Python and has been converted to this comment so the module parses.
import os

import streamlit as st

# NOTE(review): the names below are used later in this file but were never
# imported — the script would crash with NameError at build_orchestrator().
# The exact macg module paths are not visible from this file; confirm them.
from macg.llm_openai import OpenAIResponsesLLM
from macg.llm_hf import HuggingFaceInferenceLLM  # TODO(review): confirm module path
from macg.agents import CoderAgent, ReviewerAgent, TesterAgent  # TODO(review): confirm module path
from macg.orchestrator import Orchestrator  # TODO(review): confirm module path
# --- Page config -----------------------------------------------------------
# st.set_page_config must be the FIRST Streamlit command executed in the
# script; the original called st.error / st.sidebar widgets before it,
# which raises a StreamlitAPIException. Moved to the top of this section.
st.set_page_config(page_title="Multi-Agent Codegen (HF)", layout="wide")
st.title("🤖 Multi-Agent Codegen + Review + Testing (Hugging Face)")
st.caption("Coder → Reviewer → Tester loop with pytest verification.")

# --- OpenAI credentials ----------------------------------------------------
# Hard requirement: stop rendering entirely if the key is absent.
openai_key = os.getenv("OPENAI_API_KEY")
if not openai_key:
    st.error("OPENAI_API_KEY not found. Add it in HF Space Settings → Variables and secrets.")
    st.stop()

# NOTE(review): this OpenAI-backed LLM is constructed but never used below —
# build_orchestrator() builds its own HuggingFaceInferenceLLM. Kept for
# backward compatibility; confirm whether it can be removed.
llm = OpenAIResponsesLLM(
    model=st.sidebar.text_input("OpenAI model", value="gpt-5"),
    api_key=openai_key,
    # key= prevents a DuplicateWidgetID clash with the identically
    # parameterized "Temperature" slider created in the Settings sidebar.
    temperature=st.sidebar.slider(
        "Temperature", 0.0, 1.0, 0.2, 0.05, key="openai_temperature"
    ),
    max_output_tokens=900,
)
# --- Secrets / token handling (Streamlit Cloud uses st.secrets) ------------
# Accessing st.secrets when no secrets.toml exists raises FileNotFoundError
# (the common case on HF Spaces, where tokens live in env vars), so guard
# the lookup and fall back to the environment.
hf_token = None
try:
    if "HF_TOKEN" in st.secrets:
        hf_token = st.secrets["HF_TOKEN"]
except FileNotFoundError:
    hf_token = None
if not hf_token:
    hf_token = os.getenv("HF_TOKEN")

with st.sidebar:
    st.header("Settings")
    # model / max_iters / temperature are read later by build_orchestrator().
    model = st.text_input(
        "HF model (Inference API)",
        value="Qwen/Qwen2.5-Coder-7B-Instruct",
        help="You can change to another hosted model if you want."
    )
    max_iters = st.slider("Max iterations", 1, 6, 3)
    temperature = st.slider("Temperature", 0.0, 1.0, 0.2, 0.05)
    st.divider()
    st.subheader("HF Token")
    if hf_token:
        st.success("HF_TOKEN found (env or secrets).")
    else:
        st.warning("HF_TOKEN not found. Add it in Streamlit Secrets or environment.")
        st.info("Streamlit Cloud: Settings → Secrets → add HF_TOKEN='...'")
# Pre-filled example task shown in the text area on first load.
default_task = (
    "Implement a function fizzbuzz(n: int) -> list[str] that returns strings for 1..n.\n"
    "- Multiples of 3 -> 'Fizz'\n"
    "- Multiples of 5 -> 'Buzz'\n"
    "- Multiples of both -> 'FizzBuzz'\n"
    "Return the list of length n.\n"
    "Edge cases: n <= 0 should return an empty list."
)

# Task prompt plus side-by-side Run / Clear buttons.
task = st.text_area("Task", value=default_task, height=180)
col_run, col_clear = st.columns([1, 1])
run_btn = col_run.button("Run Agents", type="primary", use_container_width=True)
clear_btn = col_clear.button("Clear Output", use_container_width=True)

# Clearing simply drops the cached result; pop() tolerates a missing key.
if clear_btn:
    st.session_state.pop("result", None)
def build_orchestrator() -> Orchestrator:
    """Build the Coder/Reviewer/Tester pipeline backed by the HF Inference API.

    Reads the module-level ``hf_token``, ``model`` and ``temperature``
    values set by the sidebar.

    Raises:
        RuntimeError: if no HF token was found in the environment or secrets.
    """
    if not hf_token:
        raise RuntimeError("HF_TOKEN missing. Add it to environment or Streamlit secrets.")
    hf_llm = HuggingFaceInferenceLLM(
        model=model,
        token=hf_token,
        temperature=float(temperature),
        max_new_tokens=900,
    )
    # All three agents share the same LLM client.
    return Orchestrator(
        coder=CoderAgent(hf_llm),
        reviewer=ReviewerAgent(hf_llm),
        tester=TesterAgent(hf_llm),
    )
# Kick off one full agent loop when "Run Agents" is pressed; the result is
# cached in session_state so it survives Streamlit reruns.
if run_btn:
    try:
        orch = build_orchestrator()
        with st.spinner("Running Coder → Reviewer → Tester..."):
            result = orch.run(task=task, max_iters=int(max_iters))
        st.session_state["result"] = result
    except Exception as e:
        # Broad catch is deliberate: this is the top-level UI boundary, and
        # any failure (missing token, API error) is surfaced to the user.
        st.error(str(e))
# Render the last run (if any): summary metrics on top, then code/review on
# the left and tests/report on the right.
result = st.session_state.get("result")
if result:
    top1, top2, top3 = st.columns([1, 1, 1])
    top1.metric("Passed", "✅ Yes" if result.passed else "❌ No")
    top2.metric("Iterations", str(result.iteration))
    top3.metric("Module", result.module_name)
    st.divider()
    left, right = st.columns([1, 1])
    with left:
        st.subheader("Generated Code")
        # "or ''" guards against None fields on a partially failed run.
        st.code(result.code or "", language="python")
        st.subheader("Review Notes")
        st.text(result.review_notes or "")
    with right:
        st.subheader("Generated Tests")
        st.code(result.tests or "", language="python")
        st.subheader("Test Report")
        st.text(result.test_report or "")