"""
TeamForge – Hugging Face Spaces Entry Point
============================================
Exposes BOTH:
1. REST API (for OpenEnv validator: POST /reset, POST /step, GET /state)
2. Gradio UI (for human demo)
The OpenEnv validator POSTs to /reset – this must return a valid JSON observation.
"""
from __future__ import annotations
import json
import os
import threading
from typing import Any, Dict, Optional
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
import gradio as gr
from environment import TeamForgeEnv
from models import (
Commit, EditFile, GenerateReview,
PlanStep, RunLint, RunTests, SelfReflect, RequestIteration,
)
from tasks import ALL_TASK_IDS
# ── Shared environment instance ───────────────────────────────────────────────
env = TeamForgeEnv()
_obs = None
_lock = threading.Lock()
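# One shared episode per process: _obs caches the latest Observation while
# _lock serializes access from both the REST endpoints and the Gradio callbacks.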
# ── FastAPI app (REST endpoints for OpenEnv validator) ────────────────────────
api = FastAPI(title="TeamForge OpenEnv API", version="1.0.0")
class ResetRequest(BaseModel):
task_id: str = "easy_bugfix_chunk_list"
class StepRequest(BaseModel):
action: Dict[str, Any]
@api.get("/")
def root():
"""Health check β validator pings this first."""
return {"status": "ok", "env": "teamforge", "version": "1.0.0"}
@api.get("/health")
def health():
return {"status": "ok"}
@api.post("/reset")
def reset(req: Optional[ResetRequest] = None):
"""
OpenEnv reset endpoint.
POST /reset {"task_id": "easy_bugfix_chunk_list"}
Returns full Observation as JSON.
"""
global _obs
with _lock:
try:
task_id = req.task_id if req else "easy_bugfix_chunk_list"
if task_id not in ALL_TASK_IDS:
task_id = "easy_bugfix_chunk_list"
_obs = env.reset(task_id)
return JSONResponse(content=_obs.model_dump())
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@api.post("/step")
def step(req: StepRequest):
"""
OpenEnv step endpoint.
POST /step {"action": {"type": "run_tests", ...}}
Returns updated Observation as JSON.
"""
global _obs
with _lock:
if _obs is None:
_obs = env.reset("easy_bugfix_chunk_list")
dispatch = {
"plan_step": PlanStep,
"edit_file": EditFile,
"run_tests": RunTests,
"run_lint": RunLint,
"generate_review": GenerateReview,
"commit": Commit,
"self_reflect": SelfReflect,
"request_iteration":RequestIteration,
}
try:
action_data = req.action
action_type = action_data.get("type", "")
cls = dispatch.get(action_type)
if cls is None:
raise HTTPException(status_code=400, detail=f"Unknown action type: {action_type}")
action = cls(**action_data)
_obs = env.step(action)
return JSONResponse(content=_obs.model_dump())
except HTTPException:
raise
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@api.get("/state")
def state():
"""
OpenEnv state endpoint.
    GET /state → current environment state dict.
"""
with _lock:
return JSONResponse(content=env.state())
@api.get("/tasks")
def tasks():
"""List all available tasks."""
return {"tasks": ALL_TASK_IDS}
@api.post("/grade")
def grade():
"""Grade the current episode."""
with _lock:
result = env.grade()
return JSONResponse(content=result.model_dump())
# ── Gradio UI (for human demo) ─────────────────────────────────────────────────
def gradio_reset(task_id: str):
global _obs
with _lock:
env._sandbox.teardown()
_obs = env.reset(task_id)
state_dict = env.state()
desc = f"## β
Episode started: `{task_id}`\n\n"
desc += f"**Files in repo:** `{[f.path for f in _obs.repo_files]}`\n\n"
desc += f"**Task:**\n{_obs.task_description[:400]}"
return desc, json.dumps(state_dict, indent=2)
def gradio_run_demo(task_id: str):
"""Run the scripted demo solution for the easy task."""
if task_id != "easy_bugfix_chunk_list":
return "β οΈ Scripted demo only available for easy_bugfix_chunk_list.\nFor other tasks, use the API or inference.py"
global _obs
with _lock:
env._sandbox.teardown()
_obs = env.reset(task_id)
log = [f"[START] task={task_id}\n"]
steps = [
PlanStep(step_number=1, description="Read chunk_list, find range() bug", estimated_effort="low"),
        PlanStep(step_number=2, description="Fix range(0,len(lst)-1,n) → range(0,len(lst),n)", estimated_effort="low"),
EditFile(
file_path="utils/list_ops.py",
content='"""List utility operations."""\nfrom typing import Any, List\n\n\ndef chunk_list(lst: List[Any], n: int) -> List[List[Any]]:\n """Split lst into chunks of size n."""\n if n <= 0:\n raise ValueError("Chunk size must be positive")\n return [lst[i : i + n] for i in range(0, len(lst), n)]\n\n\ndef flatten(lst: List[List[Any]]) -> List[Any]:\n """Flatten one level."""\n return [item for sublist in lst for item in sublist]\n',
reason="Fix off-by-one: range stop was len(lst)-1, should be len(lst)",
),
RunTests(),
RunLint(fix=False),
GenerateReview(
focus_areas=["correctness", "off-by-one", "range"],
review_text=(
"Bug was off-by-one in range() β range(0,len(lst)-1,n) dropped last chunk. "
"Fix: range(0,len(lst),n). All 7 tests pass. Lint clean. O(n) complexity preserved."
),
),
SelfReflect(
what_went_well="Identified off-by-one immediately from test_odd_split assertion.",
what_to_improve="Should run lint before tests next time.",
),
Commit(message="fix(list_ops): correct off-by-one in chunk_list range() call"),
]
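    # The scripted actions above walk one full loop of the benchmark:
    # plan → edit → test → lint → review → reflect → commit.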
with _lock:
for action in steps:
_obs = env.step(action)
tr = _obs.test_results
log.append(
f"[STEP {_obs.step_number:2d}] {action.type:20s} "
f"reward={_obs.reward:+.4f} "
f"tests={'%dp/%df' % (tr.passed, tr.failed) if tr else 'N/A'}"
)
result = env.grade()
log.append(f"\n[END] FINAL_SCORE={result.final_score:.4f} | PASSED={result.passed}")
log.append(f" test_pass_rate = {result.test_pass_rate:.4f}")
log.append(f" lint_score = {result.lint_score:.4f}")
log.append(f" efficiency = {result.efficiency_score:.4f}")
log.append(f" review_quality = {result.review_quality:.4f}")
return "\n".join(log)
with gr.Blocks(title="TeamForge – OpenEnv Benchmark") as demo:
    gr.Markdown("""
# TeamForge: OpenEnv Benchmark for Autonomous Software Engineering Agents
**REST API available at this Space URL** – the OpenEnv validator uses:
- `POST /reset` → start episode
- `POST /step` → execute action
- `GET /state` → current state

> Simulates a full software development team: **Plan → Code → Test → Review → Reflect**
""")
with gr.Row():
task_dd = gr.Dropdown(choices=ALL_TASK_IDS, value=ALL_TASK_IDS[0], label="Task")
        reset_btn = gr.Button("Init Episode", variant="secondary")
        demo_btn = gr.Button("▶ Run Demo (Easy Task)", variant="primary")
with gr.Row():
obs_out = gr.Markdown(label="Observation")
state_out = gr.Code(label="State JSON", language="json")
log_out = gr.Textbox(label="Episode Log", lines=20, interactive=False)
reset_btn.click(gradio_reset, inputs=[task_dd], outputs=[obs_out, state_out])
demo_btn.click(gradio_run_demo, inputs=[task_dd], outputs=[log_out])
# ── Mount Gradio inside FastAPI ───────────────────────────────────────────────
from gradio.routes import mount_gradio_app
app = mount_gradio_app(api, demo, path="/ui")
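# The Gradio UI is served under /ui; the REST endpoints above remain at the
# root of the same app, so a single Space (and port) exposes both interfaces.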
# ── Entry point ────────────────────────────────────────────────────────────────
def main():
    port = int(os.getenv("PORT", "7860"))
uvicorn.run(app, host="0.0.0.0", port=port)
if __name__ == "__main__":
main()
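# Local run (sketch): `python app.py` starts uvicorn on $PORT (default 7860);
# the validator targets the root URL and humans open /ui for the Gradio demo.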