"""
CodeSensei — Inference Script (OpenEnv Hackathon)
===================================================

MANDATORY ENV VARS:
    API_BASE_URL        The API endpoint for the LLM.
    MODEL_NAME          The model identifier to use for inference.
    HF_TOKEN            Your Hugging Face / API key.
    LOCAL_IMAGE_NAME    Docker image name for from_docker_image()

STDOUT FORMAT (must be exact):
    [START] task=<task_name> env=<benchmark> model=<model_name>
    [STEP] step=<n> action=<action_str> reward=<0.00> done=<true|false> error=<msg|null>
    [END] success=<true|false> steps=<n> score=<score> rewards=<r1,r2,...,rn>
"""
import asyncio
import os
import re
import textwrap
from typing import List, Optional

from openai import OpenAI

# ---------------------------------------------------------------------------
# Import our OpenEnv environment (mirrors: from my_env_v4 import ...)
# ---------------------------------------------------------------------------
from env import CodeSenseiEnv, CodeSenseiAction
# ---------------------------------------------------------------------------
# Env vars (submission contract — matches sample exactly)
# ---------------------------------------------------------------------------
IMAGE_NAME = os.getenv("LOCAL_IMAGE_NAME")
API_KEY = os.getenv("HF_TOKEN") or os.getenv("API_KEY")
API_BASE_URL = os.getenv("API_BASE_URL", "https://api.groq.com/openai/v1")
MODEL_NAME = os.getenv("MODEL_NAME", "llama-3.3-70b-versatile")
TASK_NAME = os.getenv("CODESENSEI_TASK", "code-debug")
BENCHMARK = os.getenv("CODESENSEI_BENCHMARK", "codesensei")

MAX_STEPS = 6
TEMPERATURE = 0.7
MAX_TOKENS = 512
SUCCESS_SCORE_THRESHOLD = 0.5
# Max total reward: best case +2.0 per step, 6 steps
MAX_TOTAL_REWARD = MAX_STEPS * 2.0
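# With the defaults above, MAX_TOTAL_REWARD = 6 * 2.0 = 12.0; the final score is
# sum(rewards) / MAX_TOTAL_REWARD, clamped below. (The +2.0-per-step best case is
# the env's reward ceiling that this script assumes rather than enforces.)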
# ---------------------------------------------------------------------------
# System prompt
# ---------------------------------------------------------------------------
SYSTEM_PROMPT = textwrap.dedent("""\
    You are CodeSensei, an expert Python debugger.
    TASK: You are given a buggy Python function and its failing test cases.
    Find the bug and return ONLY the corrected function.
    RULES:
    1. Return ONLY the corrected Python function — no explanations.
    2. Keep the same function name and signature.
    3. Wrap your code in ```python ... ``` markers.
    4. Think step-by-step: identify failing tests, find the bug, then fix it.
    EXAMPLE:
    Given:
    ```python
    def add(a, b):
        return a - b
    ```
    Return:
    ```python
    def add(a, b):
        return a + b
    ```
    """).strip()
# ---------------------------------------------------------------------------
# Structured logging — [START], [STEP], [END] (exact format from sample)
# ---------------------------------------------------------------------------
def log_start(task: str, env: str, model: str) -> None:
    print(f"[START] task={task} env={env} model={model}", flush=True)


def log_step(step: int, action: str, reward: float, done: bool, error: Optional[str]) -> None:
    error_val = error if error else "null"
    done_val = str(done).lower()
    print(
        f"[STEP] step={step} action={action} reward={reward:.2f} done={done_val} error={error_val}",
        flush=True,
    )


def log_end(success: bool, steps: int, score: float, rewards: List[float]) -> None:
    rewards_str = ",".join(f"{r:.2f}" for r in rewards)
    print(f"[END] success={str(success).lower()} steps={steps} score={score:.3f} rewards={rewards_str}", flush=True)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def extract_code(text: str) -> str:
    """Extract code from markdown fences or a raw def statement."""
    match = re.search(r"```(?:python)?\s*\n(.*?)```", text, re.DOTALL)
    if match:
        return match.group(1).strip()
    match = re.search(r"(def \w+\(.*?\):.*)", text, re.DOTALL)
    if match:
        return match.group(1).strip()
    return text.strip()
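

# A minimal sketch of how extract_code behaves (example strings are assumed):
#   extract_code("```python\ndef add(a, b):\n    return a + b\n```")
#   -> "def add(a, b):\n    return a + b"
# Unfenced replies fall through to the bare `def ...` regex; anything else is
# returned stripped as-is.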
def build_user_prompt(step: int, buggy_code: str, feedback: str, history: List[str]) -> str:
    # Keep the template flush-left: dedent() would not strip indentation once the
    # multi-line buggy_code is interpolated, so none is used inside the f-string.
    history_block = "\n".join(history[-4:]) if history else "None"
    return textwrap.dedent(f"""\
Step: {step}
Buggy code:
```python
{buggy_code}
```
Last feedback: {feedback}
Previous attempts:
{history_block}
Send your corrected function.
""").strip()
def get_model_message(client: OpenAI, step: int, buggy_code: str, feedback: str, history: List[str]) -> str:
    user_prompt = build_user_prompt(step, buggy_code, feedback, history)
    try:
        completion = client.chat.completions.create(
            model=MODEL_NAME,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
            temperature=TEMPERATURE,
            max_tokens=MAX_TOKENS,
            stream=False,
        )
        text = (completion.choices[0].message.content or "").strip()
        # Fall back to a harmless placeholder so every step still sends an action.
        return text if text else "hello"
    except Exception as exc:
        print(f"[DEBUG] Model request failed: {exc}", flush=True)
        return "hello"
# ---------------------------------------------------------------------------
# Main — follows sample structure exactly
# ---------------------------------------------------------------------------
async def main() -> None:
    if not IMAGE_NAME:
        raise RuntimeError("LOCAL_IMAGE_NAME must be set; see MANDATORY ENV VARS in the module docstring.")
    client = OpenAI(base_url=API_BASE_URL, api_key=API_KEY)

    # OpenEnv standard: start env from Docker image
    env = await CodeSenseiEnv.from_docker_image(IMAGE_NAME)

    history: List[str] = []
    rewards: List[float] = []
    steps_taken = 0
    score = 0.0
    success = False

    log_start(task=TASK_NAME, env=BENCHMARK, model=MODEL_NAME)
    try:
        result = await env.reset()  # OpenEnv.reset()
        last_feedback = result.observation.feedback or "All tests failing — fix the bug."
        buggy_code = result.observation.buggy_code
        for step in range(1, MAX_STEPS + 1):
            if result.done:
                break
            message = get_model_message(client, step, buggy_code, last_feedback, history)
            proposed_fix = extract_code(message)

            result = await env.step(CodeSenseiAction(proposed_fix=proposed_fix))
            obs = result.observation
            reward = result.reward or 0.0
            done = result.done
            error = obs.error_output if obs.error_output else None

            rewards.append(reward)
            steps_taken = step
            last_feedback = obs.feedback or f"{obs.tests_passed}/{obs.tests_total} tests passing"

            # Action for log: the proposed fix, newline-escaped and truncated to 120 chars.
            action_str = proposed_fix.replace("\n", "\\n")[:120]
            log_step(step=step, action=action_str, reward=reward, done=done, error=error)
            history.append(f"Step {step}: {obs.tests_passed}/{obs.tests_total} tests -> reward {reward:+.2f}")

            if done:
                break
        # Score strictly bounded to (0, 1) per Phase 2 requirements.
        score = sum(rewards) / MAX_TOTAL_REWARD if MAX_TOTAL_REWARD > 0 else 0.01
        score = min(max(score, 0.01), 0.99)  # strict bounds
        success = score >= SUCCESS_SCORE_THRESHOLD
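        # Worked example (illustrative values, not from a real run): rewards of
        # [2.0, 2.0, 1.0] give 5.0 / 12.0 ≈ 0.417, inside the clamp but below the
        # 0.5 threshold, so success=False; a perfect 12.0 / 12.0 = 1.0 is clamped
        # to 0.99 and still counts as success.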
    finally:
        try:
            await env.close()
        except Exception as e:
            print(f"[DEBUG] env.close() error (container cleanup): {e}", flush=True)
        log_end(success=success, steps=steps_taken, score=score, rewards=rewards)


if __name__ == "__main__":
    asyncio.run(main())