# Default profile: Hugging Face Router (OpenAI-compatible API)
API_BASE_URL=https://router.huggingface.co/v1
MODEL_NAME=Qwen/Qwen2.5-72B-Instruct
HF_TOKEN=
# Optional alias. If both are set, OPENAI_API_KEY is used first by inference.py.
OPENAI_API_KEY=

# OpenAI direct profile (uncomment for OpenAI access token usage):
# API_BASE_URL=https://api.openai.com/v1
# MODEL_NAME=gpt-4o-mini
# HF_TOKEN=
# OPENAI_API_KEY=

# Optional runtime knobs
LOCAL_IMAGE_NAME=
MY_ENV_V4_TASK=easy-command-typo
MY_ENV_V4_BENCHMARK=cicd_debugger_env
MAX_STEPS=8
TEMPERATURE=0.2
MAX_TOKENS=120
SUCCESS_SCORE_THRESHOLD=0.1
OFFLINE_INFERENCE=0