Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- inference.py +2 -6
inference.py
CHANGED
|
@@ -22,7 +22,7 @@ from openai import OpenAI
|
|
| 22 |
# ── configuration ───────────────────────────────────────────────────
|
| 23 |
|
| 24 |
# Environment server URL (where the Sentinel env is running)
|
| 25 |
-
ENV_URL = os.getenv("ENV_URL", "")
|
| 26 |
|
| 27 |
# LLM configuration (aligned with official OpenEnv inference examples)
|
| 28 |
API_BASE_URL = os.getenv("API_BASE_URL", "https://router.huggingface.co/v1")
|
|
@@ -292,16 +292,12 @@ async def run_task(task_id: int, base_url: str, client: OpenAI) -> float:
|
|
| 292 |
|
| 293 |
|
| 294 |
async def main() -> None:
|
| 295 |
-
if not ENV_URL:
|
| 296 |
-
print("ERROR: ENV_URL environment variable is required.", file=sys.stderr)
|
| 297 |
-
print("Set it to the environment server URL (e.g., http://localhost:8000)", file=sys.stderr)
|
| 298 |
-
sys.exit(1)
|
| 299 |
-
|
| 300 |
llm_client = OpenAI(
|
| 301 |
base_url=API_BASE_URL,
|
| 302 |
api_key=API_KEY,
|
| 303 |
)
|
| 304 |
print(f"Using LLM API: {API_BASE_URL} / model={MODEL_NAME}")
|
|
|
|
| 305 |
|
| 306 |
scores: dict[int, float] = {}
|
| 307 |
for task_id in [1, 2, 3]:
|
|
|
|
| 22 |
# ── configuration ───────────────────────────────────────────────────
|
| 23 |
|
| 24 |
# Environment server URL (where the Sentinel env is running)
|
| 25 |
+
ENV_URL = os.getenv("ENV_URL", "http://localhost:8000")
|
| 26 |
|
| 27 |
# LLM configuration (aligned with official OpenEnv inference examples)
|
| 28 |
API_BASE_URL = os.getenv("API_BASE_URL", "https://router.huggingface.co/v1")
|
|
|
|
| 292 |
|
| 293 |
|
| 294 |
async def main() -> None:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 295 |
llm_client = OpenAI(
|
| 296 |
base_url=API_BASE_URL,
|
| 297 |
api_key=API_KEY,
|
| 298 |
)
|
| 299 |
print(f"Using LLM API: {API_BASE_URL} / model={MODEL_NAME}")
|
| 300 |
+
print(f"Environment URL: {ENV_URL}")
|
| 301 |
|
| 302 |
scores: dict[int, float] = {}
|
| 303 |
for task_id in [1, 2, 3]:
|