Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -473,7 +473,7 @@ async def chat(request: ChatRequest):
|
|
| 473 |
|
| 474 |
# Use token from request or fallback to env
|
| 475 |
token = request.hf_token or hf_token
|
| 476 |
-
|
| 477 |
# Generate response
|
| 478 |
if not token:
|
| 479 |
response = f"""[LLM Response Placeholder]
|
|
@@ -494,7 +494,7 @@ Example:
|
|
| 494 |
else:
|
| 495 |
try:
|
| 496 |
client = InferenceClient(
|
| 497 |
-
token=
|
| 498 |
model="openai/gpt-oss-20b"
|
| 499 |
)
|
| 500 |
|
|
|
|
| 473 |
|
| 474 |
# Use token from request or fallback to env
|
| 475 |
token = request.hf_token or hf_token
|
| 476 |
+
hf_token = os.getenv("HUGGINGFACE_TOKEN")  # NOTE(review): removed 'const' — JavaScript keyword, SyntaxError in Python; also, assigning hf_token here makes it local and would break its earlier read at `token = request.hf_token or hf_token` (UnboundLocalError) — confirm this line belongs above that read or at module level
|
| 477 |
# Generate response
|
| 478 |
if not token:
|
| 479 |
response = f"""[LLM Response Placeholder]
|
|
|
|
| 494 |
else:
|
| 495 |
try:
|
| 496 |
client = InferenceClient(
|
| 497 |
+
token=token,  # NOTE(review): was `token=hf_token`, which discards the request-supplied token; `token` already holds `request.hf_token or hf_token` and is the value validated by the `if not token:` guard
|
| 498 |
model="openai/gpt-oss-20b"
|
| 499 |
)
|
| 500 |
|