Spaces:
Sleeping
Sleeping
import logging
from os import getenv

from dotenv import load_dotenv
from openai import OpenAI

# Pull OPENROUTER_API_KEY (and any other settings) from a local .env file.
load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# OpenRouter exposes an OpenAI-compatible API, so the stock OpenAI client
# works once pointed at the OpenRouter base URL.
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=getenv("OPENROUTER_API_KEY"),
)
def chat():
    """Stream one chat completion from OpenRouter, printing it token by token.

    Returns:
        dict: ``{"response": <full concatenated reply text>}``.
    """
    stream = client.chat.completions.create(
        model="openai/gpt-oss-120b",
        messages=[
            {"role": "user", "content": "Say this is a test"}
        ],
        stream=True,  # required: the loop below consumes incremental chunks
    )

    logger.info("Streaming response:")
    parts = []  # collect tokens; "".join at the end is linear vs quadratic +=
    for event in stream:
        # Some stream chunks carry an empty `choices` list (e.g. a final
        # usage-only chunk) — indexing choices[0] unguarded raises IndexError.
        if not event.choices:
            continue
        delta = event.choices[0].delta
        # Role-only / keep-alive deltas have content == None; skip them.
        if delta and delta.content:
            token = delta.content
            logger.info(token)
            print(token, end="", flush=True)
            parts.append(token)
    print()  # terminate the streamed line on stdout
    full_response = "".join(parts)
    return {"response": full_response}
| chat() | |