# Hugging Face Space (status at scrape time: Sleeping) — page residue from the Space listing.
import os
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
import asyncio  # NOTE(review): imported but never used in this file — confirm before removing

app = FastAPI()

# Initialize OpenAI client.
# NOTE(review): the API key is read from GITHUB_TOKEN — presumably this targets
# GitHub Models, which normally also needs a custom base_url; verify intentional.
client = AsyncOpenAI(api_key=os.getenv("GITHUB_TOKEN"))
async def generate_ai_response(prompt: str):
    """Stream an AI completion for *prompt*, yielding text chunks as they arrive.

    Args:
        prompt: The user's question/instruction, sent as the user message.

    Yields:
        str: incremental pieces of the assistant's reply. On failure a single
        ``"Error: ..."`` chunk is yielded and the stream ends cleanly.
    """
    try:
        stream = await client.chat.completions.create(
            model="gpt-3.5-turbo",  # Using 3.5-turbo for better compatibility
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.7,
            stream=True,
        )
        async for chunk in stream:
            # Some chunks (e.g. the final one) carry no delta content — skip them.
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # BUG FIX: the original raised HTTPException(500) *after* yielding inside
        # a streaming generator. Once StreamingResponse has started, the response
        # headers are already sent, so the 500 can never reach the client — the
        # raise only aborts the stream with a server-side ASGI error. Surface the
        # failure in-band instead and end the stream.
        yield f"Error: {str(err)}"
async def generate_response(prompt: str):
    """Build a server-sent-events streaming response for *prompt*.

    Args:
        prompt: Text to send to the model; must be non-empty.

    Returns:
        StreamingResponse wrapping the token generator, with media type
        ``text/event-stream``.

    Raises:
        HTTPException: 400 when *prompt* is empty or falsy.
    """
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")
    token_stream = generate_ai_response(prompt)
    return StreamingResponse(token_stream, media_type="text/event-stream")
# For Hugging Face Spaces
def get_app():
    """Return the module-level FastAPI application (Spaces entry-point hook)."""
    return app