# OpenAI adapter implementing the application's LLM port.
import json
from typing import AsyncGenerator

from langchain_openai import ChatOpenAI

from src.core.config import settings
from src.core.ports.llm_port import LlmPort
class OpenAiAdapter(LlmPort):
    """LLM port implementation backed by OpenAI chat models via LangChain.

    If no API key is configured the adapter degrades gracefully: it is
    constructed with ``self.llm = None`` and each method short-circuits,
    so the application can still start without credentials.
    """

    def __init__(self):
        # Only build the client when a key is present; otherwise run in
        # degraded (no-op) mode rather than raising at startup.
        if settings.OPENAI_API_KEY:
            self.llm = ChatOpenAI(
                api_key=settings.OPENAI_API_KEY,
                model="gpt-4",
                temperature=0.2,
            )
        else:
            self.llm = None

    def generate(self, prompt: str) -> str:
        """Return the model's full completion for *prompt* (blocking call).

        Returns a fixed notice string when no API key is configured.
        """
        if not self.llm:
            return "LLM API Key not configured."
        return self.llm.invoke(prompt).content

    async def generate_stream(self, prompt: str) -> AsyncGenerator[str, None]:
        """Yield SSE-formatted token events for *prompt*.

        Each token is emitted as a ``data: {"token": ...}`` event line;
        a final ``data: [DONE]`` sentinel terminates the stream.
        """
        if not self.llm:
            yield "data: [DONE]\n\n"
            return
        # BUG FIX: the original iterated the *synchronous* stream() inside
        # an async generator, blocking the event loop for the duration of
        # the whole completion. astream() yields chunks without blocking.
        async for chunk in self.llm.astream(prompt):
            if hasattr(chunk, 'content'):
                yield f"data: {json.dumps({'token': chunk.content})}\n\n"
            else:
                yield f"data: {json.dumps({'token': str(chunk)})}\n\n"
        yield "data: [DONE]\n\n"