Spaces:
Sleeping
Sleeping
| import ast | |
| import json | |
| from fastapi import APIRouter, Depends | |
| from fastapi.responses import StreamingResponse | |
| from pydantic import BaseModel | |
| from api.stored_data import stored_data | |
| from src.genai.context_analysis_agent.agent import IntroductionChatbot | |
# Router collecting the endpoints exposed by this module.
router = APIRouter()
class UserMessage(BaseModel):
    """Request body schema: a single chat message from the user."""

    # Raw user text forwarded verbatim to the chat graph.
    message: str
# Module-level chatbot instance shared across all requests.
# NOTE(review): this object holds mutable conversation state (`messages`,
# `reset()` below) — presumably not safe under concurrent requests;
# confirm deployment is single-worker / single-conversation.
context_analysis_graph = IntroductionChatbot()
| ## ---------------------- Passing in json ------------------------ | |
def context_analysis(msg: UserMessage):
    """Stream a chat response for *msg* as newline-delimited JSON.

    Each chunk produced by the chat graph is emitted immediately as one
    JSON object per line (``{"streamed_response": chunk}``). After the
    stream ends, a final payload is emitted containing the accumulated
    text, a ``complete`` flag and — when the conversation is complete —
    the extracted ``business_details``, which are also persisted into
    ``stored_data``.

    NOTE(review): ``router`` is instantiated in this module but this
    handler carries no ``@router.post(...)`` decorator — it looks like
    the route registration was lost; confirm against the app wiring.
    """
    def event_generator():
        accumulated_response = ""
        # Stream tokens or partial chunks from the chat generator.
        for chunk in context_analysis_graph.chat(msg.message):
            accumulated_response += chunk
            payload = {
                "streamed_response": chunk,
            }
            yield json.dumps(payload) + "\n"  # one JSON object per line

        # After streaming finished, inspect the last assistant message
        # to decide whether the conversation reached completion.
        last_response = context_analysis_graph.messages[-1]["content"]
        if context_analysis_graph.is_complete(last_response):
            details = context_analysis_graph.extract_details()
            # Normalize `details` to a plain dict. Order matters: test
            # for str BEFORE calling model_dump() — a str has no
            # model_dump(), so the original order crashed on str input.
            if isinstance(details, str):
                # literal_eval only accepts Python-literal syntax and
                # raises ValueError otherwise (safe, unlike eval()).
                details = ast.literal_eval(details)
            elif not isinstance(details, dict):
                # Assume a pydantic-style model exposing model_dump().
                details = details.model_dump()
            stored_data["business_details"] = details
            context_analysis_graph.reset()
            final_payload = {
                "response": accumulated_response,
                "complete": True,
                "business_details": details,
            }
        else:
            final_payload = {
                "response": accumulated_response,
                "complete": False,
            }
        yield json.dumps(final_payload) + "\n"

    # "event-stream" alone is not a valid MIME type, and the body is
    # newline-delimited JSON (not SSE framing), so serve it as NDJSON.
    return StreamingResponse(event_generator(), media_type="application/x-ndjson")