Spaces:
Sleeping
Sleeping
File size: 1,863 Bytes
583f6dd 8039e4b 583f6dd 8039e4b 583f6dd 8039e4b 583f6dd 8039e4b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
import ast
import json
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from api.stored_data import stored_data
from src.genai.context_analysis_agent.agent import IntroductionChatbot
router = APIRouter()
class UserMessage(BaseModel):
    """Request body for the /context-analysis endpoint: one user chat message."""
    message: str
# Module-level chatbot instance shared by all requests — it accumulates
# conversation state (`.messages`) between calls and is reset explicitly
# inside the endpoint once details are extracted.
# NOTE(review): a single shared instance means concurrent requests share one
# conversation — confirm this is intentional.
context_analysis_graph = IntroductionChatbot()
## ---------------------- Passing in json ------------------------
@router.post("/context-analysis")
def context_analysis(msg: UserMessage):
    """Stream the chatbot's reply as newline-delimited JSON.

    Each chunk produced by the chat generator is emitted immediately as
    ``{"streamed_response": <chunk>}``. After the stream ends, one final
    line carries the accumulated response, a ``complete`` flag, and —
    when the conversation is finished — the extracted business details
    (also saved into ``stored_data["business_details"]``).

    Args:
        msg: Request body containing the user's message.

    Returns:
        StreamingResponse of JSON objects, one per line (NDJSON).
    """
    def event_generator():
        accumulated_response = ""
        # Stream tokens / partial chunks from the chat generator.
        for chunk in context_analysis_graph.chat(msg.message):
            accumulated_response += chunk
            # One JSON object per line.
            yield json.dumps({"streamed_response": chunk}) + "\n"

        # After streaming finished, check whether the conversation is done.
        last_response = context_analysis_graph.messages[-1]["content"]
        if context_analysis_graph.is_complete(last_response):
            details = context_analysis_graph.extract_details()
            # Normalize `details` to a plain dict. The str check must come
            # FIRST: the original tried `details.model_dump()` on anything
            # that wasn't a dict, which raises AttributeError when
            # extract_details() returns a string.
            if isinstance(details, str):
                # NOTE(review): literal_eval assumes the string is a Python
                # literal repr, not JSON — confirm upstream format.
                details = ast.literal_eval(details)
            elif not isinstance(details, dict):
                # Pydantic model (or model-like object exposing model_dump).
                details = details.model_dump()
            stored_data["business_details"] = details
            # Clear conversation state so the next request starts fresh.
            context_analysis_graph.reset()
            final_payload = {
                "response": accumulated_response,
                "complete": True,
                "business_details": details,
            }
        else:
            final_payload = {
                "response": accumulated_response,
                "complete": False,
            }
        yield json.dumps(final_payload) + "\n"

    # The body is newline-delimited JSON, not Server-Sent Events; the
    # original "event-stream" is not a valid MIME type. Use NDJSON.
    return StreamingResponse(event_generator(), media_type="application/x-ndjson")
|