junaid17 commited on
Commit
595aeaa
·
verified ·
1 Parent(s): 21ab7a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -60
app.py CHANGED
@@ -6,6 +6,7 @@ import json
6
  from fastapi import FastAPI, UploadFile, File, Form, HTTPException, BackgroundTasks
7
  from fastapi.responses import StreamingResponse
8
  from pydantic import BaseModel
 
9
  from utils import STT, TTS
10
  from data_ingestion import Ingest_Data
11
  from RAG import app as rag_app, Ragbot_State, reload_vector_store
@@ -13,6 +14,14 @@ from RAG import app as rag_app, Ragbot_State, reload_vector_store
13
  # Initialize FastAPI
14
  app = FastAPI(title="LangGraph RAG Chatbot", version="1.0")
15
 
 
 
 
 
 
 
 
 
16
  # --- Pydantic Models ---
17
  class ChatRequest(BaseModel):
18
  query: str
@@ -72,10 +81,8 @@ async def upload_document(
72
 
73
  @app.post("/chat")
74
  async def chat_endpoint(request: ChatRequest):
75
- """
76
- Robust Streaming Endpoint that logs events to console.
77
- """
78
  config = {"configurable": {"thread_id": request.thread_id}}
 
79
  inputs = {
80
  "query": request.query,
81
  "RAG": request.use_rag,
@@ -87,70 +94,21 @@ async def chat_endpoint(request: ChatRequest):
87
  }
88
 
89
  async def event_generator():
90
- print(f"--- 🚀 Starting stream for {request.thread_id} ---")
91
-
92
- try:
93
- # Iterate over all events
94
- async for event in rag_app.astream_events(inputs, config=config, version="v2"):
95
- event_type = event.get("event")
96
- name = event.get("name", "")
97
-
98
- # Debug: print all events to see what's happening
99
- print(f"Event: {event_type} | Name: {name}")
100
 
101
- # Method 1: Check for on_chat_model_stream events (most reliable)
102
- if event_type == "on_chat_model_stream":
103
- data = event.get("data", {})
104
- chunk = data.get("chunk")
105
-
106
- if chunk and hasattr(chunk, "content"):
107
- content = chunk.content
108
- if content:
109
- chunk_json = json.dumps({"content": content})
110
- yield f"data: {chunk_json}\n\n"
111
- print(f"✅ Yielded: {content[:50]}...")
112
-
113
- # Method 2: Alternative - check for any chunk with content
114
- elif event_type in ["on_chain_stream", "on_llm_stream"]:
115
- data = event.get("data", {})
116
- chunk = data.get("chunk")
117
-
118
- # Handle different chunk types
119
- if chunk:
120
- content = None
121
-
122
- # AIMessageChunk
123
- if hasattr(chunk, "content"):
124
- content = chunk.content
125
- # String chunk
126
- elif isinstance(chunk, str):
127
- content = chunk
128
- # Dict with content
129
- elif isinstance(chunk, dict) and "content" in chunk:
130
- content = chunk["content"]
131
-
132
- if content:
133
- chunk_json = json.dumps({"content": content})
134
- yield f"data: {chunk_json}\n\n"
135
- print(f"✅ Yielded: {content[:50]}...")
136
-
137
- print("--- ✅ Stream completed ---")
138
-
139
- except Exception as e:
140
- print(f"❌ Error in stream: {e}")
141
- error_json = json.dumps({"error": str(e)})
142
- yield f"data: {error_json}\n\n"
143
-
144
- # End of stream
145
- yield "data: [DONE]\n\n"
146
 
147
  return StreamingResponse(
148
- event_generator(),
149
  media_type="text/event-stream",
150
  headers={
151
  "Cache-Control": "no-cache",
152
  "Connection": "keep-alive",
153
- "Content-Type": "text/event-stream",
154
  "X-Accel-Buffering": "no",
155
  },
156
  )
 
6
  from fastapi import FastAPI, UploadFile, File, Form, HTTPException, BackgroundTasks
7
  from fastapi.responses import StreamingResponse
8
  from pydantic import BaseModel
9
+ from fastapi.middleware.cors import CORSMiddleware
10
  from utils import STT, TTS
11
  from data_ingestion import Ingest_Data
12
  from RAG import app as rag_app, Ragbot_State, reload_vector_store
 
# Initialize FastAPI
app = FastAPI(title="LangGraph RAG Chatbot", version="1.0")

# Allow browser front-ends hosted on other origins to call this API
# (needed for the streaming /chat endpoint to be reachable from a web UI).
#
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# a security smell. The CORS spec forbids a wildcard Access-Control-Allow-Origin
# on credentialed requests, so Starlette compensates by echoing back whichever
# Origin header the browser sends — effectively trusting EVERY site with
# cookies/authorization headers. Restrict allow_origins to the known
# front-end URL(s) before deploying to production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
24
+
25
  # --- Pydantic Models ---
26
  class ChatRequest(BaseModel):
27
  query: str
 
81
 
82
  @app.post("/chat")
83
  async def chat_endpoint(request: ChatRequest):
 
 
 
84
  config = {"configurable": {"thread_id": request.thread_id}}
85
+
86
  inputs = {
87
  "query": request.query,
88
  "RAG": request.use_rag,
 
94
  }
95
 
96
  async def event_generator():
97
+ async for event in rag_app.astream_events(inputs, config=config, version="v1"):
98
+ kind = event["event"]
99
+ if kind == "on_chat_model_stream":
100
+ content = event["data"]["chunk"].content
 
 
 
 
 
 
101
 
102
+ if content:
103
+ data = content.replace("\n", "\\n")
104
+ yield f"data: {data}\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
  return StreamingResponse(
107
+ event_generator(),
108
  media_type="text/event-stream",
109
  headers={
110
  "Cache-Control": "no-cache",
111
  "Connection": "keep-alive",
 
112
  "X-Accel-Buffering": "no",
113
  },
114
  )