Upload app.py
Browse files
app.py
CHANGED
|
@@ -229,40 +229,54 @@ def process_uploads(files, progress=gr.Progress()) -> str:
|
|
| 229 |
# ββ Chat βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 230 |
|
| 231 |
def chat_respond(message: str, history: List) -> Tuple[List, str]:
|
|
|
|
|
|
|
|
|
|
| 232 |
if not _system_ready or _qa is None:
|
| 233 |
-
history.append(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 234 |
return history, ""
|
| 235 |
|
| 236 |
try:
|
| 237 |
start = time.time()
|
|
|
|
| 238 |
result = _qa.answer(message, top_k=8, use_communities=True)
|
| 239 |
elapsed = time.time() - start
|
| 240 |
|
|
|
|
| 241 |
response_parts = [result.answer]
|
| 242 |
|
| 243 |
if result.sources:
|
| 244 |
response_parts.append("\n\n---\n**π Sources:**")
|
| 245 |
for i, src in enumerate(result.sources[:5], 1):
|
| 246 |
scores = []
|
| 247 |
-
if src.dense_score > 0:
|
| 248 |
-
|
| 249 |
-
if src.
|
| 250 |
-
|
| 251 |
-
if src.graph_score > 0:
|
| 252 |
-
scores.append(f"graph={src.graph_score:.2f}")
|
| 253 |
score_str = f" ({', '.join(scores)})" if scores else ""
|
| 254 |
response_parts.append(
|
| 255 |
f"{i}. **{src.title[:60]}** β p.{src.page} β score: {src.score:.4f}{score_str}"
|
| 256 |
)
|
| 257 |
|
| 258 |
-
response_parts.append(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 259 |
|
| 260 |
-
|
| 261 |
-
history.append(
|
|
|
|
| 262 |
|
| 263 |
except Exception as e:
|
| 264 |
-
history.append(
|
|
|
|
| 265 |
|
|
|
|
| 266 |
return history, ""
|
| 267 |
|
| 268 |
|
|
|
|
# -- Chat -------------------------------------------------------------------
def chat_respond(message: str, history: List) -> Tuple[List, str]:
    """Answer a chat message via GraphRAG and append the turn to *history*.

    Uses the modern Gradio 'messages' history format (a list of
    ``{"role": ..., "content": ...}`` dicts).

    Args:
        message: The user's question.
        history: Chat history in Gradio 'messages' format; mutated in place.

    Returns:
        ``(history, "")`` — the updated history plus an empty string so the
        bound input textbox is cleared.
    """
    # Record the user's turn exactly once, regardless of which branch below
    # produces the assistant's reply (deduplicates the three identical
    # appends the previous revision carried in each branch).
    history.append({"role": "user", "content": message})

    if not _system_ready or _qa is None:
        history.append({
            "role": "assistant",
            "content": "β οΈ System not ready. Please upload PDF files or wait for the pre-built graph to finish loading."
        })
        return history, ""

    try:
        start = time.time()
        # Perform GraphRAG QA.
        result = _qa.answer(message, top_k=8, use_communities=True)
        elapsed = time.time() - start

        # Assemble the reply: answer text, then sources, then a stats footer.
        response_parts = [result.answer]

        if result.sources:
            response_parts.append("\n\n---\n**π Sources:**")
            for i, src in enumerate(result.sources[:5], 1):
                # Only mention retrieval channels that actually contributed.
                scores = []
                if src.dense_score > 0:
                    scores.append(f"dense={src.dense_score:.2f}")
                if src.sparse_score > 0:
                    scores.append(f"sparse={src.sparse_score:.2f}")
                if src.graph_score > 0:
                    scores.append(f"graph={src.graph_score:.2f}")

                score_str = f" ({', '.join(scores)})" if scores else ""
                response_parts.append(
                    f"{i}. **{src.title[:60]}** β p.{src.page} β score: {src.score:.4f}{score_str}"
                )

        response_parts.append(
            f"\n*Confidence: {result.confidence:.0%} | Time: {elapsed:.2f}s | {', '.join(result.reasoning)}*"
        )

        history.append({"role": "assistant", "content": "\n".join(response_parts)})

    except Exception as e:
        # Surface failures in-chat rather than crashing the Gradio callback.
        history.append({"role": "assistant", "content": f"β Error: {str(e)}"})

    # Empty string clears the input textbox.
    return history, ""