Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- Dockerfile +35 -0
- app.py +1023 -0
- app_hybrid_rag.log +0 -0
- chat_history.csv +127 -0
- chat_history.db +0 -0
- chunker.py +189 -0
- llm_handling.py +575 -0
- postman_collection.json +348 -0
- requirements.txt +30 -0
- system_prompts.py +66 -0
Dockerfile
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use an official slim Python runtime as the base image
FROM python:3.10-slim

# Set the working directory in the container
WORKDIR /app

# Install system libraries needed by imaging/CV Python packages
RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1 \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Copy the requirements file first so the dependency layer is cached
# independently of application-code changes
COPY requirements.txt requirements.txt

# Install Python packages
RUN pip install --no-cache-dir -r requirements.txt

# Create a non-root user BEFORE copying code so ownership can be set
# in the COPY itself (the original did COPY then a separate `chown -R`,
# which duplicates the app files into an extra image layer)
RUN useradd -m -u 1000 user

# Copy application code, owned by the non-root user
COPY --chown=user:user . /app

# Switch to the non-root user
USER user

# Expose the port the Flask app listens on (matches the CMD below;
# the original comment incorrectly referenced Gunicorn)
EXPOSE 7860

# Command to run the app
CMD ["python", "app.py", "--host", "0.0.0.0", "--port", "7860"]
app.py
ADDED
|
@@ -0,0 +1,1023 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, request, send_file, abort, jsonify, url_for, render_template, Response
|
| 2 |
+
from flask_cors import CORS
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from sentence_transformers import SentenceTransformer, util
|
| 5 |
+
import torch
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import List, Dict, Tuple, Optional, Any, Iterator
|
| 8 |
+
from collections import deque
|
| 9 |
+
import os
|
| 10 |
+
import logging
|
| 11 |
+
import atexit
|
| 12 |
+
from threading import Thread, Lock
|
| 13 |
+
import time
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
from uuid import uuid4 as generate_uuid
|
| 16 |
+
import csv as csv_lib
|
| 17 |
+
import functools
|
| 18 |
+
import json
|
| 19 |
+
import re
|
| 20 |
+
import subprocess
|
| 21 |
+
import sys
|
| 22 |
+
import sqlite3
|
| 23 |
+
import io
|
| 24 |
+
|
| 25 |
+
from dotenv import load_dotenv
|
| 26 |
+
|
| 27 |
+
# Load environment variables from .env file AT THE VERY TOP
|
| 28 |
+
load_dotenv()
|
| 29 |
+
|
| 30 |
+
# Import RAG system and Fallback LLM from llm_handling AFTER load_dotenv
|
| 31 |
+
from llm_handling import (
|
| 32 |
+
initialize_and_get_rag_system,
|
| 33 |
+
KnowledgeRAG,
|
| 34 |
+
groq_bot_instance,
|
| 35 |
+
RAG_SOURCES_DIR,
|
| 36 |
+
RAG_STORAGE_PARENT_DIR,
|
| 37 |
+
RAG_CHUNKED_SOURCES_FILENAME,
|
| 38 |
+
get_answer_from_context,
|
| 39 |
+
stream_answer_from_context # <-- ADDED IMPORT
|
| 40 |
+
)
|
| 41 |
+
from system_prompts import QA_FORMATTER_PROMPT
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Setup logging (remains global for the app): INFO level, mirrored to a
# rotating-less file plus stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("app_hybrid_rag.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)  # Main app logger

# --- Application Constants and Configuration ---
# NOTE(review): credentials default to 'admin'/'admin' — these MUST be
# overridden via FLASK_ADMIN_USERNAME / FLASK_ADMIN_PASSWORD in production.
ADMIN_USERNAME = os.getenv('FLASK_ADMIN_USERNAME', 'admin')
ADMIN_PASSWORD = os.getenv('FLASK_ADMIN_PASSWORD', 'admin')
FLASK_APP_HOST = os.getenv("FLASK_HOST", "0.0.0.0")
FLASK_APP_PORT = int(os.getenv("FLASK_PORT", "7860"))
# NOTE(review): debug mode defaults to ON unless FLASK_DEBUG=false is set —
# confirm this is not the case in deployed environments.
FLASK_DEBUG_MODE = os.getenv("FLASK_DEBUG", "True").lower() == "true"
_APP_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEXT_EXTRACTIONS_DIR = os.path.join(_APP_BASE_DIR, 'text_extractions')
RELATED_QUESTIONS_TO_SHOW = 10     # related questions surfaced to the client
QUESTIONS_TO_SEND_TO_GROQ_QA = 3   # top QA pairs forwarded as LLM context
# MODIFIED: Lowered the threshold to make it easier to trigger the Excel/CSV logic
LLM_FORMATTER_CONFIDENCE_THRESHOLD = int(os.getenv("LLM_FORMATTER_CONFIDENCE_THRESHOLD", "65"))
HIGH_CONFIDENCE_THRESHOLD = 90  # For greetings, which are answered directly.
CHAT_HISTORY_TO_SEND = int(os.getenv("CHAT_HISTORY_TO_SEND", "5"))
CHAT_LOG_FILE = os.path.join(_APP_BASE_DIR, 'chat_history.csv')

# Global RAG system handle; populated at startup elsewhere in this module.
rag_system: Optional[KnowledgeRAG] = None
| 72 |
+
|
| 73 |
+
# --- Persistent Chat History Management using SQLite ---
|
| 74 |
+
class ChatHistoryManager:
    """Persists per-session chat transcripts in a SQLite database.

    Each session occupies one row: the session_id plus a JSON-encoded list
    of {'role': ..., 'content': ...} message dicts.  A threading.Lock
    serializes access from concurrent request handlers.
    """

    def __init__(self, db_path):
        self.db_path = db_path
        self.lock = Lock()
        self._create_table()
        logger.info(f"SQLite chat history manager initialized at: {self.db_path}")

    def _get_connection(self):
        # Fresh connection per operation; the timeout mitigates
        # 'database is locked' errors under concurrent access.
        conn = sqlite3.connect(self.db_path, timeout=10)
        return conn

    def _create_table(self):
        """Create the chat_histories table if it does not exist (idempotent)."""
        with self.lock:
            with self._get_connection() as conn:
                cursor = conn.cursor()
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS chat_histories (
                        session_id TEXT PRIMARY KEY,
                        history TEXT NOT NULL
                    )
                """)
                conn.commit()

    def get_history(self, session_id: str, limit_turns: int = 5) -> list:
        """Return the most recent `limit_turns` exchanges (two messages per turn).

        Returns [] for unknown sessions or on any database error.
        """
        # FIX: the original read without holding self.lock, inconsistent with
        # update_history()/clear_history(); all access now goes through the lock.
        with self.lock:
            try:
                with self._get_connection() as conn:
                    cursor = conn.cursor()
                    cursor.execute("SELECT history FROM chat_histories WHERE session_id = ?", (session_id,))
                    row = cursor.fetchone()
                    if row:
                        history_list = json.loads(row[0])
                        return history_list[-(limit_turns * 2):]
                    else:
                        return []
            except Exception as e:
                logger.error(f"Error fetching history for session {session_id}: {e}", exc_info=True)
                return []

    def update_history(self, session_id: str, query: str, answer: str):
        """Append one user/assistant exchange to the session's stored history."""
        with self.lock:
            try:
                with self._get_connection() as conn:
                    cursor = conn.cursor()
                    cursor.execute("SELECT history FROM chat_histories WHERE session_id = ?", (session_id,))
                    row = cursor.fetchone()

                    history = json.loads(row[0]) if row else []

                    history.append({'role': 'user', 'content': query})
                    history.append({'role': 'assistant', 'content': answer})

                    updated_history_json = json.dumps(history)

                    # Upsert: replace the whole serialized history for the session.
                    cursor.execute("""
                        INSERT OR REPLACE INTO chat_histories (session_id, history)
                        VALUES (?, ?)
                    """, (session_id, updated_history_json))
                    conn.commit()
            except Exception as e:
                logger.error(f"Error updating history for session {session_id}: {e}", exc_info=True)

    def clear_history(self, session_id: str):
        """Reset a session's history to an empty list (row is kept, not deleted)."""
        with self.lock:
            try:
                with self._get_connection() as conn:
                    cursor = conn.cursor()
                    cursor.execute("""
                        INSERT OR REPLACE INTO chat_histories (session_id, history)
                        VALUES (?, ?)
                    """, (session_id, json.dumps([])))
                    conn.commit()
                    logger.info(f"Chat history cleared for session: {session_id}")
            except Exception as e:
                logger.error(f"Error clearing history for session {session_id}: {e}", exc_info=True)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
# --- EmbeddingManager for CSV QA (remains in app.py) ---
|
| 151 |
+
@dataclass
class QAEmbeddings:
    # Bundle tying a QA CSV table to its precomputed question embeddings.
    questions: List[str]           # cleaned question strings that were embedded
    question_map: List[int]        # row index into df_qa for each embedded question
    embeddings: torch.Tensor       # sentence embeddings, row-aligned with `questions`
    df_qa: pd.DataFrame            # the source QA table
    original_questions: List[str]  # question text as originally written (pre-cleaning view)
|
| 158 |
+
|
| 159 |
+
class EmbeddingManager:
    """Embeds the CSV QA banks (general/personal/greetings) and serves
    cosine-similarity lookups against them."""

    def __init__(self, model_name='all-MiniLM-L6-v2'):
        self.model = SentenceTransformer(model_name)
        # One QAEmbeddings bundle per bank; None until update_embeddings() runs.
        self.embeddings = {
            'general': None,
            'personal': None,
            'greetings': None
        }
        logger.info(f"EmbeddingManager initialized with model: {model_name}")

    def _process_questions(self, df: pd.DataFrame) -> Tuple[List[str], List[int], List[str]]:
        """Extract non-empty question strings from df['Question'].

        Returns (cleaned questions, row indices into df, original question text).
        Rows that are NaN, blank, or the literal string "nan" are skipped.
        """
        questions = []
        question_map = []
        original_questions = []

        if 'Question' not in df.columns:
            logger.warning(f"DataFrame for EmbeddingManager is missing 'Question' column. Cannot process questions from it.")
            return questions, question_map, original_questions

        for idx, question_text_raw in enumerate(df['Question']):
            if pd.isna(question_text_raw):
                continue
            question_text_cleaned = str(question_text_raw).strip()
            if not question_text_cleaned or question_text_cleaned.lower() == "nan":
                continue

            questions.append(question_text_cleaned)
            question_map.append(idx)
            original_questions.append(question_text_cleaned)

        return questions, question_map, original_questions

    def _build_qa_embeddings(self, df: pd.DataFrame) -> QAEmbeddings:
        """Process one QA DataFrame into a QAEmbeddings bundle, encoding its questions.

        Embeddings are None when the frame yields no usable questions.
        """
        questions, question_map, original_questions = self._process_questions(df)
        encoded = self.model.encode(questions, convert_to_tensor=True, show_progress_bar=False) if questions else None
        return QAEmbeddings(
            questions=questions, question_map=question_map, embeddings=encoded,
            df_qa=df, original_questions=original_questions
        )

    def update_embeddings(self, general_qa: pd.DataFrame, personal_qa: pd.DataFrame, greetings_qa: pd.DataFrame):
        """Re-embed all three QA banks.

        REFACTOR: the original repeated the same process/encode/assemble
        sequence inline three times; it now delegates to _build_qa_embeddings.
        """
        self.embeddings['general'] = self._build_qa_embeddings(general_qa)
        self.embeddings['personal'] = self._build_qa_embeddings(personal_qa)
        self.embeddings['greetings'] = self._build_qa_embeddings(greetings_qa)
        logger.info("CSV QA embeddings updated in EmbeddingManager.")

    def find_best_answers(self, user_query: str, qa_type: str, top_n: int = 5) -> Tuple[List[float], List[str], List[str], List[str], List[int]]:
        """Return (confidences %, matched questions, answers, image refs, df row
        indices) for the top_n stored questions most similar to user_query.

        All five lists are empty when the bank has no embeddings.
        """
        qa_data = self.embeddings[qa_type]
        if qa_data is None or qa_data.embeddings is None or len(qa_data.embeddings) == 0:
            return [], [], [], [], []

        query_embedding_tensor = self.model.encode([user_query], convert_to_tensor=True, show_progress_bar=False)
        if not isinstance(qa_data.embeddings, torch.Tensor):
            qa_data.embeddings = torch.tensor(qa_data.embeddings)  # Safeguard

        cos_scores = util.cos_sim(query_embedding_tensor, qa_data.embeddings)[0]

        top_k = min(top_n, len(cos_scores))
        if top_k == 0:
            return [], [], [], [], []

        top_scores_tensor, indices_tensor = torch.topk(cos_scores, k=top_k)

        # Cosine similarity scaled to a 0-100 "confidence" percentage.
        top_confidences = [score.item() * 100 for score in top_scores_tensor]
        top_indices_mapped = []
        top_questions = []

        for idx_tensor in indices_tensor:
            item_idx = idx_tensor.item()
            if item_idx < len(qa_data.question_map) and item_idx < len(qa_data.original_questions):
                original_df_idx = qa_data.question_map[item_idx]
                if original_df_idx < len(qa_data.df_qa):
                    top_indices_mapped.append(original_df_idx)
                    top_questions.append(qa_data.original_questions[item_idx])
                else:
                    logger.warning(f"Index out of bounds: original_df_idx {original_df_idx} for df_qa length {len(qa_data.df_qa)}")
            else:
                logger.warning(f"Index out of bounds: item_idx {item_idx} for question_map/original_questions")

        # Trim confidences/questions to entries that survived the bounds checks.
        # NOTE(review): if an out-of-bounds entry occurs mid-list, this slice can
        # misalign confidences with questions — confirm whether that case is reachable.
        valid_count = len(top_indices_mapped)
        top_confidences = top_confidences[:valid_count]
        top_questions = top_questions[:valid_count]

        # Prefer Spanish 'Respuesta', then English 'Answer'; otherwise echo the question.
        if 'Respuesta' in qa_data.df_qa.columns:
            answer_col = 'Respuesta'
        elif 'Answer' in qa_data.df_qa.columns:
            answer_col = 'Answer'
        else:
            answer_col = None

        if answer_col:
            top_answers = [str(qa_data.df_qa[answer_col].iloc[i]) for i in top_indices_mapped]
        else:
            top_answers = [str(qa_data.df_qa['Question'].iloc[i]) for i in top_indices_mapped]

        top_images = [str(qa_data.df_qa['Image'].iloc[i]) if 'Image' in qa_data.df_qa.columns and pd.notna(qa_data.df_qa['Image'].iloc[i]) else None for i in top_indices_mapped]

        return top_confidences, top_questions, top_answers, top_images, top_indices_mapped
|
| 267 |
+
|
| 268 |
+
# --- DatabaseMonitor for personal_qa.csv placeholders (remains in app.py) ---
|
| 269 |
+
class DatabaseMonitor:
    """Watches the personal-data CSV on disk and hot-reloads it into a DataFrame.

    A daemon thread polls the file's mtime/size; `get_data` serves per-user rows
    under a lock.  Any load failure leaves `self.df` as None.
    """

    def __init__(self, database_path):
        self.logger = logging.getLogger(__name__ + ".DatabaseMonitor")
        self.database_path = database_path
        self.last_modified = None   # mtime observed at the last successful load
        self.last_size = None       # file size observed at the last successful load
        self.df = None              # the loaded DataFrame, or None when unavailable
        self.lock = Lock()
        self.running = True
        self._load_database()
        # Daemon thread so it never blocks interpreter shutdown.
        self.monitor_thread = Thread(target=self._monitor_database, daemon=True)
        self.monitor_thread.start()
        self.logger.info(f"DatabaseMonitor initialized for: {database_path}")

    def _load_database(self):
        """(Re)read the CSV into self.df; on any failure self.df becomes None."""
        try:
            if not os.path.exists(self.database_path):
                self.logger.warning(f"Personal data file not found: {self.database_path}.")
                self.df = None
                return
            with self.lock:
                # NOTE(review): cp1252 encoding is assumed for this file — confirm
                # the data source actually exports Windows-1252.
                self.df = pd.read_csv(self.database_path, encoding='cp1252')
                self.last_modified = os.path.getmtime(self.database_path)
                self.last_size = os.path.getsize(self.database_path)
                self.logger.info(f"Personal data file reloaded: {self.database_path}")
        except Exception as e:
            self.logger.error(f"Error loading personal data file '{self.database_path}': {e}", exc_info=True)
            self.df = None

    def _monitor_database(self):
        # Poll loop: 1s cadence normally; 5s backoff when the file is missing
        # or an unexpected error occurs.
        while self.running:
            try:
                if not os.path.exists(self.database_path):
                    if self.df is not None:
                        self.logger.warning(f"Personal data file disappeared: {self.database_path}")
                        self.df = None; self.last_modified = None; self.last_size = None
                    time.sleep(5)
                    continue
                current_modified = os.path.getmtime(self.database_path); current_size = os.path.getsize(self.database_path)
                # Reload when either mtime or size differs from the last load.
                if (self.last_modified is None or current_modified != self.last_modified or
                    self.last_size is None or current_size != self.last_size):
                    self.logger.info("Personal data file change detected.")
                    self._load_database()
                time.sleep(1)
            except Exception as e:
                self.logger.error(f"Error monitoring personal data file: {e}", exc_info=True)
                time.sleep(5)

    def get_data(self, user_id):
        """Return the row whose 'id' column matches user_id, as a dict, or None.

        When the 'id' column is numeric, user_id is coerced to the column's
        concrete value type before comparison.
        """
        with self.lock:
            if self.df is not None and user_id:
                try:
                    if 'id' not in self.df.columns:
                        self.logger.warning("'id' column not found in personal_data.csv")
                        return None
                    id_col_type = self.df['id'].dtype
                    target_user_id = user_id
                    if pd.api.types.is_numeric_dtype(id_col_type):
                        try:
                            if user_id is None: return None
                            valid_ids = self.df['id'].dropna()
                            if not valid_ids.empty:
                                # Coerce using the concrete Python type of an existing id value.
                                target_user_id = type(valid_ids.iloc[0])(user_id)
                            else:
                                target_user_id = int(user_id)
                        except (ValueError, TypeError):
                            self.logger.warning(f"Could not convert user_id '{user_id}' to numeric type {id_col_type}")
                            return None
                    user_data = self.df[self.df['id'] == target_user_id]
                    if not user_data.empty: return user_data.iloc[0].to_dict()
                except Exception as e:
                    self.logger.error(f"Error retrieving data for user_id {user_id}: {e}", exc_info=True)
            return None

    def stop(self):
        """Signal the monitor loop to exit and join the thread (best effort)."""
        self.running = False
        if hasattr(self, 'monitor_thread') and self.monitor_thread.is_alive():
            self.monitor_thread.join(timeout=5)
        self.logger.info("DatabaseMonitor stopped.")
|
| 348 |
+
|
| 349 |
+
# --- Flask App Initialization ---
app = Flask(__name__)
# NOTE(review): origins="*" combined with supports_credentials=True is a very
# permissive CORS setup — confirm this is intended for production.
CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)

# --- Initialize Managers ---
embedding_manager = EmbeddingManager()                   # CSV QA similarity search
history_manager = ChatHistoryManager('chat_history.db')  # SQLite chat transcripts
database_csv_path = os.path.join(RAG_SOURCES_DIR, 'database.csv')
personal_data_monitor = DatabaseMonitor(database_csv_path)  # hot-reloaded personal data
|
| 358 |
+
|
| 359 |
+
# --- Helper Functions (App specific) ---
|
| 360 |
+
def clean_html_from_text(text: str) -> str:
    """Strip HTML tags from *text* and trim surrounding whitespace.

    Non-string inputs are returned untouched.
    """
    if isinstance(text, str):
        without_tags = re.sub(r'<[^>]+>', '', text)
        return without_tags.strip()
    return text
|
| 366 |
+
|
| 367 |
+
# Translation table mapping cp1252 control-range artifacts and Unicode
# "smart" punctuation to plain ASCII (built once at import time).
_PUNCT_TRANSLATION = str.maketrans({
    '\x91': "'", '\x92': "'", '\x93': '"', '\x94': '"',
    '\x96': '-', '\x97': '-', '\x85': '...', '\x95': '-',
    '\u201c': '"', '\u201d': '"', '\u2018': "'", '\u2019': "'",
    '\u2013': '-', '\u2014': '-', '\u2026': '...', '\u2022': '-',
})

def normalize_text(text):
    """Replace smart quotes/dashes/ellipses with ASCII; non-strings pass through."""
    if isinstance(text, str):
        text = text.translate(_PUNCT_TRANSLATION)
    return text
|
| 377 |
+
|
| 378 |
+
def require_admin_auth(f):
    """Decorator enforcing HTTP Basic auth against the admin credentials."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        credentials = request.authorization
        authorized = (
            bool(credentials)
            and credentials.username == ADMIN_USERNAME
            and credentials.password == ADMIN_PASSWORD
        )
        if authorized:
            return f(*args, **kwargs)
        return Response('Admin auth failed.', 401, {'WWW-Authenticate': 'Basic realm="Admin Login Required"'})
    return wrapper
|
| 386 |
+
|
| 387 |
+
def initialize_chat_log():
    """Create the chat-log CSV with its header row if it does not exist yet."""
    if os.path.exists(CHAT_LOG_FILE):
        return
    header = ['sl', 'date_time', 'session_id', 'user_id', 'query', 'answer']
    with open(CHAT_LOG_FILE, 'w', newline='', encoding='utf-8') as f:
        csv_lib.writer(f).writerow(header)
|
| 392 |
+
|
| 393 |
+
def store_chat_history(sid: str, uid: Optional[str], query: str, resp: Dict[str, Any]):
    """Record one exchange in both the SQLite history and the CSV audit log.

    sid: session id; uid: optional user id (logged as "N/A" when absent);
    resp: response dict whose 'answer' field is persisted.
    Failures are logged, never raised — chat logging is best-effort.
    """
    try:
        answer = str(resp.get('answer', ''))
        history_manager.update_history(sid, query, answer)

        initialize_chat_log()
        # Determine the next serial number by scanning the existing CSV.
        # NOTE(review): this re-reads the entire log on every message — O(n)
        # per chat turn; consider caching the counter if the log grows large.
        next_sl = 1
        try:
            if os.path.exists(CHAT_LOG_FILE) and os.path.getsize(CHAT_LOG_FILE) > 0:
                df_log = pd.read_csv(CHAT_LOG_FILE, on_bad_lines='skip')
                if not df_log.empty and 'sl' in df_log.columns and pd.api.types.is_numeric_dtype(df_log['sl'].dropna()):
                    if not df_log['sl'].dropna().empty:
                        next_sl = int(df_log['sl'].dropna().max()) + 1
        except Exception as e:
            logger.error(f"Error reading SL from {CHAT_LOG_FILE}: {e}", exc_info=True)

        with open(CHAT_LOG_FILE, 'a', newline='', encoding='utf-8') as f:
            csv_lib.writer(f).writerow([next_sl, datetime.now().strftime('%Y-%m-%d %H:%M:%S'), sid, uid or "N/A", query, answer])

    except Exception as e:
        logger.error(f"Error in store_chat_history for session {sid}: {e}", exc_info=True)
|
| 414 |
+
|
| 415 |
+
def get_formatted_chat_history(session_id: str) -> List[Dict[str, str]]:
    """Fetch the recent history for *session_id*; empty list for a blank/None id."""
    if session_id:
        return history_manager.get_history(session_id, limit_turns=CHAT_HISTORY_TO_SEND)
    return []
|
| 419 |
+
|
| 420 |
+
def get_qa_context_for_groq(all_questions: List[Dict]) -> str:
    """Build a newline-joined block of "Q:/A:" pairs from the best non-greeting QA hits.

    Greeting entries are excluded; remaining candidates are ranked by confidence,
    and only usable string answers (non-NaN, non-blank, not marked "not available")
    from the top QUESTIONS_TO_SEND_TO_GROQ_QA are kept.
    """
    candidates = [entry for entry in all_questions if entry.get('source_type') != 'greetings']
    candidates.sort(key=lambda item: item.get('confidence', 0), reverse=True)

    pairs = []
    for qa in candidates[:QUESTIONS_TO_SEND_TO_GROQ_QA]:
        answer = qa.get('answer')
        usable = (
            not pd.isna(answer)
            and isinstance(answer, str)
            and answer.strip()
            and "not available" not in answer.lower()
        )
        if usable:
            pairs.append(f"Q: {qa.get('question')}\nA: {answer}")
    return '\n'.join(pairs)
|
| 431 |
+
|
| 432 |
+
def replace_placeholders_in_answer(answer, db_data):
    """Fill {placeholder} tokens in a canned answer from the user's personal data.

    Returns the filled answer, or an apologetic fallback string when the answer
    is empty, the user has no personal record, or required values are missing.
    """
    if pd.isna(answer) or str(answer).strip() == '':
        return "Sorry, this information is not available yet"
    answer_str = str(answer)
    # findall keeps duplicates; the set() below deduplicates for replacement.
    # NOTE(review): if a MISSING placeholder repeats, len(placeholders) counts
    # duplicates while missing_count does not — the "all missing" branch below
    # may then be skipped; confirm whether that is intended.
    placeholders = re.findall(r'\{(\w+)\}', answer_str)
    if not placeholders: return answer_str
    # Placeholders present but no personal record: caller must supply a user id.
    if db_data is None:
        return "To get this specific information, please ensure you are logged in or have provided your user ID."
    missing_count = 0; replacements_made = 0
    for placeholder in set(placeholders):
        key = placeholder.strip()
        value = db_data.get(key)
        # Treat None / NaN / blank as missing and substitute the literal text.
        if value is None or (isinstance(value, float) and pd.isna(value)) or str(value).strip() == '':
            answer_str = answer_str.replace(f'{{{key}}}', "not available")
            missing_count += 1
        else:
            answer_str = answer_str.replace(f'{{{key}}}', str(value))
            replacements_made +=1
    # Every placeholder missing -> generic apology instead of a noisy answer.
    if missing_count == len(placeholders) and len(placeholders) > 0 :
        return "Sorry, some specific details for you are not available at the moment."
    # NOTE(review): this branch also triggers when the SOURCE answer text itself
    # contains the phrase "not available" — confirm that is intended.
    if "not available" in answer_str.lower() and replacements_made < len(placeholders):
        if answer_str == "not available" and len(placeholders) == 1:
            return "Sorry, this information is not available yet."
        if re.search(r'\{(\w+)\}', answer_str):
            logger.warning(f"Unresolved placeholders remain after replacement attempt: {answer_str}")
            answer_str = re.sub(r'\{(\w+)\}', "a specific detail", answer_str)
            if "a specific detail" in answer_str and not "Sorry" in answer_str:
                return "Sorry, I couldn't retrieve all the specific details for this answer. " + answer_str
        return "Sorry, I couldn't retrieve all the specific details for this answer. Some information has been generalized."
    return answer_str
|
| 462 |
+
|
| 463 |
+
# --- Non-Streaming Logic ---
def get_hybrid_response_logic_non_streaming(user_query: str, session_id: str, user_id: Optional[str], chat_history: Optional[List[Dict]] = None) -> Dict[str, Any]:
    """Resolve a user query through the hybrid pipeline (non-streaming).

    Resolution order:
      1. High-confidence greeting match from the greetings QA set.
      2. Top-5 combined personal/general QA rows, reformatted by the LLM.
      3. FAISS document RAG.
      4. General Groq model fallback.

    Args:
        user_query: The (HTML-cleaned) user question.
        session_id: Chat session identifier; required for history logging.
        user_id: Optional user id used to look up personal placeholder data.
        chat_history: Optional pre-built message history; when ``None`` the
            stored history for ``session_id`` is used for the Groq fallback.

    Returns:
        A JSON-serializable response dict, or ``{'error': ...}`` on bad input.
    """
    global rag_system

    if not user_query: return {'error': 'No query provided'}
    if not session_id: return {'error': 'session_id is required'}

    personal_db_data = personal_data_monitor.get_data(user_id) if user_id else None

    # Get candidates from QA files
    conf_greet, q_greet, a_greet, img_greet, idx_greet = embedding_manager.find_best_answers(user_query, 'greetings', top_n=1)
    conf_pers, q_pers, a_pers, img_pers, idx_pers = embedding_manager.find_best_answers(user_query, 'personal', top_n=5)
    conf_gen, q_gen, a_gen, img_gen, idx_gen = embedding_manager.find_best_answers(user_query, 'general', top_n=5)

    # Handle greetings separately with a high confidence check
    if conf_greet and conf_greet[0] >= HIGH_CONFIDENCE_THRESHOLD:
        response_data = {
            'query': user_query, 'answer': a_greet[0],
            'confidence': conf_greet[0],
            'original_question': q_greet[0],
            'source': 'greetings_qa'
        }
        if img_greet and img_greet[0]:
            response_data['image_url'] = url_for('static', filename=img_greet[0], _external=True)
        store_chat_history(session_id, user_id, user_query, response_data)
        return response_data

    # Combine general and personal candidates and send top 5 to LLM, regardless of confidence
    all_qa_candidates = []
    if conf_pers:
        for c, q, a, img, idx in zip(conf_pers, q_pers, a_pers, img_pers, idx_pers):
            processed_a = replace_placeholders_in_answer(a, personal_db_data)
            # Drop personal rows whose placeholders could not be filled at all.
            if not ("Sorry, this information is not available yet" in processed_a or "To get this specific information" in processed_a):
                all_qa_candidates.append({'question': q, 'answer': processed_a, 'image': img, 'confidence': c, 'source_type': 'personal', 'original_index': idx})
    if conf_gen:
        for c, q, a, img, idx in zip(conf_gen, q_gen, a_gen, img_gen, idx_gen):
            if not (pd.isna(a) or str(a).strip() == '' or str(a).lower() == 'nan'):
                # BUGFIX: 'question' previously duplicated the answer (str(a)),
                # which corrupted 'original_question' and the related-questions
                # list for general rows. It now carries the matched question.
                all_qa_candidates.append({'question': str(q), 'answer': str(a), 'image': img, 'confidence': c, 'source_type': 'general', 'original_index': idx})

    all_qa_candidates.sort(key=lambda x: x['confidence'], reverse=True)
    top_5_candidates = all_qa_candidates[:5]

    if top_5_candidates:
        logger.info(f"Found {len(top_5_candidates)} relevant rows from CSV/XLSX. Sending to LLM for formatting.")

        # Rebuild the full source rows so the LLM sees every column, not just Q/A.
        context_chunks = []
        for candidate in top_5_candidates:
            source_type = candidate['source_type']
            original_df = embedding_manager.embeddings[source_type].df_qa
            matched_row_data = original_df.iloc[candidate['original_index']]

            row_dict = matched_row_data.to_dict()
            row_context_str = "\n".join([f"- {key}: {value}" for key, value in row_dict.items() if pd.notna(value) and str(value).strip() != ''])
            context_chunks.append(f"Matching Row {len(context_chunks) + 1} (From: {source_type} source file):\n{row_context_str}")

        full_context = "\n\n---\n\n".join(context_chunks)

        final_answer = get_answer_from_context(
            question=user_query,
            context=full_context,
            system_prompt=QA_FORMATTER_PROMPT
        )

        response_data = {
            'query': user_query,
            'answer': final_answer,
            'confidence': top_5_candidates[0]['confidence'],
            'original_question': top_5_candidates[0]['question'],
            'source': 'xlsx_qa_llm_formatted'
        }
        if top_5_candidates[0].get('image'):
            response_data['image_url'] = url_for('static', filename=top_5_candidates[0]['image'], _external=True)

        related_questions_list = [{'question': c['question'], 'answer': c['answer'], 'match': c['confidence']} for c in all_qa_candidates[1:RELATED_QUESTIONS_TO_SHOW+1] if c['source_type'] != 'greetings']
        response_data['related_questions'] = related_questions_list

        store_chat_history(session_id, user_id, user_query, response_data)
        return response_data

    # Fallback to RAG if no QA candidates were found
    if rag_system and rag_system.retriever:
        logger.info(f"Attempting FAISS RAG query for: {user_query[:50]}...")
        rag_result = rag_system.invoke(user_query)
        rag_answer = rag_result.get("answer")

        # The RAG chain signals "no answer" with a canned refusal sentence.
        if rag_answer and "the provided bibliography does not contain specific information" not in rag_answer.lower():
            logger.info(f"FAISS RAG system provided a valid answer: {rag_answer[:100]}...")
            response_data = {
                'query': user_query, 'answer': rag_answer, 'confidence': 85,
                'source': 'document_rag_faiss', 'related_questions': [],
                'document_sources_details': rag_result.get("cited_source_details")
            }
            store_chat_history(session_id, user_id, user_query, response_data)
            return response_data

    # Final fallback to general Groq model
    logger.info(f"No high-confidence answer. Using Groq fallback.")
    chat_history_messages_for_groq = chat_history if chat_history is not None else get_formatted_chat_history(session_id)
    groq_context = {'current_query': user_query, 'chat_history': chat_history_messages_for_groq, 'qa_related_info': ""}
    groq_stream = groq_bot_instance.stream_response(groq_context)
    groq_answer = "".join([chunk for chunk in groq_stream])

    response_data = {'query': user_query, 'answer': groq_answer, 'confidence': 75, 'source': 'groq_general_fallback', 'related_questions': []}
    store_chat_history(session_id, user_id, user_query, response_data)
    return response_data
# --- Streaming Logic ---
def generate_streaming_response(user_query: str, session_id: str, user_id: Optional[str], chat_history: Optional[List[Dict]] = None) -> Iterator[str]:
    """Yield the answer to ``user_query`` as plain-text chunks (streaming).

    Mirrors get_hybrid_response_logic_non_streaming's resolution order:
    greeting match, QA rows formatted by the LLM, FAISS RAG, Groq fallback.
    History persistence is handled by the caller (see stream_formatter).
    """
    global rag_system

    personal_db_data = personal_data_monitor.get_data(user_id) if user_id else None

    # Get candidates from QA files
    conf_greet, _, a_greet, _, _ = embedding_manager.find_best_answers(user_query, 'greetings', top_n=1)
    conf_pers, q_pers, a_pers, img_pers, idx_pers = embedding_manager.find_best_answers(user_query, 'personal', top_n=5)
    conf_gen, q_gen, a_gen, img_gen, idx_gen = embedding_manager.find_best_answers(user_query, 'general', top_n=5)

    # Handle greetings separately
    if conf_greet and conf_greet[0] >= HIGH_CONFIDENCE_THRESHOLD:
        yield a_greet[0]
        return

    # Combine general and personal candidates and stream a formatted response
    all_qa_candidates = []
    if conf_pers:
        for c, q, a, img, idx in zip(conf_pers, q_pers, a_pers, img_pers, idx_pers):
            processed_a = replace_placeholders_in_answer(a, personal_db_data)
            # Drop personal rows whose placeholders could not be filled at all.
            if not ("Sorry, this information is not available yet" in processed_a or "To get this specific information" in processed_a):
                all_qa_candidates.append({'question': q, 'answer': processed_a, 'image': img, 'confidence': c, 'source_type': 'personal', 'original_index': idx})
    if conf_gen:
        for c, q, a, img, idx in zip(conf_gen, q_gen, a_gen, img_gen, idx_gen):
            if not (pd.isna(a) or str(a).strip() == '' or str(a).lower() == 'nan'):
                # BUGFIX: 'question' previously duplicated the answer (str(a));
                # it now carries the matched question, consistent with the
                # personal branch and the non-streaming path.
                all_qa_candidates.append({'question': str(q), 'answer': str(a), 'image': img, 'confidence': c, 'source_type': 'general', 'original_index': idx})

    all_qa_candidates.sort(key=lambda x: x['confidence'], reverse=True)
    top_5_candidates = all_qa_candidates[:5]

    if top_5_candidates:
        logger.info(f"Found {len(top_5_candidates)} relevant CSV/XLSX rows. Streaming formatted answer.")

        # Rebuild the full source rows so the LLM sees every column, not just Q/A.
        context_chunks = []
        for candidate in top_5_candidates:
            source_type = candidate['source_type']
            original_df = embedding_manager.embeddings[source_type].df_qa
            matched_row_data = original_df.iloc[candidate['original_index']]

            row_dict = matched_row_data.to_dict()
            row_context_str = "\n".join([f"- {key}: {value}" for key, value in row_dict.items() if pd.notna(value) and str(value).strip() != ''])
            context_chunks.append(f"Matching Row {len(context_chunks) + 1} (From: {source_type} source file):\n{row_context_str}")

        full_context = "\n\n---\n\n".join(context_chunks)

        yield from stream_answer_from_context(
            question=user_query,
            context=full_context,
            system_prompt=QA_FORMATTER_PROMPT
        )
        return

    # Fallback to RAG if no QA candidates were found
    if rag_system and rag_system.retriever:
        logger.info(f"Attempting to stream from FAISS RAG for: {user_query[:50]}...")
        rag_stream = rag_system.stream(user_query)
        # Peek at the first chunk to detect the canned "no information"
        # refusal before committing to stream the RAG answer.
        first_chunk = next(rag_stream, None)

        if first_chunk and "the provided bibliography does not contain specific information" not in first_chunk.lower():
            logger.info("FAISS RAG streaming valid answer...")
            yield first_chunk
            yield from rag_stream
            return

    # Final fallback to general Groq model
    logger.info(f"No high-confidence CSV or RAG answer. Streaming from Groq fallback.")
    chat_history_messages_for_groq = chat_history if chat_history is not None else get_formatted_chat_history(session_id)
    groq_context = {'current_query': user_query, 'chat_history': chat_history_messages_for_groq, 'qa_related_info': ""}
    yield from groq_bot_instance.stream_response(groq_context)
+
def stream_formatter(logic_generator: Iterator[str], session_id: str, user_id: Optional[str], query: str) -> Iterator[str]:
    """Wrap a plain-text token stream in OpenAI-style SSE chunk events.

    After the inner stream is exhausted, emits the terminal stop chunk plus
    the ``[DONE]`` sentinel, then persists the full reply to session history.
    """
    stream_id = f"chatcmpl-{str(generate_uuid())}"
    model_tag = "MedicalAssistantBot/v1"
    collected = []

    def _sse_event(delta, finish_reason):
        # One OpenAI "chat.completion.chunk" envelope, SSE-framed.
        payload = {
            "id": stream_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": model_tag,
            "choices": [{"index": 0, "delta": delta, "finish_reason": finish_reason}]
        }
        return f"data: {json.dumps(payload)}\n\n"

    for piece in logic_generator:
        if not piece:
            continue
        collected.append(piece)
        yield _sse_event({"content": piece}, None)

    yield _sse_event({}, "stop")
    yield "data: [DONE]\n\n"

    full_response = "".join(collected)

    print(f"\n--- STREAMED FULL RESPONSE ---")
    print(full_response)
    print(f"------------------------------\n")

    history_manager.update_history(session_id, query, full_response)
# --- API Endpoints ---
@app.route('/chat-bot', methods=['POST'])
def get_answer_hybrid():
    """POST /chat-bot: non-streaming hybrid answer endpoint.

    Expects a JSON body with ``query`` and ``session_id`` (required) and an
    optional ``user_id`` for personalized answers.
    """
    # request.json raises / returns None on a missing or non-JSON body;
    # get_json(silent=True) lets us fall through to the 400 below instead.
    data = request.get_json(silent=True) or {}
    user_query = clean_html_from_text(data.get('query', ''))
    user_id = data.get('user_id')
    session_id = data.get('session_id')

    if not user_query or not session_id:
        return jsonify({'error': 'query and session_id are required'}), 400

    response_data = get_hybrid_response_logic_non_streaming(user_query, session_id, user_id, None)
    return jsonify(response_data)
|
| 688 |
+
@app.route('/v1/models', methods=['GET'])
def list_models():
    """GET /v1/models: OpenAI-compatible listing of the single static model."""
    return jsonify({
        "object": "list",
        "data": [{"id": "MedicalAssistantBot/v1", "object": "model", "created": int(time.time()), "owned_by": "user"}]
    })
+
|
| 696 |
+
@app.route('/v1/chat/completions', methods=['POST'])
def openai_compatible_chat_endpoint():
    """POST /v1/chat/completions: OpenAI-compatible chat endpoint.

    Supports streaming (SSE) and non-streaming modes via the ``stream`` flag.
    The last message is treated as the user query; earlier messages are
    forwarded as chat history.
    """
    # request.json raises / returns None on a missing or non-JSON body;
    # tolerate that instead of erroring inside Flask.
    data = request.get_json(silent=True) or {}
    is_streaming = data.get("stream", False)

    messages = data.get("messages", [])
    if not messages: return jsonify({"error": "No messages provided"}), 400

    user_query = clean_html_from_text(messages[-1].get("content", ""))
    chat_history = messages[:-1]
    session_id = data.get("conversation_id", f"webui-session-{str(generate_uuid())}")
    user_id = None  # OpenAI-style clients do not carry our internal user ids.

    if is_streaming:
        logic_generator = generate_streaming_response(user_query, session_id, user_id, chat_history)
        return Response(stream_formatter(logic_generator, session_id, user_id, user_query), mimetype='text/event-stream')
    else:
        full_response_dict = get_hybrid_response_logic_non_streaming(user_query, session_id, user_id, chat_history)
        response_content = full_response_dict.get("answer", "Sorry, an error occurred.")

        openai_response = {
            "id": f"chatcmpl-{str(generate_uuid())}", "object": "chat.completion", "created": int(time.time()),
            "model": "MedicalAssistantBot/v1",
            "choices": [{"index": 0, "message": {"role": "assistant", "content": response_content}, "finish_reason": "stop"}],
            "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
        }
        history_manager.update_history(session_id, user_query, response_content)
        return jsonify(openai_response)
+
|
| 727 |
+
# --- Admin and Utility Routes ---
@app.route('/')
def index_route():
    """Serve the chat UI template, or a 404 message if it is absent."""
    template_to_render = 'chat-bot.html'
    template_path = os.path.join(app.root_path, 'templates', template_to_render)
    if not os.path.exists(template_path):
        logger.warning(f"Template '{template_to_render}' not found. Serving basic message.")
        return "Chatbot interface not found. Please ensure 'templates/chat-bot.html' exists.", 404
    return render_template(template_to_render)
|
| 736 |
+
@app.route('/admin/faiss_rag_status', methods=['GET'])
@require_admin_auth
def get_faiss_rag_status():
    """GET /admin/faiss_rag_status: report FAISS RAG system health and config.

    Also invoked directly (not via HTTP) by the rebuild route to embed the
    refreshed status in its response, so it must stay callable in-process.
    """
    global rag_system
    if not rag_system:
        return jsonify({"error": "FAISS RAG system not initialized."}), 500
    try:
        status = {
            "status": "Initialized" if rag_system.retriever else "Initialized (Retriever not ready)",
            "index_storage_dir": rag_system.index_storage_dir,
            "embedding_model": rag_system.embedding_model_name,
            "groq_model": rag_system.groq_model_name,
            "retriever_k": rag_system.retriever.k if rag_system.retriever else "N/A",
            "processed_source_files": rag_system.processed_source_files,
            "index_type": "FAISS",
            "index_loaded_or_built": rag_system.vector_store is not None
        }
        # ntotal is read off the underlying FAISS index; guarded because the
        # vector store may exist without a usable index attribute.
        if rag_system.vector_store and hasattr(rag_system.vector_store, 'index') and rag_system.vector_store.index:
            try:
                status["num_vectors_in_index"] = rag_system.vector_store.index.ntotal
            except Exception:
                status["num_vectors_in_index"] = "N/A (Could not get count)"
        else:
            status["num_vectors_in_index"] = "N/A (Vector store or index not available)"
        return jsonify(status)
    except Exception as e:
        logger.error(f"Error getting FAISS RAG status: {e}", exc_info=True)
        return jsonify({"error": str(e)}), 500
+
|
| 765 |
+
@app.route('/admin/download_qa_database', methods=['GET'])
@require_admin_auth
def download_qa_database():
    """GET /admin/download_qa_database: export all loaded QA sets as .xlsx."""
    try:
        output = io.BytesIO()
        # (embedding set key, worksheet name) in the order sheets are written.
        sheet_map = [('general', 'General_QA'), ('personal', 'Personal_QA'), ('greetings', 'Greetings')]
        with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
            for source_key, sheet_name in sheet_map:
                holder = embedding_manager.embeddings[source_key]
                if holder and holder.df_qa is not None:
                    holder.df_qa.to_excel(writer, sheet_name=sheet_name, index=False)

        output.seek(0)

        return send_file(
            output,
            mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
            as_attachment=True,
            download_name=f'qa_database_{datetime.now().strftime("%Y%m%d")}.xlsx'
        )
    except Exception as e:
        logger.error(f"Error generating QA database file: {e}", exc_info=True)
        return jsonify({'error': 'Failed to generate QA database file'}), 500
|
| 792 |
+
@app.route('/admin/rebuild_faiss_index', methods=['POST'])
@require_admin_auth
def rebuild_faiss_index_route():
    """POST /admin/rebuild_faiss_index: regenerate the document RAG index.

    Two-step process:
      1. Run chunker.py as a subprocess to re-extract and chunk the source
         documents into a JSON file.
      2. Rebuild the FAISS index from those chunks and swap the module-level
         ``rag_system`` to the freshly built instance.
    Any failure returns a 500 with details and leaves the old index active.
    """
    global rag_system
    logger.info("Admin request to rebuild FAISS RAG index received. Starting two-step process.")

    logger.info("Step 1: Running chunker.py to pre-process source documents.")
    chunker_script_path = os.path.join(_APP_BASE_DIR, 'chunker.py')
    chunked_json_output_path = os.path.join(RAG_STORAGE_PARENT_DIR, RAG_CHUNKED_SOURCES_FILENAME)

    os.makedirs(TEXT_EXTRACTIONS_DIR, exist_ok=True)

    if not os.path.exists(chunker_script_path):
        logger.error(f"Chunker script not found at '{chunker_script_path}'. Aborting rebuild.")
        return jsonify({"error": f"chunker.py not found. Cannot proceed with rebuild."}), 500

    # sys.executable keeps the subprocess on the same interpreter/venv.
    command = [
        sys.executable,
        chunker_script_path,
        '--sources-dir', RAG_SOURCES_DIR,
        '--output-file', chunked_json_output_path,
        '--text-output-dir', TEXT_EXTRACTIONS_DIR
    ]

    try:
        # check=True raises CalledProcessError on a non-zero exit status.
        process = subprocess.run(command, capture_output=True, text=True, check=True)
        logger.info("Chunker script executed successfully.")
        logger.info(f"Chunker stdout:\n{process.stdout}")
    except subprocess.CalledProcessError as e:
        logger.error(f"Chunker script failed with exit code {e.returncode}.")
        logger.error(f"Chunker stderr:\n{e.stderr}")
        return jsonify({"error": "Step 1 (Chunking) failed.", "details": e.stderr}), 500
    except Exception as e:
        logger.error(f"An unexpected error occurred while running the chunker script: {e}", exc_info=True)
        return jsonify({"error": f"An unexpected error occurred during the chunking step: {str(e)}"}), 500

    logger.info("Step 2: Rebuilding FAISS index from the newly generated chunks.")
    try:
        new_rag_system_instance = initialize_and_get_rag_system(force_rebuild=True)

        # Only swap in the new instance once its vector store actually built,
        # so a failed rebuild cannot leave the app without a working index.
        if new_rag_system_instance and new_rag_system_instance.vector_store:
            rag_system = new_rag_system_instance
            logger.info("FAISS RAG index rebuild completed and new RAG system instance is active.")
            updated_status_response = get_faiss_rag_status()
            return jsonify({"message": "FAISS RAG index rebuild completed.", "status": updated_status_response.get_json()}), 200
        else:
            logger.error("FAISS RAG index rebuild failed during the indexing phase.")
            return jsonify({"error": "Step 2 (Indexing) failed. Check logs."}), 500

    except Exception as e:
        logger.error(f"Error during admin FAISS index rebuild (indexing phase): {e}", exc_info=True)
        return jsonify({"error": f"Failed to rebuild index during indexing phase: {str(e)}"}), 500
|
| 845 |
+
@app.route('/db/status', methods=['GET'])
@require_admin_auth
def get_personal_db_status():
    """GET /db/status: health info for the personal-data CSV monitor."""
    try:
        db_path = personal_data_monitor.database_path
        status_info = {
            'personal_data_csv_monitor_status': 'running',
            'file_exists': os.path.exists(db_path),
            'data_loaded': personal_data_monitor.df is not None, 'last_update': None
        }
        # BUGFIX: os.path.getmtime returns a float (or raises), so the old
        # "is not None" guard was dead code and stat'd the file twice.
        if status_info['file_exists']:
            status_info['last_update'] = datetime.fromtimestamp(os.path.getmtime(db_path)).isoformat()
        return jsonify(status_info)
    except Exception as e: return jsonify({'status': 'error', 'error': str(e)}), 500
|
| 859 |
+
@app.route('/report', methods=['GET'])
@require_admin_auth
def download_report():
    """GET /report: download the chat-history CSV log as an attachment."""
    try:
        log_missing = not os.path.exists(CHAT_LOG_FILE) or os.path.getsize(CHAT_LOG_FILE) == 0
        if log_missing:
            return jsonify({'error': 'No chat history available.'}), 404
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        return send_file(CHAT_LOG_FILE, mimetype='text/csv', as_attachment=True, download_name=f'chat_history_{stamp}.csv')
    except Exception as e:
        logger.error(f"Error downloading report: {e}", exc_info=True)
        return jsonify({'error': 'Failed to generate report'}), 500
|
| 870 |
+
@app.route('/create-session', methods=['POST'])
def create_session_route():
    """POST /create-session: mint and return a new chat session id."""
    try:
        new_session_id = str(generate_uuid())
        logger.info(f"New session created: {new_session_id}")
        return jsonify({'status': 'success', 'session_id': new_session_id}), 200
    except Exception as exc:
        logger.error(f"Session creation error: {exc}", exc_info=True)
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 880 |
+
@app.route('/version', methods=['GET'])
def get_version_route():
    """GET /version: report the application version string."""
    version_payload = {'version': '3.9.9-Medical-Bot'}
    return jsonify(version_payload), 200
|
| 884 |
+
@app.route('/clear-history', methods=['POST'])
def clear_session_history_route():
    """POST /clear-history: drop the stored chat history for one session."""
    # request.json raises / returns None on a missing or non-JSON body;
    # get_json(silent=True) lets the session_id check below handle it.
    data = request.get_json(silent=True) or {}
    session_id = data.get('session_id')
    if not session_id:
        return jsonify({'status': 'error', 'message': 'session_id is required'}), 400
    history_manager.clear_history(session_id)
    return jsonify({'status': 'success', 'message': f'History cleared for session {session_id}'})
|
| 893 |
+
# --- App Cleanup and Startup ---
def cleanup_application():
    """Stop background monitors when the interpreter exits."""
    if personal_data_monitor:
        personal_data_monitor.stop()
    logger.info("Application cleanup finished.")


atexit.register(cleanup_application)
+
|
| 899 |
+
def load_qa_data_on_startup():
    """Load CSV/XLSX QA sources from RAG_SOURCES_DIR and build embeddings.

    Populates the module-level ``embedding_manager`` with three QA sets
    (general, personal, greetings). Any extra .xlsx files found in the
    sources directory are merged into the general set. Failures are logged
    and non-fatal so the app can still start with partial data.
    """
    global embedding_manager
    print("\n--- Loading QA Source Files ---")
    try:
        general_qa_path = os.path.join(RAG_SOURCES_DIR, 'general_qa.csv')
        personal_qa_path = os.path.join(RAG_SOURCES_DIR, 'personal_qa.csv')
        greetings_qa_path = os.path.join(RAG_SOURCES_DIR, 'greetings.csv')

        # Initialize with empty dataframes
        general_qa_df = pd.DataFrame()
        personal_qa_df = pd.DataFrame()
        greetings_qa_df = pd.DataFrame()

        # NOTE(review): cp1252 is assumed for all three CSVs — presumably
        # they were authored on Windows; confirm against the actual files.
        if os.path.exists(general_qa_path):
            try:
                general_qa_df = pd.read_csv(general_qa_path, encoding='cp1252')
                print(f"- Loaded: {os.path.basename(general_qa_path)}")
            except Exception as e_csv: logger.error(f"Error reading general_qa.csv: {e_csv}")
        else:
            logger.warning(f"Optional file 'general_qa.csv' not found in '{RAG_SOURCES_DIR}'.")

        if os.path.exists(personal_qa_path):
            try:
                personal_qa_df = pd.read_csv(personal_qa_path, encoding='cp1252')
                print(f"- Loaded: {os.path.basename(personal_qa_path)}")
            except Exception as e_csv: logger.error(f"Error reading personal_qa.csv: {e_csv}")
        else:
            logger.warning(f"Optional file 'personal_qa.csv' not found in '{RAG_SOURCES_DIR}'.")

        if os.path.exists(greetings_qa_path):
            try:
                greetings_qa_df = pd.read_csv(greetings_qa_path, encoding='cp1252')
                print(f"- Loaded: {os.path.basename(greetings_qa_path)}")
            except Exception as e_csv: logger.error(f"Error reading greetings.csv: {e_csv}")
        else:
            logger.warning(f"Optional file 'greetings.csv' not found in '{RAG_SOURCES_DIR}'.")

        logger.info(f"Scanning for additional QA sources (.xlsx) in '{RAG_SOURCES_DIR}'...")
        if os.path.isdir(RAG_SOURCES_DIR):
            xlsx_files_found = [f for f in os.listdir(RAG_SOURCES_DIR) if f.endswith('.xlsx') and os.path.isfile(os.path.join(RAG_SOURCES_DIR, f))]

            if xlsx_files_found:
                all_general_dfs = [general_qa_df] if not general_qa_df.empty else []
                for xlsx_file in xlsx_files_found:
                    try:
                        xlsx_path = os.path.join(RAG_SOURCES_DIR, xlsx_file)
                        df_excel = pd.read_excel(xlsx_path)

                        # Accept several (Spanish/English) column names as the
                        # searchable question column.
                        question_col_candidates = ['Pregunta', 'Question', 'Nombre']
                        question_col_found = next((col for col in question_col_candidates if col in df_excel.columns), None)

                        if question_col_found:
                            logger.info(f"Using '{question_col_found}' as the primary search column for '{xlsx_file}'.")
                            df_excel['Question'] = df_excel[question_col_found]
                            all_general_dfs.append(df_excel)
                            print(f"- Loaded and processing: {xlsx_file}")
                        elif not df_excel.empty:
                            # No recognized column: fall back to the first one.
                            first_col_name = df_excel.columns[0]
                            logger.warning(f"No standard search column found in '{xlsx_file}'. Using first column '{first_col_name}' as the source.")
                            df_excel['Question'] = df_excel[first_col_name]
                            all_general_dfs.append(df_excel)
                            print(f"- Loaded and processing: {xlsx_file}")
                        else:
                            logger.warning(f"Skipping empty XLSX file: '{xlsx_file}'")

                    except Exception as e_xlsx:
                        logger.error(f"Error processing XLSX file '{xlsx_file}': {e_xlsx}")

                if len(all_general_dfs) > 0:
                    general_qa_df = pd.concat(all_general_dfs, ignore_index=True)
                    logger.info(f"Successfully merged data from {len(xlsx_files_found)} XLSX file(s) into the general QA set.")

        dataframes_to_process = {
            "general": general_qa_df,
            "personal": personal_qa_df,
            "greetings": greetings_qa_df
        }

        # Normalize every populated column to cleaned strings; the frames are
        # mutated in place, so dataframes_to_process sees the updated data.
        for df_name, df_val in dataframes_to_process.items():
            if df_val.empty: continue
            for col in df_val.columns:
                if not df_val[col].isnull().all():
                    # Ensure all data is string for normalization, except for specific columns if needed
                    if df_val[col].dtype != object:
                        df_val[col] = df_val[col].astype(str)
                    df_val[col] = df_val[col].apply(normalize_text)

            if 'Question' not in df_val.columns and not df_val.empty:
                first_col = df_val.columns[0]
                df_val['Question'] = df_val[first_col]
                logger.warning(f"'Question' column was missing in {df_name} data. Using first column '{first_col}' as search source.")

        embedding_manager.update_embeddings(
            dataframes_to_process["general"],
            dataframes_to_process["personal"],
            dataframes_to_process["greetings"]
        )
        logger.info("CSV & XLSX QA data loaded and embeddings initialized.")

    except Exception as e:
        logger.critical(f"CRITICAL: Error loading or processing QA data: {e}. Semantic QA may not function.", exc_info=True)
    print("-----------------------------\n")
|
| 1002 |
+
if __name__ == '__main__':
    # Make sure the directories the app serves from exist before startup.
    for folder_path in [os.path.join(_APP_BASE_DIR, 'templates'),
                        os.path.join(_APP_BASE_DIR, 'static'),
                        TEXT_EXTRACTIONS_DIR]:
        os.makedirs(folder_path, exist_ok=True)

    # Load QA CSV/XLSX data and prepare the chat-history log.
    load_qa_data_on_startup()
    initialize_chat_log()

    logger.info("Attempting to initialize RAG system from llm_handling module...")
    rag_system = initialize_and_get_rag_system()
    if rag_system:
        logger.info("RAG system initialized successfully via llm_handling module.")
    else:
        logger.warning("RAG system failed to initialize. Document RAG functionality will be unavailable.")

    logger.info(f"Flask application starting with Hybrid RAG on {FLASK_APP_HOST}:{FLASK_APP_PORT} Debug: {FLASK_DEBUG_MODE}...")
    if not FLASK_DEBUG_MODE:
        # Quiet werkzeug's per-request access logging outside debug mode.
        werkzeug_log = logging.getLogger('werkzeug')
        werkzeug_log.setLevel(logging.ERROR)

    # use_reloader=False: the reloader would re-run the whole startup
    # sequence (including embedding builds) in a child process.
    app.run(host=FLASK_APP_HOST, port=FLASK_APP_PORT, debug=FLASK_DEBUG_MODE, use_reloader=False)
app_hybrid_rag.log
ADDED
|
File without changes
|
chat_history.csv
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
sl,date_time,session_id,user_id,query,answer
|
| 2 |
+
1,2025-08-20 10:35:24,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,Dra. Valentina Rojas,The system is currently unable to process this request.
|
| 3 |
+
2,2025-08-20 10:38:52,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,Dra. Valentina Rojas,"<think>
|
| 4 |
+
Okay, the user mentioned ""Dra. Valentina Rojas"" again. Let me check the prior conversation history. The first time, the user wrote the same name, and I responded with a system error. Now they're repeating it. Since there's no context about a specific dental question, I need to follow the core instructions.
|
| 5 |
+
|
| 6 |
+
First, determine if this is a dental-related query. The name doesn't indicate a dental topic. The user might be testing if I can handle names or maybe there's a typo. But according to the instructions, I should only answer dentistry-related questions. Since there's no question here, I need to prompt them to ask about oral health.
|
| 7 |
+
|
| 8 |
+
I should respond with the exact phrase specified for out-of-scope questions. Make sure not to engage in any other way. Also, keep the tone professional and helpful. No need to apologize, just state the limitation and ask if they have a dental question.
|
| 9 |
+
</think>
|
| 10 |
+
|
| 11 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. Do you have a question about oral health?"
|
| 12 |
+
3,2025-08-20 10:39:13,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,i need doctors appoint ment: Dra. Valentina Rojas,"<think>
|
| 13 |
+
Okay, the user is asking for an appointment with Dr. Valentina Rojas. Let me check the prior conversation history. The user has mentioned her name twice before, and each time, I responded that I can't process the request and redirected to dental topics. Now they're asking again, specifically for a doctor's appointment.
|
| 14 |
+
|
| 15 |
+
Hmm, the user might think I can book appointments, but according to the core instructions, I can't handle general requests like scheduling. My role is strictly to answer dental questions. I need to make sure I don't get involved in administrative tasks.
|
| 16 |
+
|
| 17 |
+
I should respond with the exact phrase for out-of-scope questions. Wait, the user is asking for an appointment, which is more about practice management than dentistry. So, I need to inform them I can't assist with that and prompt a dental question. Let me confirm the exact response: ""I am a dental assistant AI and my capabilities are limited to dental topics. Do you have a question about oral health?"" Yes, that's right. I'll use that again to stay compliant with the instructions.
|
| 18 |
+
</think>
|
| 19 |
+
|
| 20 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. Do you have a question about oral health?"
|
| 21 |
+
4,2025-08-20 10:41:23,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,i need doctors appoint ment: Dra. Valentina Rojas,"<think>
|
| 22 |
+
Okay, the user is asking for a doctor's appointment with Dr. Valentina Rojas. Let me check the prior conversation history. They've mentioned her name a few times, so they probably want to schedule a visit. But wait, my role is strictly to answer dental questions. I can't handle scheduling or appointments. I need to make sure I don't go beyond my scope. The user might not realize that I can't assist with booking. I should politely redirect them to contact the clinic directly. Let me confirm the instructions again: if the question is outside dentistry, respond with the exact phrase. But here, the user is asking about an appointment, which is related to accessing dental care, but the actual scheduling isn't within my capabilities. So I need to inform them that I can't help with that and suggest they call the clinic. Also, maybe offer to answer any dental questions they have. Let me structure the response clearly and professionally.
|
| 23 |
+
</think>
|
| 24 |
+
|
| 25 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For scheduling appointments or clinic inquiries, please contact Dr. Valentina Rojas's office directly. Do you have a question about oral health?"
|
| 26 |
+
5,2025-08-20 10:43:49,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,i need doctors appoint ment: Dra. Valentina Rojas,"<think>
|
| 27 |
+
Okay, the user is asking for a doctor's appointment with Dr. Valentina Rojas again. Let me check the conversation history. They've mentioned this a few times already. The previous responses were about scheduling and directing them to contact the office directly. Since my role is strictly dental, I can't help with booking appointments. I need to stay within my scope. Maybe they didn't get the message before, so I should reiterate that they need to contact the office. Also, offer help with any dental questions they might have. Keep the response polite and professional. Make sure not to mention any other services or go beyond dental advice. Just confirm that I can't schedule but can answer dental questions.
|
| 28 |
+
</think>
|
| 29 |
+
|
| 30 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For scheduling appointments or clinic inquiries, please contact Dr. Valentina Rojas's office directly. Do you have a question about oral health?"
|
| 31 |
+
6,2025-08-20 10:44:54,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,i need to knw her details,"<think>
|
| 32 |
+
Okay, the user is asking for Dr. Valentina Rojas's details. Let me check the conversation history.
|
| 33 |
+
|
| 34 |
+
They've been trying to get an appointment with her, and I've already told them to contact her office directly. Now they want her details. Since I can't provide personal information or contact details of professionals, I need to stay within my role.
|
| 35 |
+
|
| 36 |
+
My instructions say to avoid giving out non-dental info. So I should reiterate that they need to reach out to the office. Maybe suggest they look up the clinic's contact info online or through their website. Also, remind them I can help with dental questions if they have any.
|
| 37 |
+
|
| 38 |
+
Need to make sure the response is polite and professional, and doesn't include any personal data. Let me phrase it clearly.
|
| 39 |
+
</think>
|
| 40 |
+
|
| 41 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For personal or professional details of Dr. Valentina Rojas, please contact her clinic directly using their official contact information, which can typically be found on her clinic’s website or through medical directory platforms. Do you have a question about oral health?"
|
| 42 |
+
7,2025-08-20 10:54:52,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,i need doctors appoint ment: Dra. Valentina Rojas,"<think>
|
| 43 |
+
Okay, the user is repeatedly asking for a doctor's appointment with Dr. Valentina Rojas. They've mentioned needing her details and scheduling an appointment multiple times. I need to stick to the core instructions here.
|
| 44 |
+
|
| 45 |
+
First, I'm only supposed to handle medical and healthcare-related questions. The user's request is about scheduling an appointment, which is related to healthcare but not a medical question per se. However, since it's about accessing medical services, maybe it's within the scope. Wait, the initial instruction says to respond with the exact phrase if it's unrelated. But scheduling an appointment is part of healthcare access. Hmm.
|
| 46 |
+
|
| 47 |
+
Wait, the user's prior messages were about getting an appointment and details. The previous responses from the AI were directing them to contact the office directly. The user is persistent, so maybe they need more guidance. But according to the instructions, I shouldn't provide personal or professional details of the doctor. The AI's role is to give general info, not facilitate appointments.
|
| 48 |
+
|
| 49 |
+
So, the correct approach is to reiterate that the AI can't schedule appointments and suggest contacting the clinic directly. Also, check if the user has a medical question. The user might be trying to get the clinic's contact info, but the AI can't provide that either. So the response should be the standard one about contacting the office and offer help with oral health questions. Make sure not to mention any specific details about the doctor or her clinic beyond what's already been said.
|
| 50 |
+
</think>
|
| 51 |
+
|
| 52 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For scheduling appointments or obtaining clinic details for Dr. Valentina Rojas, please contact her office directly using their official contact information (e.g., phone number, website, or medical directory listings). Do you have a question about oral health?"
|
| 53 |
+
8,2025-08-20 10:59:04,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,information of Dra. Valentina Rojas,"<think>
|
| 54 |
+
Okay, the user is asking for information about Dra. Valentina Rojas. Let me check the conversation history. They've been repeatedly asking for a doctor's appointment with her and her details. I need to make sure I stay within my role as a dental assistant AI.
|
| 55 |
+
|
| 56 |
+
First, I should confirm that I can't provide personal information. The user might be looking for contact details or professional background. Since I can't access external databases or personal data, I need to guide them to official sources.
|
| 57 |
+
|
| 58 |
+
They might not know where to find this info. I should suggest checking the clinic's website, medical directories, or professional associations. Also, remind them to use official channels for appointments.
|
| 59 |
+
|
| 60 |
+
I need to keep the response helpful but within boundaries. Avoid any medical advice and stick to directing them to proper resources. Make sure the tone is polite and professional. Let me structure the response step by step.
|
| 61 |
+
</think>
|
| 62 |
+
|
| 63 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For professional or clinic-related information about Dr. Valentina Rojas (e.g., credentials, specialties, or clinic locations), you may check:
|
| 64 |
+
1. Her clinic’s official website or directory listings.
|
| 65 |
+
2. Medical/dental association directories (e.g., local or national dental boards).
|
| 66 |
+
3. Patient review platforms (e.g., Google Maps, Yelp, or Zocdoc).
|
| 67 |
+
|
| 68 |
+
For personal or confidential details, I cannot assist. Always verify information through official channels. Do you have a question about oral health?"
|
| 69 |
+
9,2025-08-20 11:02:04,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,information of Dra. Valentina Rojas,"<think>
|
| 70 |
+
Okay, the user is asking for information about Dra. Valentina Rojas again. Let me check the prior conversation history. They've asked for her appointment details and information multiple times, and I've consistently responded that I can't provide that because I'm a dental assistant AI limited to dental topics.
|
| 71 |
+
|
| 72 |
+
Hmm, the user might be trying to get personal information or clinic details that aren't publicly available. I need to make sure I don't violate any privacy policies. My instructions say to handle out-of-scope questions by stating my limitations and offering help with medical topics. Since the user is asking for information about a specific doctor, which isn't within my scope, I should stick to the script.
|
| 73 |
+
|
| 74 |
+
Wait, in previous responses, I mentioned checking official sources like the clinic's website or medical directories. Maybe the user isn't finding that information easily. I should reiterate those suggestions but remain firm in my limitations. Also, the user might be from a non-English speaking country since the name is Spanish. Maybe they need help with the process of finding such information in their region. But I can't assume that. I have to stay neutral and provide the same guidance each time.
|
| 75 |
+
|
| 76 |
+
I should make sure my response is clear and helpful, directing them to official channels without providing any personal data. Let me structure the response again, using the same template as before but ensuring it's polite and professional. Also, remind them that I can help with oral health questions if they have any. That way, I'm following the core instructions and not overstepping my role.
|
| 77 |
+
</think>
|
| 78 |
+
|
| 79 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For professional or clinic-related information about Dr. Valentina Rojas (e.g., credentials, specialties, or clinic locations), you may check:
|
| 80 |
+
1. Her clinic’s official website or directory listings.
|
| 81 |
+
2. Medical/dental association directories (e.g., local or national dental boards).
|
| 82 |
+
3. Patient review platforms (e.g., Google Maps, Yelp, or Zocdoc).
|
| 83 |
+
|
| 84 |
+
For personal or confidential details, I cannot assist. Always verify information through official channels. Do you have a question about oral health?"
|
| 85 |
+
10,2025-08-20 11:16:02,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,information of Dra. Valentina Rojas,"<think>
|
| 86 |
+
Okay, the user is asking for information about Dr. Valentina Rojas again. Let me check the prior conversation history. They've asked this multiple times, and each time I've responded that I can't provide personal details and suggested they contact her clinic directly.
|
| 87 |
+
|
| 88 |
+
Hmm, maybe they need help finding her clinic's contact information. But I can't give out personal info. I should reiterate the same advice but maybe add more specific sources where they can find official information. Let me make sure to mention official directories and professional associations again. Also, remind them to use official channels for verification.
|
| 89 |
+
|
| 90 |
+
Wait, the user might not know where to look. Maybe list examples like the American Dental Association or local dental boards. Also, mention that if they need help with something specific related to dental health, I can assist with that. But since their query is about finding the doctor's info, I need to stay on topic without overstepping.
|
| 91 |
+
|
| 92 |
+
I should avoid any markdown and keep the response clear. Let me structure the response with bullet points again for clarity, but in plain text. Make sure to emphasize that I can't provide personal details and that they should contact the clinic directly. Also, offer help with oral health questions if they have any.
|
| 93 |
+
|
| 94 |
+
Double-checking the instructions: I must not provide medical advice, only general info. The user's query is about finding a doctor's information, which is outside my scope beyond dental topics. Since the user mentioned ""doctors"" but the previous responses were about a dentist, maybe clarify if they meant a medical doctor or dentist. Wait, in the prior messages, the user referred to a dental appointment, so it's likely a dentist.
|
| 95 |
+
|
| 96 |
+
But the user wrote ""doctors"" here. Should I confirm if they meant a medical doctor or dentist? However, the previous context was about a dental appointment. Maybe it's safe to assume they still mean the dentist. But to be thorough, perhaps mention both possibilities. However, the user's prior queries were about a dental appointment, so sticking with dental context is better.
|
| 97 |
+
|
| 98 |
+
Finalizing the response: Reiterate the same advice, suggest official sources, and offer help with oral health questions. Make sure the tone is polite and helpful.
|
| 99 |
+
</think>
|
| 100 |
+
|
| 101 |
+
I am a dental assistant AI and my capabilities are limited to dental topics. For professional or clinic-related information about Dr. Valentina Rojas (e.g., credentials, specialties, or clinic locations), you may check:
|
| 102 |
+
1. Her clinic’s official website or directory listings.
|
| 103 |
+
2. Medical/dental association directories (e.g., local or national dental boards).
|
| 104 |
+
3. Patient review platforms (e.g., Google Maps, Yelp, or Zocdoc).
|
| 105 |
+
|
| 106 |
+
For personal or confidential details, I cannot assist. Always verify information through official channels. Do you have a question about oral health?"
|
| 107 |
+
11,2025-08-20 11:22:39,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,information of Dra. Valentina Rojas,"According to our records, Dra. Valentina Rojas is a cardiologist with availability on Mondays from 9:00 AM to 1:00 PM, Wednesdays from 3:00 PM to 7:00 PM, and Fridays from 9:00 AM to 12:00 PM. She is affiliated with Fonasa, Isapre, and Particular health insurance providers. You can find more information about her on the SACMED website at https://sacmed.cl/bestmed/cardiologia/valentina-rojas."
|
| 108 |
+
12,2025-08-20 11:24:32,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,"Ghobashy, AM et al 2017 (35),","<think>
|
| 109 |
+
Okay, the user mentioned ""Ghobashy, AM et al 2017 (35)"" which looks like a reference to a medical study. I need to check if this is related to medicine. The user might be asking for information about this study.
|
| 110 |
+
|
| 111 |
+
First, I should confirm if this is a medical reference. The name Ghobashy and the year 2017 suggest it's a published paper. The number (35) might be a reference number in a list or a volume/issue number.
|
| 112 |
+
|
| 113 |
+
Since the user is asking about a specific study, I should respond by offering general information about how to find such studies. I can't provide the full text or details without more context, but I can guide them to resources like PubMed, Google Scholar, or contact a healthcare professional for more details.
|
| 114 |
+
|
| 115 |
+
I need to make sure not to give medical advice but help them find the information. Also, check if they need help with accessing the study or understanding its content. Since the prior interactions were about a dentist, maybe the user is switching topics now. But the current query is definitely medical-related, so I should stay within the medical scope.
|
| 116 |
+
</think>
|
| 117 |
+
|
| 118 |
+
The reference **Ghobashy, AM et al. 2017 (35)** appears to cite a medical or scientific study. To access the full text or details of this publication, you can:
|
| 119 |
+
1. Search for it on academic databases like **PubMed**, **Google Scholar**, or **ScienceDirect**.
|
| 120 |
+
2. Check if your institution (e.g., a university or hospital) provides access to medical journals.
|
| 121 |
+
3. Contact a healthcare professional or researcher for guidance on interpreting the study.
|
| 122 |
+
|
| 123 |
+
If you have a specific question about the study’s content or relevance to a medical condition, let me know, and I’ll provide general context (e.g., topic, methodology, or implications) without offering personal medical advice."
|
| 124 |
+
13,2025-08-20 11:28:49,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,who are available on Mar 10:00-14:00; Jue 16:00-20:00,"Based on the provided data, the doctors available on March 10:00-14:00 and Thursday 16:00-20:00 are Dra. Antonia Herrera and Dra. Camila Fernández. Dra. Antonia Herrera has a schedule on March from 15:00-19:00 and Thursday from 09:00-13:00 and 16:00-12:00, which overlaps with the requested time slots. Dra. Camila Fernández has a schedule on Monday from 15:00-19:00 and Thursday from 09:00-13:00, which also overlaps with the requested time slots.
|
| 125 |
+
|
| 126 |
+
Source: SACMED (https://sacmed.cl/)"
|
| 127 |
+
14,2025-08-20 11:29:28,e225dcdd-ab29-404c-9400-f29b107b766e,N/A,what abt Dr. Sebastián Muñoz,"Dr. Sebastián Muñoz is an endocrinologist with availability on Mondays from 10:00 to 14:00 and Thursdays from 16:00 to 20:00. You can find more information about his services and schedule on the SACMED website at https://sacmed.cl/bestmed/endocrinologia/sebastian-munoz. Additionally, he is affiliated with Isapre and Particular health insurance providers."
|
chat_history.db
ADDED
|
Binary file (28.7 kB). View file
|
|
|
chunker.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
import json
|
| 4 |
+
import argparse
|
| 5 |
+
from typing import List, Dict, Optional
|
| 6 |
+
|
| 7 |
+
from pypdf import PdfReader
|
| 8 |
+
import docx as python_docx
|
| 9 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 10 |
+
|
| 11 |
+
# --- Logging Setup ---
# Standalone-script logging: stream handler only (stderr), no log file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
# Module-level logger used by every function in this script.
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
# --- Text Extraction Helper Functions ---
|
| 22 |
+
# Note: These are duplicated from llm_handling.py to make this a standalone script.
|
| 23 |
+
def extract_text_from_file(file_path: str, file_type: str) -> Optional[str]:
    """Extract plain text from a single PDF, DOCX, or TXT file.

    Args:
        file_path: Path to the source file.
        file_type: Lowercase format key: 'pdf', 'docx', or 'txt'.

    Returns:
        The stripped text content, or None when the type is unsupported,
        no text could be extracted, or extraction raised an error.
    """
    logger.info(f"Extracting text from {file_type.upper()} file: {os.path.basename(file_path)}")
    text_content = None
    try:
        if file_type == 'pdf':
            reader = PdfReader(file_path)
            # Extract each page exactly once (the original called extract_text()
            # twice per page: once in the filter and once in the join).
            page_texts = (page.extract_text() for page in reader.pages)
            text_content = "".join(t + "\n" for t in page_texts if t)
        elif file_type == 'docx':
            doc = python_docx.Document(file_path)
            text_content = "\n".join(para.text for para in doc.paragraphs if para.text)
        elif file_type == 'txt':
            # errors='ignore' skips undecodable bytes instead of aborting the file.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                text_content = f.read()
        else:
            logger.warning(f"Unsupported file type for text extraction: {file_type} for file {os.path.basename(file_path)}")
            return None

        if not text_content or not text_content.strip():
            logger.warning(f"No text content extracted from {os.path.basename(file_path)}")
            return None
        return text_content.strip()
    except Exception as e:
        logger.error(f"Error extracting text from {os.path.basename(file_path)} ({file_type.upper()}): {e}", exc_info=True)
        return None
|
| 47 |
+
|
| 48 |
+
# Dispatch table: lowercase file extension -> callable(path) that extracts text.
# The extension is bound as a lambda default so each entry captures its own key.
SUPPORTED_EXTENSIONS = {
    ext: (lambda path, _ext=ext: extract_text_from_file(path, _ext))
    for ext in ('pdf', 'docx', 'txt')
}
|
| 53 |
+
|
| 54 |
+
def process_sources_and_create_chunks(
    sources_dir: str,
    output_file: str,
    chunk_size: int = 1000,
    chunk_overlap: int = 150,
    text_output_dir: Optional[str] = None
) -> None:
    """
    Scan a directory for source files, extract text, split it into chunks,
    and save the chunks to a single JSON file.

    Optionally saves the raw extracted text to a specified directory.

    Args:
        sources_dir: Directory containing PDF/DOCX/TXT source files.
        output_file: Path of the JSON chunk file to write.
        chunk_size: Character size of each text chunk.
        chunk_overlap: Character overlap between consecutive chunks.
        text_output_dir: Optional directory where each document's raw
            extracted text is saved (one .txt per source) for debugging.

    Raises:
        FileNotFoundError: If sources_dir does not exist.
    """
    if not os.path.isdir(sources_dir):
        logger.error(f"Source directory not found: '{sources_dir}'")
        raise FileNotFoundError(f"Source directory not found: '{sources_dir}'")

    logger.info(f"Starting chunking process. Sources: '{sources_dir}', Output: '{output_file}'")

    if text_output_dir:
        os.makedirs(text_output_dir, exist_ok=True)
        logger.info(f"Will save raw extracted text to: '{text_output_dir}'")

    all_chunks_for_json: List[Dict] = []
    processed_files_count = 0

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    for filename in os.listdir(sources_dir):
        file_path = os.path.join(sources_dir, filename)
        if not os.path.isfile(file_path):
            continue

        file_ext = filename.split('.')[-1].lower()
        if file_ext not in SUPPORTED_EXTENSIONS:
            logger.debug(f"Skipping unsupported file: {filename}")
            continue

        logger.info(f"Processing source file: {filename}")
        text_content = SUPPORTED_EXTENSIONS[file_ext](file_path)

        if text_content:
            # Save the raw text next to the chunks for debugging, if requested.
            if text_output_dir:
                try:
                    base_name = os.path.splitext(filename)[0]
                    text_output_path = os.path.join(text_output_dir, f"{base_name}.txt")
                    with open(text_output_path, 'w', encoding='utf-8') as f_text:
                        f_text.write(text_content)
                    logger.info(f"Saved extracted text for '{filename}' to '{text_output_path}'")
                except Exception as e_text_save:
                    # Best-effort: a failed debug dump must not abort chunking.
                    logger.error(f"Could not save extracted text for '{filename}': {e_text_save}")

            chunks = text_splitter.split_text(text_content)
            if not chunks:
                logger.warning(f"No chunks generated from {filename}. Skipping.")
                continue

            for i, chunk_text in enumerate(chunks):
                all_chunks_for_json.append({
                    "page_content": chunk_text,
                    "metadata": {
                        "source_document_name": filename,
                        "chunk_index": i,
                        # Human-readable location label; chunk numbers are 1-based.
                        "full_location": f"{filename}, Chunk {i+1}"
                    }
                })

            processed_files_count += 1
        else:
            logger.warning(f"Could not extract text from {filename}. Skipping.")

    if not all_chunks_for_json:
        logger.warning(f"No processable documents found or no text extracted in '{sources_dir}'. JSON file will be empty.")

    # Only create the parent directory when output_file actually has one:
    # os.makedirs('') raises FileNotFoundError for a bare filename.
    output_dir = os.path.dirname(output_file)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(all_chunks_for_json, f, indent=2)

    logger.info(f"Chunking complete. Processed {processed_files_count} files.")
    logger.info(f"Created a total of {len(all_chunks_for_json)} chunks.")
    logger.info(f"Chunked JSON output saved to: {output_file}")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def main():
    """CLI entry point: parse arguments and run the chunking pipeline."""
    arg_parser = argparse.ArgumentParser(description="Process source documents into a JSON file of text chunks for RAG.")
    arg_parser.add_argument(
        '--sources-dir', type=str, required=True,
        help="The directory containing source files (PDFs, DOCX, TXT)."
    )
    arg_parser.add_argument(
        '--output-file', type=str, required=True,
        help="The full path for the output JSON file containing the chunks."
    )
    arg_parser.add_argument(
        '--text-output-dir', type=str, default=None,
        help="Optional: The directory to save raw extracted text files for debugging."
    )
    arg_parser.add_argument(
        '--chunk-size', type=int, default=1000,
        help="The character size for each text chunk."
    )
    arg_parser.add_argument(
        '--chunk-overlap', type=int, default=150,
        help="The character overlap between consecutive chunks."
    )

    opts = arg_parser.parse_args()

    try:
        process_sources_and_create_chunks(
            sources_dir=opts.sources_dir,
            output_file=opts.output_file,
            chunk_size=opts.chunk_size,
            chunk_overlap=opts.chunk_overlap,
            text_output_dir=opts.text_output_dir,
        )
    except Exception as e:
        logger.critical(f"A critical error occurred during the chunking process: {e}", exc_info=True)
        raise SystemExit(1)

if __name__ == "__main__":
    main()
|
llm_handling.py
ADDED
|
@@ -0,0 +1,575 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
import json
|
| 4 |
+
from typing import List, Dict, Tuple, Optional, Any, Iterator
|
| 5 |
+
import shutil
|
| 6 |
+
import re
|
| 7 |
+
import time
|
| 8 |
+
import requests
|
| 9 |
+
import zipfile
|
| 10 |
+
import tempfile
|
| 11 |
+
import gdown
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from sentence_transformers import SentenceTransformer
|
| 15 |
+
from pypdf import PdfReader
|
| 16 |
+
import docx as python_docx
|
| 17 |
+
|
| 18 |
+
from llama_index.core.llms import ChatMessage
|
| 19 |
+
from llama_index.llms.groq import Groq as LlamaIndexGroqClient
|
| 20 |
+
|
| 21 |
+
from langchain_groq import ChatGroq
|
| 22 |
+
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 23 |
+
from langchain_community.vectorstores import FAISS
|
| 24 |
+
from langchain.prompts import ChatPromptTemplate
|
| 25 |
+
from langchain.schema import Document, BaseRetriever
|
| 26 |
+
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
|
| 27 |
+
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
|
| 28 |
+
from langchain.schema.output_parser import StrOutputParser
|
| 29 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 30 |
+
# MODIFIED: Import the new prompt
|
| 31 |
+
from system_prompts import RAG_SYSTEM_PROMPT, FALLBACK_SYSTEM_PROMPT, QA_FORMATTER_PROMPT
|
| 32 |
+
|
| 33 |
+
# Module-level logger; basicConfig is applied only when no handlers exist yet,
# so importing this module inside a larger app does not clobber the host's
# logging configuration.
logger = logging.getLogger(__name__)
if not logger.handlers:
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

# --- Configuration Constants ---
# Everything is environment-driven with defaults suitable for local runs.
GROQ_API_KEY = os.getenv('BOT_API_KEY')
if not GROQ_API_KEY:
    # Deliberately non-fatal at import time; each service checks the key and
    # degrades gracefully later.
    logger.critical("CRITICAL: BOT_API_KEY environment variable not found. Services will fail.")

# Model used by the general-purpose fallback bot (GroqBot).
FALLBACK_LLM_MODEL_NAME = os.getenv("GROQ_FALLBACK_MODEL", "llama-3.1-70b-versatile")
# ADDED: New constant for the auxiliary model
# Smaller/cheaper model for side tasks (titles, follow-ups, QA formatting).
AUXILIARY_LLM_MODEL_NAME = os.getenv("GROQ_AUXILIARY_MODEL", "llama3-8b-8192")
# Paths resolve relative to this module file unless overridden by env vars.
_MODULE_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
RAG_FAISS_INDEX_SUBDIR_NAME = "faiss_index"
RAG_STORAGE_PARENT_DIR = os.getenv("RAG_STORAGE_DIR", os.path.join(_MODULE_BASE_DIR, "faiss_storage"))
RAG_SOURCES_DIR = os.getenv("SOURCES_DIR", os.path.join(_MODULE_BASE_DIR, "sources"))
# Optional pre-chunked corpus file; when present it takes precedence over
# re-chunking the raw source files (see KnowledgeRAG.build_index_from_source_files).
RAG_CHUNKED_SOURCES_FILENAME = "pre_chunked_sources.json"
os.makedirs(RAG_SOURCES_DIR, exist_ok=True)
os.makedirs(RAG_STORAGE_PARENT_DIR, exist_ok=True)
# Sentence-transformers model used for embeddings.
RAG_EMBEDDING_MODEL_NAME = os.getenv("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
RAG_EMBEDDING_USE_GPU = os.getenv("RAG_EMBEDDING_GPU", "False").lower() == "true"
# Main model used to answer RAG queries.
RAG_LLM_MODEL_NAME = os.getenv("RAG_LLM_MODEL", "llama-3.1-70b-versatile")
RAG_LLM_TEMPERATURE = float(os.getenv("RAG_TEMPERATURE", 0.0))
# When True, try loading an existing FAISS index from disk before rebuilding.
RAG_LOAD_INDEX_ON_STARTUP = os.getenv("RAG_LOAD_INDEX", "True").lower() == "true"
# Default number of chunks retrieved per query.
RAG_DEFAULT_RETRIEVER_K = int(os.getenv("RAG_RETRIEVER_K", 3))
# Optional Google Drive source sync (see initialize_and_get_rag_system).
GDRIVE_SOURCES_ENABLED = os.getenv("GDRIVE_SOURCES_ENABLED", "False").lower() == "true"
GDRIVE_FOLDER_ID_OR_URL = os.getenv("GDRIVE_FOLDER_URL")
|
| 63 |
+
|
| 64 |
+
# --- Text Extraction Helper Function ---
|
| 65 |
+
def extract_text_from_file(file_path: str, file_type: str) -> Optional[str]:
    """Extract plain text from a PDF, DOCX, or TXT file.

    Args:
        file_path: Path to the file on disk.
        file_type: Lowercase extension without the dot ('pdf', 'docx', or 'txt').

    Returns:
        The extracted text, or None when the type is unsupported or any
        extraction error occurs (errors are logged, never raised).
    """
    logger.info(f"Extracting text from {file_type.upper()} file: {os.path.basename(file_path)}")
    try:
        if file_type == 'pdf':
            reader = PdfReader(file_path)
            # FIX: call extract_text() exactly once per page. The original
            # evaluated it twice per page (once in the join expression and once
            # in the filter), doubling the PDF parsing work.
            page_texts = (page.extract_text() for page in reader.pages)
            return "".join(text + "\n" for text in page_texts if text)
        elif file_type == 'docx':
            doc = python_docx.Document(file_path)
            return "\n".join(para.text for para in doc.paragraphs if para.text)
        elif file_type == 'txt':
            # errors='ignore' drops undecodable bytes rather than failing.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                return f.read()
        logger.warning(f"Unsupported file type for text extraction: {file_type}")
        return None
    except Exception as e:
        logger.error(f"Error extracting text from {os.path.basename(file_path)}: {e}", exc_info=True)
        return None
|
| 82 |
+
|
| 83 |
+
# Maps supported file extensions to the type tag passed to extract_text_from_file().
FAISS_RAG_SUPPORTED_EXTENSIONS = {'pdf': 'pdf', 'docx': 'docx', 'txt': 'txt'}
|
| 84 |
+
|
| 85 |
+
# --- FAISS RAG System ---
|
| 86 |
+
class FAISSRetrieverWithScore(BaseRetriever):
    """Retriever wrapper that records each hit's FAISS similarity score.

    The raw score returned by ``similarity_search_with_score`` is stamped onto
    each document under ``metadata["retrieval_score"]`` so downstream callers
    can report it alongside the citation.
    """
    vectorstore: FAISS
    k: int = RAG_DEFAULT_RETRIEVER_K

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run a top-k similarity search and tag every hit with its score."""
        hits = self.vectorstore.similarity_search_with_score(query, k=self.k)
        for document, similarity in hits:
            document.metadata["retrieval_score"] = float(similarity)
        return [document for document, _ in hits]
|
| 99 |
+
|
| 100 |
+
class KnowledgeRAG:
    """FAISS-backed retrieval-augmented QA over a local document corpus.

    Owns the HuggingFace embedding model, the FAISS vector store, a
    score-tagging retriever, and an LCEL chain of the form
    retrieve -> prompt -> ChatGroq -> string.
    """

    def __init__(
        self,
        index_storage_dir: str,
        embedding_model_name: str,
        groq_model_name_for_rag: str,
        use_gpu_for_embeddings: bool,
        groq_api_key_for_rag: str,
        temperature: float,
    ):
        """Initialize the embedding model and the Groq chat LLM.

        Raises:
            ValueError: if `groq_api_key_for_rag` is falsy.
            Exception: re-raised if the embedding model or LLM fails to load.
        """
        self.logger = logging.getLogger(__name__ + ".KnowledgeRAG")
        self.index_storage_dir = index_storage_dir
        self.embedding_model_name = embedding_model_name
        self.groq_model_name = groq_model_name_for_rag
        self.temperature = temperature

        # Fall back to CPU when GPU was requested but CUDA is unavailable.
        device = "cuda" if use_gpu_for_embeddings and torch.cuda.is_available() else "cpu"
        self.logger.info(f"Initializing Hugging Face embedding model: {self.embedding_model_name} on device: {device}")
        try:
            self.embeddings = HuggingFaceEmbeddings(
                model_name=self.embedding_model_name,
                model_kwargs={"device": device},
                # Normalized embeddings keep FAISS similarity scores comparable.
                encode_kwargs={"normalize_embeddings": True}
            )
        except Exception as e:
            self.logger.critical(f"Failed to load embedding model: {e}", exc_info=True)
            raise

        self.logger.info(f"Initializing Langchain ChatGroq LLM for RAG: {self.groq_model_name}")
        if not groq_api_key_for_rag:
            raise ValueError("Groq API Key for RAG is missing.")
        try:
            self.llm = ChatGroq(
                temperature=self.temperature,
                groq_api_key=groq_api_key_for_rag,
                model_name=self.groq_model_name
            )
        except Exception as e:
            self.logger.critical(f"Failed to initialize Langchain ChatGroq LLM: {e}", exc_info=True)
            raise

        # Populated later by build_index_from_source_files() or load_index_from_disk().
        self.vector_store: Optional[FAISS] = None
        self.retriever: Optional[FAISSRetrieverWithScore] = None
        self.rag_chain = None
        self.processed_source_files: List[str] = []

    def build_index_from_source_files(self, source_folder_path: str, k: int = RAG_DEFAULT_RETRIEVER_K):
        """Build and persist the FAISS index, then wire up the RAG chain.

        Prefers a pre-chunked JSON corpus in the storage dir; otherwise chunks
        raw PDF/DOCX/TXT files found in `source_folder_path`.
        """
        all_docs_for_vectorstore: List[Document] = []
        processed_files_this_build: List[str] = []
        pre_chunked_json_path = os.path.join(self.index_storage_dir, RAG_CHUNKED_SOURCES_FILENAME)

        # Path 1: a pre-chunked corpus file takes precedence over raw files.
        if os.path.exists(pre_chunked_json_path):
            self.logger.info(f"Loading documents from pre-chunked file: {pre_chunked_json_path}")
            try:
                with open(pre_chunked_json_path, 'r', encoding='utf-8') as f:
                    chunk_data_list = json.load(f)
                source_filenames = set()
                for chunk_data in chunk_data_list:
                    doc = Document(page_content=chunk_data.get("page_content", ""), metadata=chunk_data.get("metadata", {}))
                    all_docs_for_vectorstore.append(doc)
                    if 'source_document_name' in doc.metadata:
                        source_filenames.add(doc.metadata['source_document_name'])
                processed_files_this_build = sorted(list(source_filenames))
            except Exception as e:
                # On any parse/shape error, discard partial results and fall
                # back to re-chunking the raw files below.
                self.logger.error(f"Error processing pre-chunked JSON, falling back to raw files: {e}")
                all_docs_for_vectorstore.clear()

        # Path 2: chunk raw supported files from the sources folder.
        if not all_docs_for_vectorstore:
            self.logger.info(f"Processing raw files from '{source_folder_path}' to build index.")
            text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
            for filename in os.listdir(source_folder_path):
                file_path = os.path.join(source_folder_path, filename)
                file_ext = filename.split('.')[-1].lower()
                if os.path.isfile(file_path) and file_ext in FAISS_RAG_SUPPORTED_EXTENSIONS:
                    text_content = extract_text_from_file(file_path, file_ext)
                    if text_content:
                        chunks = text_splitter.split_text(text_content)
                        for i, chunk_text in enumerate(chunks):
                            metadata = {"source_document_name": filename, "chunk_index": i}
                            all_docs_for_vectorstore.append(Document(page_content=chunk_text, metadata=metadata))
                        processed_files_this_build.append(filename)

        if not all_docs_for_vectorstore:
            self.logger.warning(f"No processable PDF/DOCX/TXT documents found in '{source_folder_path}'. RAG index will only contain other sources if available.")


        self.processed_source_files = processed_files_this_build

        # This print statement is kept for console visibility on startup/rebuild
        print("\n--- Document Files Used for RAG Index ---")
        if self.processed_source_files:
            for filename in self.processed_source_files:
                # NOTE(review): the loop variable `filename` is unused here; the
                # literal "(unknown)" looks like a placeholder for f"- {filename}"
                # — confirm against the original source before changing.
                print(f"- (unknown)")
        else:
            print("No PDF/DOCX/TXT source files were processed for the RAG index.")
        print("---------------------------------------\n")

        if not all_docs_for_vectorstore:
            self.logger.warning("No documents to build FAISS index from. Skipping FAISS build.")
            return

        self.logger.info(f"Creating FAISS index from {len(all_docs_for_vectorstore)} document chunks...")
        self.vector_store = FAISS.from_documents(all_docs_for_vectorstore, self.embeddings)
        faiss_index_path = os.path.join(self.index_storage_dir, RAG_FAISS_INDEX_SUBDIR_NAME)
        self.vector_store.save_local(faiss_index_path)
        self.logger.info(f"FAISS index built and saved to '{faiss_index_path}'.")
        self.retriever = FAISSRetrieverWithScore(vectorstore=self.vector_store, k=k)
        self.setup_rag_chain()

    def load_index_from_disk(self, k: int = RAG_DEFAULT_RETRIEVER_K):
        """Load a previously saved FAISS index and wire up the RAG chain.

        Raises:
            FileNotFoundError: if the index directory does not exist.
        """
        faiss_index_path = os.path.join(self.index_storage_dir, RAG_FAISS_INDEX_SUBDIR_NAME)
        if not os.path.isdir(faiss_index_path):
            raise FileNotFoundError(f"FAISS index directory not found at '{faiss_index_path}'.")

        self.logger.info(f"Loading FAISS index from: {faiss_index_path}")
        self.vector_store = FAISS.load_local(
            folder_path=faiss_index_path,
            embeddings=self.embeddings,
            # Required by LangChain to unpickle the index; safe only because the
            # index files are produced by this same application.
            allow_dangerous_deserialization=True
        )
        self.retriever = FAISSRetrieverWithScore(vectorstore=self.vector_store, k=k)
        self.setup_rag_chain()

    def format_docs(self, docs: List[Document]) -> str:
        """Render retrieved chunks as a single excerpt-delimited context string."""
        return "\n\n---\n\n".join([f"[Excerpt from {doc.metadata.get('source_document_name', 'N/A')}, Chunk {doc.metadata.get('chunk_index', 'N/A')}]\nContent:\n{doc.page_content}" for doc in docs])

    def setup_rag_chain(self):
        """Assemble the LCEL chain: {context, question} -> prompt -> LLM -> str.

        Raises:
            RuntimeError: if the retriever or LLM has not been initialized.
        """
        if not self.retriever or not self.llm:
            raise RuntimeError("Retriever and LLM must be initialized.")

        prompt = ChatPromptTemplate.from_template(RAG_SYSTEM_PROMPT)

        # The chain's input is the raw query string; `context` is produced by
        # piping the retriever output through format_docs.
        self.rag_chain = (
            RunnableParallel(
                context=(self.retriever | self.format_docs),
                question=RunnablePassthrough()
            )
            | prompt
            | self.llm
            | StrOutputParser()
        )
        self.logger.info("RAG LCEL chain set up successfully.")

    def invoke(self, query: str, top_k: Optional[int] = None) -> Dict[str, Any]:
        """Answer `query` via the RAG chain and return a structured result.

        Returns a dict with keys: `query`, `answer`, `source` (tag describing
        how the answer was produced), and `cited_source_details`.
        """
        if not self.rag_chain:
            # MODIFIED: Changed severity
            self.logger.warning("RAG system not fully initialized. Cannot invoke.")
            return {"answer": "The provided bibliography does not contain specific information on this topic.", "source": "system_error", "cited_source_details": []}

        if not query or not query.strip():
            return {"answer": "Please provide a valid question.", "source": "system_error", "cited_source_details": []}

        k_to_use = top_k if top_k is not None and top_k > 0 else self.retriever.k
        self.logger.info(f"Processing RAG query with k={k_to_use}: '{query[:100]}...'")

        # Temporarily override the retriever's k; restored in the finally block.
        original_k = self.retriever.k
        if k_to_use != original_k:
            self.retriever.k = k_to_use

        try:
            # Retrieved once here to build the citation metadata.
            retrieved_docs = self.retriever.get_relevant_documents(query)
            if not retrieved_docs:
                return {"answer": "The provided bibliography does not contain specific information on this topic.", "source": "no_docs_found", "cited_source_details": []}

            context_str = self.format_docs(retrieved_docs)

            # MODIFIED: Added full logging as per user request
            print(f"\n--- RAG INVOKE ---")
            print(f"QUESTION: {query}")
            print(f"CONTEXT:\n{context_str}")

            # NOTE(review): `config={"context": ...}` is a RunnableConfig, not
            # chain input — the chain recomputes context through its retriever
            # branch, so documents are retrieved twice per call and this kwarg
            # presumably has no effect. Verify against LangChain's
            # RunnableConfig documentation before relying on it.
            llm_answer = self.rag_chain.invoke(query, config={"context": context_str})

            print(f"LLM_ANSWER: {llm_answer}")
            print(f"--------------------\n")

            structured_sources = [{
                "source_document_name": doc.metadata.get('source_document_name', 'Unknown'),
                "chunk_index": doc.metadata.get('chunk_index', 'N/A'),
                "retrieval_score": doc.metadata.get("retrieval_score"),
            } for doc in retrieved_docs]

            # The system prompt instructs the model to emit this exact phrase
            # when the bibliography has no answer; detect it to set the tag.
            if "the provided bibliography does not contain specific information" in llm_answer.lower():
                final_answer = llm_answer
                source_tag = "no_answer_in_bibliography"
            else:
                final_answer = f"{llm_answer}\n\n*Source: Bibliography-Based*"
                source_tag = "bibliography_based"

            return {
                "query": query,
                "answer": final_answer.strip(),
                "source": source_tag,
                "cited_source_details": structured_sources,
            }

        except Exception as e:
            self.logger.error(f"Error during RAG query processing: {e}", exc_info=True)
            return {"answer": "An error occurred while processing your request.", "source": "system_error", "cited_source_details": []}
        finally:
            if k_to_use != original_k:
                self.retriever.k = original_k

    def stream(self, query: str, top_k: Optional[int] = None) -> Iterator[str]:
        """Stream the RAG answer for `query` as text chunks.

        Unlike invoke(), no citation metadata is returned — only answer text.
        """
        if not self.rag_chain:
            self.logger.error("RAG system not fully initialized for streaming.")
            yield "Error: RAG system is not ready."
            return

        k_to_use = top_k if top_k is not None and top_k > 0 else self.retriever.k
        self.logger.info(f"Processing RAG stream with k={k_to_use}: '{query[:100]}...'")

        # Temporarily override the retriever's k; restored in the finally block.
        original_k = self.retriever.k
        if k_to_use != original_k:
            self.retriever.k = k_to_use

        try:
            # Check for docs first to avoid streaming "no info" message
            # NOTE(review): as in invoke(), this retrieval is repeated inside
            # the chain's retriever branch — two searches per streamed query.
            retrieved_docs = self.retriever.get_relevant_documents(query)
            if not retrieved_docs:
                yield "The provided bibliography does not contain specific information on this topic."
                return

            # MODIFIED: Added full logging for streaming as per user request
            context_str = self.format_docs(retrieved_docs)
            print(f"\n--- RAG STREAM ---")
            print(f"QUESTION: {query}")
            print(f"CONTEXT:\n{context_str}")
            print(f"STREAMING LLM_ANSWER...")
            print(f"--------------------\n")

            yield from self.rag_chain.stream(query, config={"context": context_str})
        except Exception as e:
            self.logger.error(f"Error during RAG stream processing: {e}", exc_info=True)
            yield "An error occurred while processing your request."
        finally:
            if k_to_use != original_k:
                self.retriever.k = original_k
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# --- Groq Fallback Bot ---
|
| 341 |
+
class GroqBot:
    """General-purpose fallback chatbot backed by the LlamaIndex Groq client.

    Used when a query cannot be answered from the RAG bibliography. On any
    initialization failure, `client` stays None and responses degrade to a
    fixed apology message instead of raising.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__ + ".GroqBot")
        self.client = None
        if not GROQ_API_KEY:
            self.logger.critical("GroqBot not initialized: BOT_API_KEY is missing.")
            return
        try:
            self.client = LlamaIndexGroqClient(model=FALLBACK_LLM_MODEL_NAME, api_key=GROQ_API_KEY)
            self.system_prompt = FALLBACK_SYSTEM_PROMPT
        except Exception as e:
            self.logger.error(f"Failed to initialize LlamaIndexGroqClient for Fallback Bot: {e}", exc_info=True)
            self.client = None

    def stream_response(self, context: dict) -> Iterator[str]:
        """Yield response text chunks for the given conversation context.

        `context` may carry 'current_query', 'chat_history' (list of message
        dicts), and 'qa_related_info' (extra Q&A text injected as a system turn).
        """
        if not self.client:
            yield "The system is currently unable to process this request."
            return

        query = context.get('current_query', '')
        history = context.get('chat_history', [])
        qa_context = context.get('qa_related_info', '')

        # Assemble the transcript: system prompt, prior turns, optional
        # Q&A hint, then the current user query.
        messages = [ChatMessage(role="system", content=self.system_prompt)]
        messages.extend(ChatMessage(**turn) for turn in history)
        if qa_context:
            messages.append(ChatMessage(role="system", content=f"**Potentially Relevant Q&A Information from other sources:**\n{qa_context}"))
        messages.append(ChatMessage(role="user", content=f"**Current User Query:**\n{query}"))

        # ChatMessage is not directly JSON serializable, so dump plain dicts
        # for the console trace.
        printable = [m.dict() for m in messages]
        print("\n--- FALLBACK STREAM ---")
        print(f"MESSAGES SENT TO LLM:\n{json.dumps(printable, indent=2)}")
        print("STREAMING LLM_ANSWER...")
        print("-----------------------\n")

        try:
            for chunk in self.client.stream_chat(messages):
                yield chunk.delta
        except Exception as e:
            self.logger.error(f"Groq API error in get_response (Fallback): {e}", exc_info=True)
            yield "I am currently unable to process this request due to a technical issue."
|
| 386 |
+
|
| 387 |
+
# ADDED: New function for formatting QA answers
|
| 388 |
+
def get_answer_from_context(question: str, context: str, system_prompt: str) -> str:
    """Format an answer to `question` from QA-source context (CSV/XLSX rows).

    Builds a one-shot prompt|LLM|parser pipeline on the auxiliary model and
    returns the stripped answer text, or a fixed apology string on failure.
    """
    logger.info(f"Formatting answer for question '{question[:50]}...' using QA context.")
    try:
        # The auxiliary model handles this formatting task for speed and cost.
        llm = ChatGroq(
            temperature=0.1,
            groq_api_key=GROQ_API_KEY,
            model_name=AUXILIARY_LLM_MODEL_NAME,
        )
        pipeline = ChatPromptTemplate.from_template(system_prompt) | llm | StrOutputParser()

        # Console trace of the full exchange.
        print("\n--- QA FORMATTER ---")
        print(f"QUESTION: {question}")
        print(f"CONTEXT:\n{context}")

        answer = pipeline.invoke({"context": context, "question": question})

        print(f"LLM_ANSWER: {answer}")
        print("--------------------\n")

        return answer.strip()

    except Exception as e:
        logger.error(f"Error in get_answer_from_context: {e}", exc_info=True)
        return "Sorry, I was unable to formulate an answer based on the available information."
|
| 423 |
+
|
| 424 |
+
# ADDED: New function for streaming QA answers
|
| 425 |
+
def stream_answer_from_context(question: str, context: str, system_prompt: str) -> Iterator[str]:
    """Stream an answer to `question` formatted from QA-source context.

    Same pipeline as get_answer_from_context(), but yields text chunks as the
    auxiliary model produces them. On failure, yields a fixed apology string.
    """
    logger.info(f"Streaming formatted answer for question '{question[:50]}...' using QA context.")
    try:
        # The auxiliary model handles this formatting task for speed and cost.
        llm = ChatGroq(
            temperature=0.1,
            groq_api_key=GROQ_API_KEY,
            model_name=AUXILIARY_LLM_MODEL_NAME,
        )
        pipeline = ChatPromptTemplate.from_template(system_prompt) | llm | StrOutputParser()

        # Console trace of the prompt being streamed.
        print("\n--- QA FORMATTER (STREAM) ---")
        print(f"QUESTION: {question}")
        print(f"CONTEXT:\n{context}")
        print("STREAMING LLM_ANSWER...")
        print("---------------------------\n")

        yield from pipeline.stream({"context": context, "question": question})

    except Exception as e:
        logger.error(f"Error in stream_answer_from_context: {e}", exc_info=True)
        yield "Sorry, I was unable to formulate an answer based on the available information."
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
# --- Initialization and Interface Functions ---
|
| 460 |
+
def get_id_from_gdrive_input(url_or_id: str) -> Optional[str]:
    """Extract a Google Drive resource id from a share URL, or accept a bare id.

    Returns None for empty input and for strings that match neither a known
    URL shape nor the bare-id heuristic.
    """
    if not url_or_id:
        return None
    # Try each known URL shape; the first capture group is the id.
    for pattern in (r"/folders/([a-zA-Z0-9_-]+)", r"/d/([a-zA-Z0-9_-]+)", r"id=([a-zA-Z0-9_-]+)"):
        found = re.search(pattern, url_or_id)
        if found is not None:
            return found.group(1)
    # Heuristic: a long token with no URL punctuation is treated as a bare id.
    looks_like_bare_id = "/" not in url_or_id and "=" not in url_or_id and len(url_or_id) > 15
    return url_or_id if looks_like_bare_id else None
|
| 469 |
+
|
| 470 |
+
def download_and_unzip_gdrive_folder(folder_id_or_url: str, target_dir: str) -> bool:
    """Download a Google Drive folder's contents into `target_dir`.

    The folder is fetched into a temp directory via gdown and then copied over
    `target_dir` (which is wiped first if it exists). Returns True on success,
    False on any failure (all errors are logged, never raised).
    """
    folder_id = get_id_from_gdrive_input(folder_id_or_url)
    if not folder_id:
        logger.error(f"Invalid Google Drive Folder ID or URL: {folder_id_or_url}")
        return False

    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            logger.info(f"Attempting to download GDrive folder ID: {folder_id}")
            download_path = gdown.download_folder(id=folder_id, output=temp_dir, quiet=False, use_cookies=False)
            if not download_path or not os.listdir(temp_dir):
                logger.error("gdown failed to download or extract the folder.")
                return False

            # gdown may nest everything inside a single directory named after
            # the Drive folder; if so, use that directory as the content root.
            content_root = temp_dir
            entries = os.listdir(temp_dir)
            if len(entries) == 1:
                only_entry = os.path.join(temp_dir, entries[0])
                if os.path.isdir(only_entry):
                    content_root = only_entry

            logger.info(f"Moving contents from {content_root} to {target_dir}")
            if os.path.exists(target_dir):
                shutil.rmtree(target_dir)
            shutil.copytree(content_root, target_dir)
            logger.info(f"Successfully moved GDrive contents to {target_dir}")
            return True
        except Exception as e:
            # MODIFIED: Corrected self.logger to logger
            logger.error(f"Error during GDrive download/processing: {e}", exc_info=True)
            return False
|
| 499 |
+
|
| 500 |
+
def initialize_and_get_rag_system(force_rebuild: bool = False) -> Optional[KnowledgeRAG]:
    """Create a fully initialized KnowledgeRAG instance, or None on failure.

    Optionally syncs source documents from Google Drive first, then either
    loads the persisted FAISS index from disk (unless `force_rebuild`) or
    builds a fresh index from the source files.

    Args:
        force_rebuild: When True, delete the existing index storage and rebuild.

    Returns:
        The ready KnowledgeRAG, or None if the API key is missing or
        initialization fails.
    """
    if not GROQ_API_KEY:
        logger.error("RAG system cannot be initialized without BOT_API_KEY.")
        return None

    # Optional step: refresh local sources from Google Drive.
    if GDRIVE_SOURCES_ENABLED and GDRIVE_FOLDER_ID_OR_URL:
        logger.info("Google Drive sources enabled. Downloading...")
        if os.path.isdir(RAG_SOURCES_DIR):
            logger.info(f"Clearing existing RAG sources directory: {RAG_SOURCES_DIR}")
            shutil.rmtree(RAG_SOURCES_DIR)
            os.makedirs(RAG_SOURCES_DIR)

        download_successful = download_and_unzip_gdrive_folder(GDRIVE_FOLDER_ID_OR_URL, RAG_SOURCES_DIR)
        if not download_successful:
            # Non-fatal: continue with whatever local sources exist.
            logger.error("Failed to download sources from Google Drive. Using local files if available.")

    # NOTE(review): `faiss_index_path` is computed but never used in this
    # function — looks like dead code; confirm before removing.
    faiss_index_path = os.path.join(RAG_STORAGE_PARENT_DIR, RAG_FAISS_INDEX_SUBDIR_NAME)
    if force_rebuild and os.path.exists(RAG_STORAGE_PARENT_DIR):
        logger.info(f"Force Rebuild: Deleting existing index storage directory at '{RAG_STORAGE_PARENT_DIR}'")
        shutil.rmtree(RAG_STORAGE_PARENT_DIR)
        os.makedirs(RAG_STORAGE_PARENT_DIR)

    try:
        rag_instance = KnowledgeRAG(
            index_storage_dir=RAG_STORAGE_PARENT_DIR,
            embedding_model_name=RAG_EMBEDDING_MODEL_NAME,
            groq_model_name_for_rag=RAG_LLM_MODEL_NAME,
            use_gpu_for_embeddings=RAG_EMBEDDING_USE_GPU,
            groq_api_key_for_rag=GROQ_API_KEY,
            temperature=RAG_LLM_TEMPERATURE,
        )

        # Prefer loading the persisted index; fall back to a rebuild on any
        # load failure (missing directory or corrupt index).
        should_build = True
        if RAG_LOAD_INDEX_ON_STARTUP and not force_rebuild:
            try:
                rag_instance.load_index_from_disk(k=RAG_DEFAULT_RETRIEVER_K)
                logger.info("RAG index loaded successfully from disk.")
                should_build = False
            except FileNotFoundError:
                logger.warning("Index not found on disk. Will attempt to build.")
            except Exception as e:
                logger.error(f"Error loading index: {e}. Will attempt to rebuild.", exc_info=True)

        if should_build:
            logger.info("Building new RAG index from source data...")
            rag_instance.build_index_from_source_files(
                source_folder_path=RAG_SOURCES_DIR,
                k=RAG_DEFAULT_RETRIEVER_K
            )

        return rag_instance

    except Exception as e:
        logger.critical(f"FATAL: Failed to initialize RAG system: {e}", exc_info=True)
        return None
|
| 555 |
+
|
| 556 |
+
# Module-level singleton fallback bot, constructed at import time.
groq_bot_instance = GroqBot()
|
| 557 |
+
|
| 558 |
+
def get_auxiliary_chat_response(messages: List[Dict]) -> str:
    """Run an auxiliary chat task (titles, follow-up questions) on the small model.

    Args:
        messages: Chat messages in the dict format accepted by ChatGroq.invoke.

    Returns:
        The model's text content, or a fixed fallback string on any failure.
    """
    logger.info(f"Routing auxiliary request to model: {AUXILIARY_LLM_MODEL_NAME}")
    try:
        # A dedicated client is built per call so the auxiliary model is used
        # regardless of how the main RAG/fallback clients are configured.
        client = ChatGroq(
            temperature=0.2,  # A bit more creative than RAG, but still grounded
            groq_api_key=GROQ_API_KEY,
            model_name=AUXILIARY_LLM_MODEL_NAME,
        )
        return client.invoke(messages).content
    except Exception as e:
        logger.error(f"Error with auxiliary model call: {e}", exc_info=True)
        return "Could not generate suggestions."
|
postman_collection.json
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"info": {
|
| 3 |
+
"_postman_id": "a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6",
|
| 4 |
+
"name": "NOW DentalBot API Collection",
|
| 5 |
+
"description": "A comprehensive Postman collection for interacting with the Dental Assistant Chatbot API, based on the provided Python application files. It includes endpoints for chat, admin, and session management.",
|
| 6 |
+
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
|
| 7 |
+
},
|
| 8 |
+
"item": [
|
| 9 |
+
{
|
| 10 |
+
"name": "Chat Endpoints",
|
| 11 |
+
"description": "Endpoints for interacting with the chatbot.",
|
| 12 |
+
"item": [
|
| 13 |
+
{
|
| 14 |
+
"name": "/v1/chat/completions (Streaming)",
|
| 15 |
+
"request": {
|
| 16 |
+
"method": "POST",
|
| 17 |
+
"header": [
|
| 18 |
+
{
|
| 19 |
+
"key": "Content-Type",
|
| 20 |
+
"value": "application/json"
|
| 21 |
+
}
|
| 22 |
+
],
|
| 23 |
+
"body": {
|
| 24 |
+
"mode": "raw",
|
| 25 |
+
"raw": "{\n \"model\": \"MedicalAssisstantBot/v1\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"What are the common treatments for gingivitis?\"\n }\n ],\n \"stream\": true,\n \"conversation_id\": \"your-session-id-here\"\n}",
|
| 26 |
+
"options": {
|
| 27 |
+
"raw": {
|
| 28 |
+
"language": "json"
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
},
|
| 32 |
+
"url": {
|
| 33 |
+
"raw": "{{baseUrl}}/v1/chat/completions",
|
| 34 |
+
"host": [
|
| 35 |
+
"{{baseUrl}}"
|
| 36 |
+
],
|
| 37 |
+
"path": [
|
| 38 |
+
"v1",
|
| 39 |
+
"chat",
|
| 40 |
+
"completions"
|
| 41 |
+
]
|
| 42 |
+
},
|
| 43 |
+
"description": "This endpoint provides an OpenAI-compatible interface for chatting with the bot. Setting `\"stream\": true` will return a response as a server-sent event stream. Make sure to provide a `conversation_id` obtained from `/create-session`."
|
| 44 |
+
},
|
| 45 |
+
"response": []
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"name": "/v1/chat/completions (Non-Streaming)",
|
| 49 |
+
"request": {
|
| 50 |
+
"method": "POST",
|
| 51 |
+
"header": [
|
| 52 |
+
{
|
| 53 |
+
"key": "Content-Type",
|
| 54 |
+
"value": "application/json"
|
| 55 |
+
}
|
| 56 |
+
],
|
| 57 |
+
"body": {
|
| 58 |
+
"mode": "raw",
|
| 59 |
+
"raw": "{\n \"model\": \"MedicalAssisstantBot/v1\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Tell me about dental implants.\"\n }\n ],\n \"stream\": false,\n \"conversation_id\": \"your-session-id-here\",\n \"user_id\": \"some-user-123\"\n}",
|
| 60 |
+
"options": {
|
| 61 |
+
"raw": {
|
| 62 |
+
"language": "json"
|
| 63 |
+
}
|
| 64 |
+
}
|
| 65 |
+
},
|
| 66 |
+
"url": {
|
| 67 |
+
"raw": "{{baseUrl}}/v1/chat/completions",
|
| 68 |
+
"host": [
|
| 69 |
+
"{{baseUrl}}"
|
| 70 |
+
],
|
| 71 |
+
"path": [
|
| 72 |
+
"v1",
|
| 73 |
+
"chat",
|
| 74 |
+
"completions"
|
| 75 |
+
]
|
| 76 |
+
},
|
| 77 |
+
"description": "Sends a non-streaming request to the chat endpoint. It will return a complete JSON response. Providing a `user_id` allows the bot to access personalized information."
|
| 78 |
+
},
|
| 79 |
+
"response": []
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"name": "/chat-bot (Legacy Endpoint)",
|
| 83 |
+
"request": {
|
| 84 |
+
"method": "POST",
|
| 85 |
+
"header": [
|
| 86 |
+
{
|
| 87 |
+
"key": "Content-Type",
|
| 88 |
+
"value": "application/json"
|
| 89 |
+
}
|
| 90 |
+
],
|
| 91 |
+
"body": {
|
| 92 |
+
"mode": "raw",
|
| 93 |
+
"raw": "{\n \"query\": \"What is a root canal?\",\n \"session_id\": \"your-session-id-here\",\n \"user_id\": \"some-user-123\"\n}",
|
| 94 |
+
"options": {
|
| 95 |
+
"raw": {
|
| 96 |
+
"language": "json"
|
| 97 |
+
}
|
| 98 |
+
}
|
| 99 |
+
},
|
| 100 |
+
"url": {
|
| 101 |
+
"raw": "{{baseUrl}}/chat-bot",
|
| 102 |
+
"host": [
|
| 103 |
+
"{{baseUrl}}"
|
| 104 |
+
],
|
| 105 |
+
"path": [
|
| 106 |
+
"chat-bot"
|
| 107 |
+
]
|
| 108 |
+
},
|
| 109 |
+
"description": "The original, non-OpenAI-compatible chat endpoint. It takes a query, session_id, and optional user_id."
|
| 110 |
+
},
|
| 111 |
+
"response": []
|
| 112 |
+
}
|
| 113 |
+
]
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"name": "Session Management",
|
| 117 |
+
"description": "Endpoints for managing user sessions.",
|
| 118 |
+
"item": [
|
| 119 |
+
{
|
| 120 |
+
"name": "/create-session",
|
| 121 |
+
"request": {
|
| 122 |
+
"method": "POST",
|
| 123 |
+
"header": [],
|
| 124 |
+
"url": {
|
| 125 |
+
"raw": "{{baseUrl}}/create-session",
|
| 126 |
+
"host": [
|
| 127 |
+
"{{baseUrl}}"
|
| 128 |
+
],
|
| 129 |
+
"path": [
|
| 130 |
+
"create-session"
|
| 131 |
+
]
|
| 132 |
+
},
|
| 133 |
+
"description": "Creates a new, unique session ID required for tracking conversation history. Call this endpoint to start a new conversation."
|
| 134 |
+
},
|
| 135 |
+
"response": []
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"name": "/clear-history",
|
| 139 |
+
"request": {
|
| 140 |
+
"method": "POST",
|
| 141 |
+
"header": [
|
| 142 |
+
{
|
| 143 |
+
"key": "Content-Type",
|
| 144 |
+
"value": "application/json"
|
| 145 |
+
}
|
| 146 |
+
],
|
| 147 |
+
"body": {
|
| 148 |
+
"mode": "raw",
|
| 149 |
+
"raw": "{\n \"session_id\": \"your-session-id-here\"\n}",
|
| 150 |
+
"options": {
|
| 151 |
+
"raw": {
|
| 152 |
+
"language": "json"
|
| 153 |
+
}
|
| 154 |
+
}
|
| 155 |
+
},
|
| 156 |
+
"url": {
|
| 157 |
+
"raw": "{{baseUrl}}/clear-history",
|
| 158 |
+
"host": [
|
| 159 |
+
"{{baseUrl}}"
|
| 160 |
+
],
|
| 161 |
+
"path": [
|
| 162 |
+
"clear-history"
|
| 163 |
+
]
|
| 164 |
+
},
|
| 165 |
+
"description": "Clears the chat history associated with a specific session ID from the server."
|
| 166 |
+
},
|
| 167 |
+
"response": []
|
| 168 |
+
}
|
| 169 |
+
]
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"name": "Admin Endpoints",
|
| 173 |
+
"description": "Administrative endpoints for managing the RAG system and monitoring the application. These endpoints require Basic Authentication.",
|
| 174 |
+
"item": [
|
| 175 |
+
{
|
| 176 |
+
"name": "/admin/rebuild_faiss_index",
|
| 177 |
+
"request": {
|
| 178 |
+
"method": "POST",
|
| 179 |
+
"header": [],
|
| 180 |
+
"url": {
|
| 181 |
+
"raw": "{{baseUrl}}/admin/rebuild_faiss_index",
|
| 182 |
+
"host": [
|
| 183 |
+
"{{baseUrl}}"
|
| 184 |
+
],
|
| 185 |
+
"path": [
|
| 186 |
+
"admin",
|
| 187 |
+
"rebuild_faiss_index"
|
| 188 |
+
]
|
| 189 |
+
},
|
| 190 |
+
"description": "Triggers a full rebuild of the FAISS vector index from the source documents. This can be a long-running process."
|
| 191 |
+
},
|
| 192 |
+
"response": []
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"name": "/admin/faiss_rag_status",
|
| 196 |
+
"request": {
|
| 197 |
+
"method": "GET",
|
| 198 |
+
"header": [],
|
| 199 |
+
"url": {
|
| 200 |
+
"raw": "{{baseUrl}}/admin/faiss_rag_status",
|
| 201 |
+
"host": [
|
| 202 |
+
"{{baseUrl}}"
|
| 203 |
+
],
|
| 204 |
+
"path": [
|
| 205 |
+
"admin",
|
| 206 |
+
"faiss_rag_status"
|
| 207 |
+
]
|
| 208 |
+
},
|
| 209 |
+
"description": "Retrieves the current status of the FAISS RAG (Retrieval-Augmented Generation) system, including index information and processed files."
|
| 210 |
+
},
|
| 211 |
+
"response": []
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"name": "/db/status",
|
| 215 |
+
"request": {
|
| 216 |
+
"method": "GET",
|
| 217 |
+
"header": [],
|
| 218 |
+
"url": {
|
| 219 |
+
"raw": "{{baseUrl}}/db/status",
|
| 220 |
+
"host": [
|
| 221 |
+
"{{baseUrl}}"
|
| 222 |
+
],
|
| 223 |
+
"path": [
|
| 224 |
+
"db",
|
| 225 |
+
"status"
|
| 226 |
+
]
|
| 227 |
+
},
|
| 228 |
+
"description": "Checks the status of the personal data CSV file monitor."
|
| 229 |
+
},
|
| 230 |
+
"response": []
|
| 231 |
+
},
|
| 232 |
+
{
|
| 233 |
+
"name": "/admin/download_qa_database",
|
| 234 |
+
"request": {
|
| 235 |
+
"method": "GET",
|
| 236 |
+
"header": [],
|
| 237 |
+
"url": {
|
| 238 |
+
"raw": "{{baseUrl}}/admin/download_qa_database",
|
| 239 |
+
"host": [
|
| 240 |
+
"{{baseUrl}}"
|
| 241 |
+
],
|
| 242 |
+
"path": [
|
| 243 |
+
"admin",
|
| 244 |
+
"download_qa_database"
|
| 245 |
+
]
|
| 246 |
+
},
|
| 247 |
+
"description": "Downloads all the loaded question-answer datasets (General, Personal, Greetings) as a single Excel file."
|
| 248 |
+
},
|
| 249 |
+
"response": []
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"name": "/report",
|
| 253 |
+
"request": {
|
| 254 |
+
"method": "GET",
|
| 255 |
+
"header": [],
|
| 256 |
+
"url": {
|
| 257 |
+
"raw": "{{baseUrl}}/report",
|
| 258 |
+
"host": [
|
| 259 |
+
"{{baseUrl}}"
|
| 260 |
+
],
|
| 261 |
+
"path": [
|
| 262 |
+
"report"
|
| 263 |
+
]
|
| 264 |
+
},
|
| 265 |
+
"description": "Downloads the complete chat history log as a CSV file."
|
| 266 |
+
},
|
| 267 |
+
"response": []
|
| 268 |
+
}
|
| 269 |
+
],
|
| 270 |
+
"auth": {
|
| 271 |
+
"type": "basic",
|
| 272 |
+
"basic": [
|
| 273 |
+
{
|
| 274 |
+
"key": "password",
|
| 275 |
+
"value": "admin",
|
| 276 |
+
"type": "string"
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"key": "username",
|
| 280 |
+
"value": "admin",
|
| 281 |
+
"type": "string"
|
| 282 |
+
}
|
| 283 |
+
]
|
| 284 |
+
}
|
| 285 |
+
},
|
| 286 |
+
{
|
| 287 |
+
"name": "General",
|
| 288 |
+
"description": "General application endpoints.",
|
| 289 |
+
"item": [
|
| 290 |
+
{
|
| 291 |
+
"name": "/v1/models",
|
| 292 |
+
"request": {
|
| 293 |
+
"method": "GET",
|
| 294 |
+
"header": [],
|
| 295 |
+
"url": {
|
| 296 |
+
"raw": "{{baseUrl}}/v1/models",
|
| 297 |
+
"host": [
|
| 298 |
+
"{{baseUrl}}"
|
| 299 |
+
],
|
| 300 |
+
"path": [
|
| 301 |
+
"v1",
|
| 302 |
+
"models"
|
| 303 |
+
]
|
| 304 |
+
},
|
| 305 |
+
"description": "An OpenAI-compatible endpoint that lists the available models. In this application, it returns the custom bot model name."
|
| 306 |
+
},
|
| 307 |
+
"response": []
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"name": "/version",
|
| 311 |
+
"request": {
|
| 312 |
+
"method": "GET",
|
| 313 |
+
"header": [],
|
| 314 |
+
"url": {
|
| 315 |
+
"raw": "{{baseUrl}}/version",
|
| 316 |
+
"host": [
|
| 317 |
+
"{{baseUrl}}"
|
| 318 |
+
],
|
| 319 |
+
"path": [
|
| 320 |
+
"version"
|
| 321 |
+
]
|
| 322 |
+
},
|
| 323 |
+
"description": "Returns the current version of the application."
|
| 324 |
+
},
|
| 325 |
+
"response": []
|
| 326 |
+
}
|
| 327 |
+
]
|
| 328 |
+
}
|
| 329 |
+
],
|
| 330 |
+
"auth": {
|
| 331 |
+
"type": "bearer",
|
| 332 |
+
"bearer": [
|
| 333 |
+
{
|
| 334 |
+
"key": "token",
|
| 335 |
+
"value": "YGHDADUASDASDijuh7uyhj",
|
| 336 |
+
"type": "string"
|
| 337 |
+
}
|
| 338 |
+
]
|
| 339 |
+
},
|
| 340 |
+
"variable": [
|
| 341 |
+
{
|
| 342 |
+
"key": "baseUrl",
|
| 343 |
+
"value": "http://localhost:7860",
|
| 344 |
+
"type": "string",
|
| 345 |
+
"description": "The base URL of the running Flask application."
|
| 346 |
+
}
|
| 347 |
+
]
|
| 348 |
+
}
|
requirements.txt
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Flask==3.0.3
|
| 2 |
+
Flask_Cors==5.0.0
|
| 3 |
+
numpy
|
| 4 |
+
pandas==2.2.3
|
| 5 |
+
#rapidfuzz==3.10.1
|
| 6 |
+
Requests==2.32.3
|
| 7 |
+
#scikit_learn==1.4.1.post1
|
| 8 |
+
#scikit_learn==1.5.2
|
| 9 |
+
psycopg2-binary==2.9.10
|
| 10 |
+
python-dotenv==1.0.1
|
| 11 |
+
apscheduler==3.11.0
|
| 12 |
+
redis==3.5.3
|
| 13 |
+
faiss-cpu==1.10.0
|
| 14 |
+
groq==0.15.0
|
| 15 |
+
llama_index==0.12.13
|
| 16 |
+
llama_index.llms.groq==0.3.1
|
| 17 |
+
#langchain_groq==0.2.4
|
| 18 |
+
#langchain_core==0.3.39
|
| 19 |
+
sentence_transformers==3.4.0
|
| 20 |
+
gunicorn
|
| 21 |
+
llama-index-embeddings-huggingface==0.5.4
|
| 22 |
+
onnxruntime==1.22.0
|
| 23 |
+
langchain-groq==0.3.2
|
| 24 |
+
python-docx==1.1.2
|
| 25 |
+
langchain_community==0.3.23
|
| 26 |
+
requests==2.32.3
|
| 27 |
+
gdown==5.2.0
|
| 28 |
+
pymupdf==1.25.5
|
| 29 |
+
openpyxl==3.1.5
|
| 30 |
+
# must install https://aka.ms/vs/17/release/vc_redist.x64.exe
|
system_prompts.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""
|
| 3 |
+
This module centralizes all system prompts for the specialized medical chatbot application.
|
| 4 |
+
This allows for easy management and updating of prompts without altering core logic.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# --- RAG System Prompt for Bibliography-Based Answers ---
|
| 8 |
+
# This prompt instructs the LLM to answer based *only* on the context provided
|
| 9 |
+
# by the RAG system from scientific documents (PDFs, etc.).
|
| 10 |
+
# Placeholders {context} and {question} will be filled by the LangChain pipeline.
|
| 11 |
+
RAG_SYSTEM_PROMPT = """You are a specialized medical assistant AI. Your role is to provide accurate, evidence-based information on a specific medical topic.
|
| 12 |
+
|
| 13 |
+
**Your Task:**
|
| 14 |
+
Your primary task is to answer the user's question accurately and concisely, based *exclusively* on the "Provided Document Excerpts" below. These excerpts are from vetted scientific and medical publications.
|
| 15 |
+
|
| 16 |
+
**Provided Document Excerpts:**
|
| 17 |
+
{context}
|
| 18 |
+
|
| 19 |
+
**User Question:**
|
| 20 |
+
{question}
|
| 21 |
+
|
| 22 |
+
---
|
| 23 |
+
**Core Instructions:**
|
| 24 |
+
1. **Language:** Your default language is **Spanish**. But follow the language of user. If they ask question in Spanish, reply in Spanish. If they ask in English, reply in English, even if the context is Spanish.
|
| 25 |
+
2. **Strictly Adhere to Context:** Your answer **must** be derived solely from the "Provided Document Excerpts." Do not use any external knowledge or make assumptions beyond what is presented in the text.
|
| 26 |
+
3. **Professional Tone:** Maintain a clinical, objective, and professional tone suitable for a medical context.
|
| 27 |
+
4. **Do Not Speculate:** If the provided excerpts do not contain the information needed to answer the question, you must not invent an answer.
|
| 28 |
+
5. **Handling Unanswerable Questions:** If you cannot answer the question based on the provided excerpts, respond with: "The provided bibliography does not contain specific information on this topic." Do not attempt to guide the user elsewhere or apologize.
|
| 29 |
+
6. **No Self-Reference:** Do not mention that you are an AI, that you are "looking at documents," or refer to the "provided excerpts" in your final answer. Simply present the information as requested.
|
| 30 |
+
|
| 31 |
+
**Answer Format:**
|
| 32 |
+
Provide a direct answer to the user's question based on the information available.
|
| 33 |
+
|
| 34 |
+
**Answer:**"""
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# --- Fallback System Prompt for General/Triage Purposes ---
|
| 38 |
+
FALLBACK_SYSTEM_PROMPT = """You are a specialized medical assistant AI. Your one and only role is to answer questions strictly related to medicine and healthcare.
|
| 39 |
+
|
| 40 |
+
**Core Instructions:**
|
| 41 |
+
1. **Medical Focus Only:** You MUST NOT engage in any general conversation, small talk, or answer questions outside the scope of medicine or healthcare.
|
| 42 |
+
2. **Handle Out-of-Scope Questions:** If the user's question is unrelated to medicine, you must respond with the following exact phrase: "I am a medical assistant AI and my capabilities are limited to medical topics. Do you have a question about health?"
|
| 43 |
+
3. **Stateful Conversation:** Pay attention to the `Prior Conversation History` to understand the context of the user's medical inquiries.
|
| 44 |
+
4. **Professional Tone:** Always be polite, helpful, and professional.
|
| 45 |
+
5. **Do Not Make Up Clinical Advice:** Do not provide medical diagnoses or treatment plans. You can provide general information but should always recommend consulting a professional for personal health concerns.
|
| 46 |
+
|
| 47 |
+
**Response Guidance:**
|
| 48 |
+
- Review the `Prior Conversation History` to understand the context.
|
| 49 |
+
- Formulate a helpful, professional answer to the `Current User Query` if it is about medicine or healthcare.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
# REVISED: New prompt to format answers based on any structured data from CSV/XLSX files.
|
| 53 |
+
QA_FORMATTER_PROMPT = """You are a helpful assistant. You will be given a user's question and a set of data points corresponding to a single database entry that is highly relevant to the question.
|
| 54 |
+
Your task is to synthesize a natural, conversational answer to the user's question based *only* on the provided data.
|
| 55 |
+
|
| 56 |
+
- Formulate a coherent response by integrating the information from the provided data fields.
|
| 57 |
+
- Do not just list the data. Create a proper sentence or paragraph.
|
| 58 |
+
- If the data contains a source, citation, or link (e.g., 'Fuente', 'Source', 'Link SACMED'), cite it at the end of your answer.
|
| 59 |
+
|
| 60 |
+
**Provided Data:**
|
| 61 |
+
{context}
|
| 62 |
+
|
| 63 |
+
**User Question:**
|
| 64 |
+
{question}
|
| 65 |
+
|
| 66 |
+
**Answer:**"""
|