Upload project excluding env and notebook
Browse files- Dockerfile +38 -0
- backend/__init__.py +57 -0
- backend/app/config.py +67 -0
- backend/app/main.py +258 -0
- backend/app/models.py +104 -0
- backend/app/rag/__init__.py +0 -0
- backend/app/rag/chain.py +569 -0
- backend/app/rag/compressor.py +173 -0
- backend/app/rag/query_expander.py +297 -0
- backend/app/rag/reranker.py +182 -0
- backend/app/rag/retriever.py +215 -0
- backend/app/utils/__init__.py +0 -0
- backend/app/utils/cache.py +393 -0
- backend/app/utils/citations.py +125 -0
- backend/app/utils/conversation.py +180 -0
- backend/requirements.txt +21 -0
- frontend/index.html +135 -0
- frontend/script.js +524 -0
- frontend/style.css +610 -0
Dockerfile
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use Python 3.11 slim image
FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PORT=7860

# Install system dependencies (curl is required by the HEALTHCHECK below);
# --no-install-recommends keeps the image small
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better layer caching
COPY backend/requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy backend code
COPY backend/app ./app

# Copy frontend files
COPY frontend ./frontend

# Expose Hugging Face Spaces port
EXPOSE 7860

# Health check against the FastAPI /health endpoint
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Run the application.
# Exec form (JSON array) so uvicorn runs as PID 1 and receives SIGTERM
# directly instead of being wrapped in /bin/sh, which swallows signals.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
backend/__init__.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# backend/app/__init__.py
|
| 2 |
+
"""FinSight RAG Application."""
|
| 3 |
+
|
| 4 |
+
__version__ = "1.0.0"
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# backend/app/rag/__init__.py
|
| 8 |
+
"""RAG components for document retrieval and processing."""
|
| 9 |
+
|
| 10 |
+
from app.rag.retriever import ZillizRetriever
|
| 11 |
+
from app.rag.query_expander import QueryExpander
|
| 12 |
+
from app.rag.reranker import MMRReranker
|
| 13 |
+
from app.rag.compressor import ContextualCompressor
|
| 14 |
+
from app.rag.chain import RAGChain
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"ZillizRetriever",
|
| 18 |
+
"QueryExpander",
|
| 19 |
+
"MMRReranker",
|
| 20 |
+
"ContextualCompressor",
|
| 21 |
+
"RAGChain",
|
| 22 |
+
]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# backend/app/utils/__init__.py
|
| 26 |
+
"""Utility functions and helpers."""
|
| 27 |
+
|
| 28 |
+
from app.utils.citations import CitationTracker, extract_citations_from_answer
|
| 29 |
+
from app.utils.conversation import (
|
| 30 |
+
ConversationMessage,
|
| 31 |
+
ConversationHistory,
|
| 32 |
+
SessionManager,
|
| 33 |
+
session_manager
|
| 34 |
+
)
|
| 35 |
+
from app.utils.cache import (
|
| 36 |
+
CacheEntry,
|
| 37 |
+
EmbeddingCache,
|
| 38 |
+
QueryResponseCache,
|
| 39 |
+
DocumentCache,
|
| 40 |
+
CacheManager,
|
| 41 |
+
cache_manager
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
__all__ = [
|
| 45 |
+
"CitationTracker",
|
| 46 |
+
"extract_citations_from_answer",
|
| 47 |
+
"ConversationMessage",
|
| 48 |
+
"ConversationHistory",
|
| 49 |
+
"SessionManager",
|
| 50 |
+
"session_manager",
|
| 51 |
+
"CacheEntry",
|
| 52 |
+
"EmbeddingCache",
|
| 53 |
+
"QueryResponseCache",
|
| 54 |
+
"DocumentCache",
|
| 55 |
+
"CacheManager",
|
| 56 |
+
"cache_manager",
|
| 57 |
+
]
|
backend/app/config.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Configuration management for the RAG application."""
import os
from typing import Optional
from pydantic_settings import BaseSettings
from dotenv import load_dotenv

# Load environment variables from a local .env file, if one exists.
load_dotenv()


class Settings(BaseSettings):
    """Application settings loaded from environment variables.

    Required (no default): OPENAI_API_KEY, ZILLIZ_URI, ZILLIZ_TOKEN —
    instantiation raises a validation error if any is missing.
    """

    # OpenAI Configuration
    OPENAI_API_KEY: str
    OPENAI_MODEL: str = "gpt-3.5-turbo"
    OPENAI_EMBEDDING_MODEL: str = "text-embedding-3-large"
    OPENAI_EMBEDDING_DIMENSION: int = 3072

    # Zilliz Configuration
    ZILLIZ_URI: str
    ZILLIZ_TOKEN: str
    COLLECTION_NAME: str = "financial_documents"

    # RAG Configuration
    DEFAULT_TOP_K: int = 10
    RETRIEVAL_TOP_K: int = 30  # Retrieve more for reranking
    MAX_CONTEXT_TOKENS: int = 8000
    LLM_TIMEOUT: int = 30  # seconds

    # Query Expansion
    ENABLE_QUERY_EXPANSION: bool = True
    MAX_QUERY_VARIATIONS: int = 3

    # Reranking
    ENABLE_RERANKING: bool = True
    MMR_DIVERSITY_SCORE: float = 0.3  # Balance between relevance and diversity

    # Compression
    ENABLE_COMPRESSION: bool = True

    # Caching
    ENABLE_QUERY_CACHE: bool = True
    ENABLE_EMBEDDING_CACHE: bool = True
    EMBEDDING_CACHE_SIZE: int = 1000
    EMBEDDING_CACHE_TTL: int = 86400  # 24 hours
    QUERY_CACHE_SIZE: int = 100
    QUERY_CACHE_TTL: int = 3600  # 1 hour

    # CORS - IMPORTANT: Update for Hugging Face
    # Typed as list[str] (was bare `list`) so pydantic validates elements.
    # NOTE(review): browsers reject the wildcard "*" when the server also
    # sends credentials (main.py sets allow_credentials=True), and the
    # entries after "*" are redundant while the wildcard is present —
    # confirm intent and narrow to explicit origins if credentials matter.
    ALLOWED_ORIGINS: list[str] = [
        "*",  # Allow all origins for Hugging Face Spaces
        "https://huggingface.co",
        "https://*.hf.space",
    ]

    # Server Configuration - Hugging Face uses port 7860
    PORT: int = int(os.getenv("PORT", "7860"))
    HOST: str = "0.0.0.0"

    class Config:
        env_file = ".env"
        case_sensitive = True


# Global settings instance
settings = Settings()
|
backend/app/main.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI application entry point - Hugging Face optimized."""
|
| 2 |
+
from fastapi import FastAPI, HTTPException
|
| 3 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 4 |
+
from fastapi.staticfiles import StaticFiles
|
| 5 |
+
from fastapi.responses import FileResponse
|
| 6 |
+
from contextlib import asynccontextmanager
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
from app.config import settings
|
| 11 |
+
from app.models import QueryRequest, QueryResponse, HealthResponse, StatsResponse
|
| 12 |
+
from app.rag.chain import RAGChain
|
| 13 |
+
from app.rag.retriever import ZillizRetriever
|
| 14 |
+
from app.utils.cache import cache_manager
|
| 15 |
+
|
| 16 |
+
# Configure logging
|
| 17 |
+
logging.basicConfig(level=logging.INFO)
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
# Global RAG chain instance
|
| 21 |
+
rag_chain = None
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager for startup and shutdown events."""
    global rag_chain

    # --- Startup: log runtime configuration, then build the shared chain ---
    logger.info("Initializing RAG chain...")
    logger.info(f"Running on port: {settings.PORT}")
    logger.info(f"CORS origins: {settings.ALLOWED_ORIGINS}")

    try:
        rag_chain = RAGChain()
    except Exception as e:
        # A broken chain makes every endpoint useless — fail app startup.
        logger.error(f"Failed to initialize RAG chain: {e}")
        raise
    logger.info("RAG chain initialized successfully")

    yield

    # --- Shutdown ---
    logger.info("Shutting down application...")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# Create FastAPI app
app = FastAPI(
    title="FinSight RAG API",
    description="Production-ready LangChain RAG application for financial document Q&A",
    version="1.0.0",
    lifespan=lifespan,
)

# Add CORS middleware - Important for Hugging Face Spaces
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Serve the static frontend (if the directory was shipped with the image).
frontend_path = os.path.join(os.path.dirname(__file__), "..", "frontend")
if os.path.exists(frontend_path):
    app.mount("/static", StaticFiles(directory=frontend_path), name="static")
    logger.info(f"Frontend mounted at /static from {frontend_path}")
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@app.get("/", tags=["Root"])
async def root():
    """Root endpoint - serve frontend."""
    index_file = os.path.join(frontend_path, "index.html")
    if os.path.exists(index_file):
        return FileResponse(index_file)
    # No bundled frontend: answer with a small API descriptor instead.
    return {
        "message": "FinSight RAG API",
        "version": "1.0.0",
        "docs": "/docs",
        "frontend": "Frontend not found. Use API directly.",
    }
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@app.get("/health", response_model=HealthResponse, tags=["Health"])
async def health_check():
    """Health check endpoint.

    Used by the Docker HEALTHCHECK and load balancers.

    Returns:
        HealthResponse with status
    """
    return HealthResponse(status="healthy", version="1.0.0")
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@app.get("/stats", response_model=StatsResponse, tags=["Statistics"])
async def get_stats():
    """Get collection statistics.

    Returns:
        StatsResponse with collection information
    """
    try:
        stats = ZillizRetriever().get_collection_stats()
    except Exception as e:
        logger.error(f"Error getting stats: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get statistics: {str(e)}")

    # Fall back to configured values when the retriever omits a field.
    return StatsResponse(
        collection_name=stats.get("collection_name", settings.COLLECTION_NAME),
        total_documents=stats.get("total_documents", 0),
        embedding_dimension=stats.get("embedding_dimension", settings.OPENAI_EMBEDDING_DIMENSION),
        available_tickers=["ACM"],  # Hardcoded for now
        available_doc_types=["balance_sheet", "cash_flow", "income_statement", "10k"],
    )
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@app.post("/query", response_model=QueryResponse, tags=["Query"])
async def query_documents(request: QueryRequest):
    """
    Main RAG query endpoint with conversation memory.

    Process a financial query through the RAG pipeline and return an answer with sources.
    Maintains conversation history within sessions for follow-up questions.

    Args:
        request: QueryRequest with query text, optional filters, and session_id

    Returns:
        QueryResponse with answer, sources, and session_id
    """
    # Guard clause: the chain is built during app startup (lifespan).
    if rag_chain is None:
        raise HTTPException(status_code=503, detail="RAG chain not initialized")

    logger.info(f"Processing query: {request.query} [Session: {request.session_id or 'new'}]")
    try:
        result = await rag_chain.aprocess_query(request)
    except Exception as e:
        logger.error(f"Error processing query: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to process query: {str(e)}")

    logger.info(f"Query processed successfully in {result.processing_time}s [Session: {result.session_id}]")
    return result
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@app.post("/query/sync", response_model=QueryResponse, tags=["Query"])
def query_documents_sync(request: QueryRequest):
    """
    Synchronous version of query endpoint with conversation memory.

    Args:
        request: QueryRequest with query text, optional filters, and session_id

    Returns:
        QueryResponse with answer, sources, and session_id
    """
    # Guard clause: the chain is built during app startup (lifespan).
    if rag_chain is None:
        raise HTTPException(status_code=503, detail="RAG chain not initialized")

    logger.info(f"Processing query (sync): {request.query} [Session: {request.session_id or 'new'}]")
    try:
        result = rag_chain.process_query(request)
    except Exception as e:
        logger.error(f"Error processing query: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to process query: {str(e)}")

    logger.info(f"Query processed successfully in {result.processing_time}s [Session: {result.session_id}]")
    return result
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
@app.delete("/session/{session_id}", tags=["Session"])
async def clear_session(session_id: str):
    """
    Clear conversation history for a session.

    Args:
        session_id: Session identifier to clear

    Returns:
        Success message
    """
    if rag_chain is None:
        raise HTTPException(status_code=503, detail="RAG chain not initialized")

    try:
        rag_chain.clear_conversation(session_id)
    except Exception as e:
        logger.error(f"Error clearing session: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to clear session: {str(e)}")

    logger.info(f"Cleared session: {session_id}")
    return {"message": f"Session {session_id} cleared successfully"}
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
@app.get("/cache/stats", tags=["Cache"])
async def get_cache_stats():
    """
    Get cache statistics showing hit rates and cost savings.

    Returns:
        Dictionary with cache statistics for all cache layers
    """
    try:
        stats = cache_manager.get_all_stats()
    except Exception as e:
        logger.error(f"Error getting cache stats: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get cache stats: {str(e)}")

    logger.info("Cache stats retrieved")
    return stats
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
@app.delete("/cache/clear", tags=["Cache"])
async def clear_all_caches():
    """
    Clear all caches (embeddings and query results).

    Use this to force fresh results or manage memory.

    Returns:
        Success message
    """
    try:
        cache_manager.clear_all()
    except Exception as e:
        logger.error(f"Error clearing caches: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to clear caches: {str(e)}")

    logger.info("All caches cleared")
    return {"message": "All caches cleared successfully"}
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
if __name__ == "__main__":
    # Local development entry point; production uses the Docker CMD.
    import uvicorn

    uvicorn.run(
        "app.main:app",
        host=settings.HOST,
        port=settings.PORT,
        reload=False,  # Disable reload in production
    )
|
backend/app/models.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic models for request/response validation."""
|
| 2 |
+
from typing import List, Optional, Dict, Any
|
| 3 |
+
from pydantic import BaseModel, Field
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class QueryRequest(BaseModel):
    """Request model for RAG query endpoint."""

    query: str = Field(..., description="User's financial question")
    ticker: Optional[str] = Field(None, description="Filter by company ticker (e.g., 'ACM')")
    doc_types: Optional[List[str]] = Field(
        None,
        description="Filter by document types: balance_sheet, cash_flow, income_statement, 10k"
    )
    # Validated to the 1..20 range; defaults to 10.
    top_k: int = Field(10, ge=1, le=20, description="Number of sources to retrieve")
    # Omitting session_id starts a new conversation (server assigns one).
    session_id: Optional[str] = Field(None, description="Session ID for conversation history")

    class Config:
        # Example payload rendered in the OpenAPI /docs UI.
        json_schema_extra = {
            "example": {
                "query": "What was ACM's revenue in 2024?",
                "ticker": "ACM",
                "doc_types": ["income_statement"],
                "top_k": 10,
                "session_id": "abc123"
            }
        }
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Source(BaseModel):
    """Source citation information."""

    source_id: int = Field(..., description="Source reference number [Source N]")
    filename: str = Field(..., description="Source filename")
    doc_type: str = Field(..., description="Document type")
    ticker: Optional[str] = Field(None, description="Company ticker")
    similarity_score: float = Field(..., description="Similarity score (0-1)")
    chunk_id: Optional[str] = Field(None, description="Chunk identifier")
    text_preview: str = Field(..., description="Preview of source text (first 200 chars)")

    class Config:
        # Example rendered in the OpenAPI /docs UI.
        json_schema_extra = {
            "example": {
                "source_id": 1,
                "filename": "ACM_balance_sheet.md",
                "doc_type": "balance_sheet",
                "ticker": "ACM",
                "similarity_score": 0.89,
                "chunk_id": "chunk_0",
                "text_preview": "Total Current Assets for FY 2025: $6.73B..."
            }
        }
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class QueryResponse(BaseModel):
    """Response model for RAG query endpoint."""

    answer: str = Field(..., description="AI-generated answer with citations")
    sources: List[Source] = Field(..., description="List of sources used")
    query: str = Field(..., description="Original query")
    processing_time: float = Field(..., description="Total processing time in seconds")
    expanded_queries: Optional[List[str]] = Field(None, description="Query variations used")
    num_documents_retrieved: int = Field(..., description="Number of documents retrieved")
    # Echoed back so clients can continue the conversation on follow-ups.
    session_id: str = Field(..., description="Session ID for this conversation")

    class Config:
        # Example rendered in the OpenAPI /docs UI.
        json_schema_extra = {
            "example": {
                "answer": "ACM's revenue in FY 2024 was $16.11B [Source 1]...",
                "sources": [
                    {
                        "source_id": 1,
                        "filename": "ACM_income_statement.md",
                        "doc_type": "income_statement",
                        "ticker": "ACM",
                        "similarity_score": 0.92,
                        "chunk_id": "chunk_0",
                        "text_preview": "Contract Revenue FY 2024: $16.11B..."
                    }
                ],
                "query": "What was ACM's revenue in 2024?",
                "processing_time": 2.34,
                "expanded_queries": ["What was ACM's revenue in 2024?"],
                "num_documents_retrieved": 5,
                "session_id": "abc123"
            }
        }
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class HealthResponse(BaseModel):
    """Health check response."""

    # Service status string; the /health endpoint always reports "healthy".
    status: str
    # API version; defaults to the current release.
    version: str = "1.0.0"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class StatsResponse(BaseModel):
    """Collection statistics response."""

    # Name of the vector collection being queried.
    collection_name: str
    # Number of documents/chunks stored in the collection.
    total_documents: int
    # Dimensionality of the stored embedding vectors.
    embedding_dimension: int
    # Tickers available for filtering (currently hardcoded in /stats).
    available_tickers: List[str]
    # Document categories available for filtering.
    available_doc_types: List[str]
|
backend/app/rag/__init__.py
ADDED
|
File without changes
|
backend/app/rag/chain.py
ADDED
|
@@ -0,0 +1,569 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Main RAG chain orchestration with conversation memory."""
|
| 2 |
+
import time
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
from langchain_core.documents import Document
|
| 5 |
+
from langchain_openai import ChatOpenAI
|
| 6 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 7 |
+
from app.config import settings
|
| 8 |
+
from app.models import QueryRequest, QueryResponse, Source
|
| 9 |
+
from app.rag.retriever import ZillizRetriever
|
| 10 |
+
from app.rag.query_expander import QueryExpander
|
| 11 |
+
from app.rag.reranker import MMRReranker
|
| 12 |
+
from app.rag.compressor import ContextualCompressor
|
| 13 |
+
from app.utils.citations import CitationTracker
|
| 14 |
+
from app.utils.conversation import ConversationHistory
|
| 15 |
+
from app.utils.cache import cache_manager
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# System prompt from specifications
|
| 19 |
+
SYSTEM_PROMPT = """You are an expert financial analyst AI. You provide accurate financial analysis from company financial statements and 10-K filings.
|
| 20 |
+
|
| 21 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 22 |
+
CRITICAL RULE #1: ALWAYS USE "TOTAL" LINE ITEMS FROM FINANCIAL STATEMENTS
|
| 23 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 24 |
+
|
| 25 |
+
Financial statements contain summary rows labeled "Total". These are the ONLY numbers you should use for calculations.
|
| 26 |
+
|
| 27 |
+
**MANDATORY LABELS TO USE (Look for these EXACT phrases):**
|
| 28 |
+
|
| 29 |
+
FROM BALANCE SHEET:
|
| 30 |
+
✓ "Total Current Assets" - Use this, NOT individual assets
|
| 31 |
+
✓ "Total Current Liabilities" - Use this, NOT individual liabilities
|
| 32 |
+
✓ "Total Assets" - Use this
|
| 33 |
+
✓ "Total Liabilities" - Use this
|
| 34 |
+
✓ "Total Stockholders' Equity" or "Total Equity" - Use this
|
| 35 |
+
✓ "Long Term Debt" or "Long-term debt" - Use this
|
| 36 |
+
✓ "Short Term Debt" or "Short-term debt" - Use this (if needed)
|
| 37 |
+
|
| 38 |
+
FROM INCOME STATEMENT:
|
| 39 |
+
✓ "Contract Revenue" or "Revenue" or "Total Revenue" - Use this
|
| 40 |
+
✓ "Total Cost of Revenue" or "Cost of Revenue" - Use this
|
| 41 |
+
✓ "Gross Profit" - Use this (already calculated)
|
| 42 |
+
✓ "Operating Income" - Use this (already calculated)
|
| 43 |
+
✓ "Net Income" or "Profit or Loss" - Use this
|
| 44 |
+
✓ "Income Before Tax" - Use this
|
| 45 |
+
|
| 46 |
+
FROM CASH FLOW STATEMENT:
|
| 47 |
+
✓ "Net Cash from Operating Activities" - Use this
|
| 48 |
+
✓ "Net Cash from Investing Activities" - Use this
|
| 49 |
+
✓ "Net Cash from Financing Activities" - Use this
|
| 50 |
+
|
| 51 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 52 |
+
FINANCIAL CALCULATIONS - COMPREHENSIVE GUIDE
|
| 53 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 54 |
+
|
| 55 |
+
**LIQUIDITY RATIOS:**
|
| 56 |
+
|
| 57 |
+
1. Working Capital = Total Current Assets - Total Current Liabilities
|
| 58 |
+
Example: $6.73B - $5.93B = $800M
|
| 59 |
+
|
| 60 |
+
2. Current Ratio = Total Current Assets ÷ Total Current Liabilities
|
| 61 |
+
Example: $6.73B ÷ $5.93B = 1.13
|
| 62 |
+
Interpretation: >1.0 is good (company can cover short-term obligations)
|
| 63 |
+
|
| 64 |
+
3. Quick Ratio = (Total Current Assets - Inventory) ÷ Total Current Liabilities
|
| 65 |
+
Note: Only subtract inventory if explicitly asked for quick ratio
|
| 66 |
+
|
| 67 |
+
**LEVERAGE/SOLVENCY RATIOS:**
|
| 68 |
+
|
| 69 |
+
4. Debt-to-Equity Ratio = Total Debt ÷ Total Stockholders' Equity
|
| 70 |
+
Where Total Debt = Long Term Debt + Short Term Debt
|
| 71 |
+
Example: ($2.65B + $4.07M) ÷ $2.70B = 0.98
|
| 72 |
+
Interpretation: <1.0 means less debt than equity (generally safer)
|
| 73 |
+
|
| 74 |
+
5. Debt-to-Assets Ratio = Total Debt ÷ Total Assets
|
| 75 |
+
Example: $2.65B ÷ $12.20B = 0.22 or 22%
|
| 76 |
+
|
| 77 |
+
6. Equity Ratio = Total Stockholders' Equity ÷ Total Assets
|
| 78 |
+
Example: $2.70B ÷ $12.20B = 0.22 or 22%
|
| 79 |
+
|
| 80 |
+
**PROFITABILITY RATIOS:**
|
| 81 |
+
|
| 82 |
+
7. Gross Profit Margin = (Gross Profit ÷ Revenue) × 100
|
| 83 |
+
Example: ($1.22B ÷ $16.14B) × 100 = 7.6%
|
| 84 |
+
|
| 85 |
+
8. Operating Profit Margin = (Operating Income ÷ Revenue) × 100
|
| 86 |
+
Example: ($1.03B ÷ $16.14B) × 100 = 6.4%
|
| 87 |
+
|
| 88 |
+
9. Net Profit Margin = (Net Income ÷ Revenue) × 100
|
| 89 |
+
Example: ($561.77M ÷ $16.14B) × 100 = 3.5%
|
| 90 |
+
|
| 91 |
+
10. Return on Assets (ROA) = (Net Income ÷ Total Assets) × 100
|
| 92 |
+
Example: ($561.77M ÷ $12.20B) × 100 = 4.6%
|
| 93 |
+
|
| 94 |
+
11. Return on Equity (ROE) = (Net Income ÷ Total Stockholders' Equity) × 100
|
| 95 |
+
Example: ($561.77M ÷ $2.70B) × 100 = 20.8%
|
| 96 |
+
|
| 97 |
+
**EFFICIENCY RATIOS:**
|
| 98 |
+
|
| 99 |
+
12. Asset Turnover = Revenue ÷ Total Assets
|
| 100 |
+
Example: $16.14B ÷ $12.20B = 1.32
|
| 101 |
+
Interpretation: Company generates $1.32 in revenue for every $1 of assets
|
| 102 |
+
|
| 103 |
+
13. Inventory Turnover = Cost of Revenue ÷ Inventory
|
| 104 |
+
(Only calculate if inventory is available in balance sheet)
|
| 105 |
+
|
| 106 |
+
**CASH FLOW ANALYSIS:**
|
| 107 |
+
|
| 108 |
+
14. Operating Cash Flow Margin = (Net Cash from Operating Activities ÷ Revenue) × 100
|
| 109 |
+
|
| 110 |
+
15. Free Cash Flow = Net Cash from Operating Activities - Capital Expenditures
|
| 111 |
+
(Capital Expenditures = "Payments for Property, Plant and Equipment" from cash flow)
|
| 112 |
+
|
| 113 |
+
16. Cash Flow to Net Income Ratio = Net Cash from Operating Activities ÷ Net Income
|
| 114 |
+
Interpretation: >1.0 means high quality earnings (cash backing profits)
|
| 115 |
+
|
| 116 |
+
**YEAR-OVER-YEAR (YoY) ANALYSIS:**
|
| 117 |
+
|
| 118 |
+
17. YoY Growth Rate = ((Current Year - Prior Year) ÷ Prior Year) × 100
|
| 119 |
+
Example Revenue Growth: (($16.14B - $16.11B) ÷ $16.11B) × 100 = 0.19%
|
| 120 |
+
|
| 121 |
+
18. YoY Change (Dollar Amount) = Current Year - Prior Year
|
| 122 |
+
Example: $16.14B - $16.11B = $30M increase
|
| 123 |
+
|
| 124 |
+
**TREND ANALYSIS (Multiple Years):**
|
| 125 |
+
|
| 126 |
+
19. When analyzing trends over 3+ years:
|
| 127 |
+
- Calculate YoY change for each consecutive year
|
| 128 |
+
- Identify if trend is increasing, decreasing, or stable
|
| 129 |
+
- Note any significant inflection points
|
| 130 |
+
|
| 131 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 132 |
+
HOW TO EXTRACT DATA FROM FINANCIAL STATEMENTS
|
| 133 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 134 |
+
|
| 135 |
+
1. **Identify the fiscal year columns** (usually labeled FY 2025, FY 2024, etc.)
|
| 136 |
+
|
| 137 |
+
2. **Find the "Total" row** for what you need:
|
| 138 |
+
- Scan the "label" column for rows starting with "Total"
|
| 139 |
+
- Use the value from the appropriate fiscal year column
|
| 140 |
+
|
| 141 |
+
3. **For balance sheet items**, look in balance sheet sources
|
| 142 |
+
|
| 143 |
+
4. **For income statement items**, look in income statement sources
|
| 144 |
+
|
| 145 |
+
5. **For cash flow items**, look in cash flow statement sources
|
| 146 |
+
|
| 147 |
+
6. **NEVER add up individual line items** when a "Total" exists
|
| 148 |
+
|
| 149 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 150 |
+
RESPONSE FORMAT (User-Friendly)
|
| 151 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 152 |
+
|
| 153 |
+
**Structure:**
|
| 154 |
+
1. Direct Answer - What's the answer in one sentence?
|
| 155 |
+
2. Key Figures - List the relevant numbers with years and [Source X]
|
| 156 |
+
3. Calculation - Show the result (not the formula)
|
| 157 |
+
4. Analysis - What does this mean? Is it good or bad? What's the trend?
|
| 158 |
+
5. Sources - List all sources cited
|
| 159 |
+
|
| 160 |
+
**Writing Style:**
|
| 161 |
+
- Use simple language, not jargon
|
| 162 |
+
- Show formulas in plain text: "Revenue ÷ Gross Profit" or "Current Assets - Current Liabilities"
|
| 163 |
+
- Then show the calculation with actual numbers: "$16.14B ÷ $1.22B = 13.2"
|
| 164 |
+
- Use bullet points
|
| 165 |
+
- Compare to prior year when relevant
|
| 166 |
+
- State if trend is positive or negative for the company
|
| 167 |
+
|
| 168 |
+
**Example Good Answer:**
|
| 169 |
+
"Direct Answer: ACM's working capital was $800M in FY 2025.
|
| 170 |
+
|
| 171 |
+
Key Figures:
|
| 172 |
+
• FY 2025: Total Current Assets $6.73B, Total Current Liabilities $5.93B [Source 1]
|
| 173 |
+
• FY 2024: Total Current Assets $7.18B, Total Current Liabilities $6.37B [Source 1]
|
| 174 |
+
|
| 175 |
+
Calculation:
|
| 176 |
+
• Formula: Total Current Assets - Total Current Liabilities = Working Capital
|
| 177 |
+
• FY 2025: $6.73B - $5.93B = $800M
|
| 178 |
+
• FY 2024: $7.18B - $6.37B = $810M
|
| 179 |
+
• Change: $800M - $810M = -$10M decline (1.2% decrease)
|
| 180 |
+
|
| 181 |
+
Analysis:
|
| 182 |
+
Working capital decreased by $10M (1.2% decline). This slight reduction means ACM has marginally less liquidity to cover short-term obligations compared to last year, though the company still maintains positive working capital.
|
| 183 |
+
|
| 184 |
+
Sources: [Source 1] ACM_balance_sheet.md"
|
| 185 |
+
|
| 186 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 187 |
+
FOR 10-K NARRATIVE SECTIONS
|
| 188 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 189 |
+
|
| 190 |
+
When answering questions about 10-K narrative content (business description, risks, strategy):
|
| 191 |
+
- Summarize key points clearly
|
| 192 |
+
- Use bullet points for multiple items
|
| 193 |
+
- Quote important phrases when relevant
|
| 194 |
+
- Cite sources for each major point
|
| 195 |
+
- Group related information together
|
| 196 |
+
|
| 197 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 198 |
+
CONVERSATION CONTEXT AND FOLLOW-UP QUESTIONS
|
| 199 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 200 |
+
|
| 201 |
+
If the user asks a follow-up question that refers to previous context:
|
| 202 |
+
- Use the conversation history provided to understand the context
|
| 203 |
+
- Reference previous questions/answers when relevant (e.g., "As mentioned earlier...")
|
| 204 |
+
- Maintain consistency with previous responses
|
| 205 |
+
- If the follow-up requires new data, retrieve it from the documents
|
| 206 |
+
|
| 207 |
+
For pronoun references (e.g., "What about last year?" or "How does that compare?"):
|
| 208 |
+
- Infer what "that" or "it" refers to from the conversation history
|
| 209 |
+
- Explicitly state what you're comparing in your answer
|
| 210 |
+
|
| 211 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 212 |
+
CRITICAL CONSTRAINTS
|
| 213 |
+
═══════════════════════════════════════════════════════════════════════════
|
| 214 |
+
|
| 215 |
+
**NEVER FABRICATE NUMBERS**: If specific information is not present in the provided context, explicitly state "This information is not available in the financial documents provided" and suggest consulting the company's official SEC filings or investor relations for complete information.
|
| 216 |
+
|
| 217 |
+
**DATA CUTOFF**: All financial data was collected on December 7, 2025. Information or events after this date are not available in this system.
|
| 218 |
+
|
| 219 |
+
**ACCURACY OVER COMPLETENESS**: It is better to say "I don't have this information" than to make up numbers or calculations."""
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
class RAGChain:
    """Main RAG chain for financial Q&A with conversation memory.

    Both the sync (``process_query``) and async (``aprocess_query``) entry
    points run the same pipeline:

        1. optional query expansion
        2. retrieval for each expanded query
        3. deduplication
        4. optional MMR reranking
        5. optional contextual compression
        6. context formatting with [Source N] citations
        7. LLM answer generation (with recent conversation history)
        8. conversation bookkeeping and response caching

    The shared, non-I/O steps (cache lookup/write, history formatting,
    response assembly) live in private helpers so the two paths cannot
    drift apart.
    """

    # Number of most-recent messages (3 user/assistant exchanges) included
    # in the prompt's conversation-history preamble.
    _HISTORY_WINDOW = 6
    # Each historical message is truncated to this many characters.
    _HISTORY_MSG_LIMIT = 300

    def __init__(self):
        """Initialize all RAG components."""
        self.retriever = ZillizRetriever()
        self.query_expander = QueryExpander()
        self.reranker = MMRReranker()
        self.compressor = ContextualCompressor()
        self.llm = ChatOpenAI(
            model=settings.OPENAI_MODEL,
            temperature=0,  # deterministic output for financial analysis
            openai_api_key=settings.OPENAI_API_KEY,
            timeout=settings.LLM_TIMEOUT
        )

        # Conversation histories keyed by session_id.
        # NOTE(review): this dict grows without bound across sessions;
        # consider TTL-based eviction if the process is long-lived.
        self.conversations: dict[str, ConversationHistory] = {}

        self.prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT),
            ("user", """{conversation_history}

Context from financial documents:

{context}

Question: {query}

Please provide a detailed answer using the context above. If this is a follow-up question, use the conversation history to understand the context. Remember to cite sources using [Source N] notation.""")
        ])

    def _get_or_create_conversation(self, session_id: str) -> ConversationHistory:
        """Get existing conversation or create new one."""
        if session_id not in self.conversations:
            self.conversations[session_id] = ConversationHistory(max_tokens=4000)
        return self.conversations[session_id]

    def _cache_lookup(self, request: QueryRequest, start_time: float) -> Optional[QueryResponse]:
        """Return a cached QueryResponse for this request, or None on miss.

        Caching is only consulted for session-less queries, because answers
        for a session may depend on its conversation history.
        """
        if not settings.ENABLE_RESPONSE_CACHE or request.session_id:
            return None
        cached = cache_manager.response_cache.get(
            query=request.query,
            ticker=request.ticker,
            doc_types=request.doc_types,
            top_k=request.top_k
        )
        if cached is None:
            return None
        # Report actual elapsed time and mark the hit before returning.
        cached['processing_time'] = round(time.time() - start_time, 2)
        cached['from_cache'] = True
        return QueryResponse(**cached)

    def _maybe_cache(self, request: QueryRequest, response_dict: dict) -> None:
        """Store the response in the cache for session-less queries only."""
        if settings.ENABLE_RESPONSE_CACHE and not request.session_id:
            cache_manager.response_cache.set(
                query=request.query,
                response=response_dict,
                ticker=request.ticker,
                doc_types=request.doc_types,
                top_k=request.top_k
            )

    def _format_history(self, conversation: ConversationHistory) -> str:
        """Render recent exchanges as a plain-text preamble for the prompt.

        Returns an empty string when there is no history. Long messages are
        truncated with a trailing ellipsis to bound prompt size.
        """
        if not conversation.messages:
            return ""
        recent = conversation.get_messages()[-self._HISTORY_WINDOW:]
        if not recent:
            return ""
        history = "Previous conversation:\n"
        for msg in recent:
            role_label = "User" if msg["role"] == "user" else "Assistant"
            content = msg["content"][:self._HISTORY_MSG_LIMIT]
            if len(msg["content"]) > self._HISTORY_MSG_LIMIT:
                content += "..."
            history += f"{role_label}: {content}\n\n"
        return history

    def _build_response_dict(
        self,
        request: QueryRequest,
        answer: str,
        sources: List[Source],
        expanded_queries: List[str],
        compressed_docs: List[Document],
        session_id: str,
        start_time: float
    ) -> dict:
        """Assemble the serializable payload shared by the sync/async paths."""
        return {
            "answer": answer,
            # Source is a pydantic model; fall back to the raw value if not.
            "sources": [src.dict() if hasattr(src, 'dict') else src for src in sources],
            "query": request.query,
            "processing_time": round(time.time() - start_time, 2),
            # Only report expansions when expansion actually produced variants.
            "expanded_queries": expanded_queries if len(expanded_queries) > 1 else None,
            "num_documents_retrieved": len(compressed_docs),
            "session_id": session_id
        }

    def process_query(self, request: QueryRequest) -> QueryResponse:
        """
        Process a query through the full RAG pipeline with conversation memory.

        Args:
            request: QueryRequest with query and filters

        Returns:
            QueryResponse with answer and sources
        """
        start_time = time.time()

        cached = self._cache_lookup(request, start_time)
        if cached is not None:
            return cached

        citation_tracker = CitationTracker()

        # Get or create session
        session_id = request.session_id or f"session_{int(time.time())}"
        conversation = self._get_or_create_conversation(session_id)

        try:
            # Step 1: Query Expansion
            if settings.ENABLE_QUERY_EXPANSION:
                expanded_queries = self.query_expander.expand(
                    request.query,
                    num_variations=settings.MAX_QUERY_VARIATIONS - 1
                )
            else:
                expanded_queries = [request.query]

            # Step 2: Retrieve documents for each query
            all_documents: List[Document] = []
            for query in expanded_queries:
                all_documents.extend(self.retriever.retrieve(
                    query=query,
                    ticker=request.ticker,
                    doc_types=request.doc_types,
                    top_k=settings.RETRIEVAL_TOP_K
                ))

            # Step 3: Deduplicate documents
            unique_docs = self._deduplicate_documents(all_documents)

            # Step 4: Rerank (only worthwhile when we have more than top_k)
            if settings.ENABLE_RERANKING and len(unique_docs) > request.top_k:
                reranked_docs = self.reranker.rerank(
                    query=request.query,
                    documents=unique_docs,
                    top_k=request.top_k,
                    diversity_score=settings.MMR_DIVERSITY_SCORE
                )
            else:
                reranked_docs = unique_docs[:request.top_k]

            # Step 5: Contextual Compression
            if settings.ENABLE_COMPRESSION:
                compressed_docs = self.compressor.compress(
                    query=request.query,
                    documents=reranked_docs
                )
            else:
                compressed_docs = reranked_docs

            # Step 6: Prepare context with citations
            context = citation_tracker.format_context_with_citations(compressed_docs)

            # Step 7: Conversation history preamble
            conversation_history = self._format_history(conversation)

            # Step 8: Generate answer
            chain = self.prompt | self.llm
            response = chain.invoke({
                "conversation_history": conversation_history,
                "context": context,
                "query": request.query
            })
            answer = response.content

            # Step 9: Update conversation history
            conversation.add_message("user", request.query)
            conversation.add_message("assistant", answer)

            # Step 10: Build response, cache, and return
            sources = [Source(**src) for src in citation_tracker.get_sources_list()]
            response_dict = self._build_response_dict(
                request, answer, sources, expanded_queries,
                compressed_docs, session_id, start_time
            )
            self._maybe_cache(request, response_dict)
            return QueryResponse(**response_dict)

        except Exception as e:
            print(f"RAG chain error: {e}")
            raise

    def _deduplicate_documents(
        self,
        documents: List[Document]
    ) -> List[Document]:
        """
        Remove duplicate documents based on content and metadata.

        Args:
            documents: List of documents

        Returns:
            Deduplicated list of documents (original order preserved)
        """
        seen = set()
        unique_docs = []

        for doc in documents:
            # Key combines provenance with a content prefix so chunks that
            # share a filename/chunk_id but differ in text are both kept.
            metadata = doc.metadata
            key = f"{metadata.get('filename', '')}_{metadata.get('chunk_id', '')}_{doc.page_content[:100]}"

            if key not in seen:
                seen.add(key)
                unique_docs.append(doc)

        return unique_docs

    def clear_conversation(self, session_id: str):
        """Clear conversation history for a session."""
        if session_id in self.conversations:
            del self.conversations[session_id]

    async def aprocess_query(self, request: QueryRequest) -> QueryResponse:
        """
        Async version of process_query.

        Args:
            request: QueryRequest with query and filters

        Returns:
            QueryResponse with answer and sources
        """
        start_time = time.time()

        cached = self._cache_lookup(request, start_time)
        if cached is not None:
            return cached

        citation_tracker = CitationTracker()

        # Get or create session
        session_id = request.session_id or f"session_{int(time.time())}"
        conversation = self._get_or_create_conversation(session_id)

        try:
            # Query expansion
            if settings.ENABLE_QUERY_EXPANSION:
                expanded_queries = await self.query_expander.aexpand(
                    request.query,
                    num_variations=settings.MAX_QUERY_VARIATIONS - 1
                )
            else:
                expanded_queries = [request.query]

            # Retrieve documents
            all_documents: List[Document] = []
            for query in expanded_queries:
                docs = await self.retriever.aretrieve(
                    query=query,
                    ticker=request.ticker,
                    doc_types=request.doc_types,
                    top_k=settings.RETRIEVAL_TOP_K
                )
                all_documents.extend(docs)

            # Deduplicate
            unique_docs = self._deduplicate_documents(all_documents)

            # Rerank
            if settings.ENABLE_RERANKING and len(unique_docs) > request.top_k:
                reranked_docs = await self.reranker.arerank(
                    query=request.query,
                    documents=unique_docs,
                    top_k=request.top_k,
                    diversity_score=settings.MMR_DIVERSITY_SCORE
                )
            else:
                reranked_docs = unique_docs[:request.top_k]

            # Compress
            if settings.ENABLE_COMPRESSION:
                compressed_docs = await self.compressor.acompress(
                    query=request.query,
                    documents=reranked_docs
                )
            else:
                compressed_docs = reranked_docs

            # Prepare context and history
            context = citation_tracker.format_context_with_citations(compressed_docs)
            conversation_history = self._format_history(conversation)

            # Generate answer
            chain = self.prompt | self.llm
            response = await chain.ainvoke({
                "conversation_history": conversation_history,
                "context": context,
                "query": request.query
            })
            answer = response.content

            # Update conversation history
            conversation.add_message("user", request.query)
            conversation.add_message("assistant", answer)

            # Build response, cache, and return
            sources = [Source(**src) for src in citation_tracker.get_sources_list()]
            response_dict = self._build_response_dict(
                request, answer, sources, expanded_queries,
                compressed_docs, session_id, start_time
            )
            self._maybe_cache(request, response_dict)
            return QueryResponse(**response_dict)

        except Exception as e:
            print(f"RAG chain error: {e}")
            raise
|
backend/app/rag/compressor.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Contextual compression to extract relevant sentences from retrieved chunks."""
|
| 2 |
+
from typing import List
|
| 3 |
+
from langchain_core.documents import Document
|
| 4 |
+
from langchain_openai import ChatOpenAI
|
| 5 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 6 |
+
from app.config import settings
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ContextualCompressor:
    """Compresses retrieved documents by extracting only relevant content.

    Each document is sent to the LLM together with the query; the LLM
    returns only the sentences that bear on the query (or the sentinel
    "NOT_RELEVANT", in which case the document is dropped). Short
    documents are passed through untouched.
    """

    # Documents shorter than this (in characters) are already concise
    # enough and are passed through without an LLM call.
    MIN_COMPRESS_LENGTH = 200

    def __init__(self):
        """Initialize LLM for compression."""
        self.llm = ChatOpenAI(
            model=settings.OPENAI_MODEL,
            temperature=0,  # deterministic extraction
            openai_api_key=settings.OPENAI_API_KEY
        )

        self.prompt = ChatPromptTemplate.from_messages([
            ("system", """You are a precise information extraction assistant.

Given a query and a document chunk, extract ONLY the sentences that are directly relevant to answering the query.

Rules:
1. Extract complete sentences (don't cut off mid-sentence)
2. Maintain the original wording - do not paraphrase
3. Keep financial figures and context together
4. If the entire chunk is relevant, return it as-is
5. If nothing is relevant, return "NOT_RELEVANT"
6. Preserve numerical data and labels exactly as written

Return only the extracted sentences, separated by spaces."""),
            ("user", """Query: {query}

Document:
{document}

Relevant sentences:""")
        ])

    @staticmethod
    def _wrap_extraction(doc: Document, raw: str) -> Optional[Document]:
        """Turn an LLM extraction into a Document, or None if irrelevant.

        Returns None when the model reported nothing relevant; otherwise a
        new Document carrying the extracted text and a copy of the original
        metadata (so citation info survives compression).
        """
        extracted = raw.strip()
        if extracted == "NOT_RELEVANT" or not extracted:
            return None
        return Document(
            page_content=extracted,
            metadata=doc.metadata.copy()
        )

    def compress(
        self,
        query: str,
        documents: List[Document]
    ) -> List[Document]:
        """
        Compress documents by extracting relevant sentences.

        Args:
            query: Original query text
            documents: List of documents to compress

        Returns:
            List of compressed documents (irrelevant ones are dropped;
            documents that fail compression are kept uncompressed)
        """
        if not documents:
            return []

        # Hoisted out of the loop: the prompt|llm composition is invariant.
        chain = self.prompt | self.llm
        compressed_docs = []

        for doc in documents:
            try:
                # Skip very short documents (already concise)
                if len(doc.page_content) < self.MIN_COMPRESS_LENGTH:
                    compressed_docs.append(doc)
                    continue

                response = chain.invoke({
                    "query": query,
                    "document": doc.page_content
                })

                result = self._wrap_extraction(doc, response.content)
                if result is not None:
                    compressed_docs.append(result)

            except Exception as e:
                print(f"Compression error for doc: {e}")
                # Fallback: include original document
                compressed_docs.append(doc)

        return compressed_docs

    def compress_batch(
        self,
        query: str,
        documents: List[Document],
        batch_size: int = 5
    ) -> List[Document]:
        """
        Compress documents in batches.

        NOTE(review): batches are still processed sequentially, one document
        at a time, so this currently offers no speedup over ``compress`` —
        it only bounds how many documents each ``compress`` call sees.

        Args:
            query: Original query text
            documents: List of documents to compress
            batch_size: Number of documents to process at once

        Returns:
            List of compressed documents
        """
        if not documents:
            return []

        compressed_docs = []
        for i in range(0, len(documents), batch_size):
            compressed_docs.extend(
                self.compress(query, documents[i:i + batch_size])
            )
        return compressed_docs

    async def acompress(
        self,
        query: str,
        documents: List[Document]
    ) -> List[Document]:
        """
        Async version of compress method.

        Args:
            query: Original query text
            documents: List of documents to compress

        Returns:
            List of compressed documents
        """
        if not documents:
            return []

        # Hoisted out of the loop: the prompt|llm composition is invariant.
        chain = self.prompt | self.llm
        compressed_docs = []

        for doc in documents:
            try:
                if len(doc.page_content) < self.MIN_COMPRESS_LENGTH:
                    compressed_docs.append(doc)
                    continue

                response = await chain.ainvoke({
                    "query": query,
                    "document": doc.page_content
                })

                result = self._wrap_extraction(doc, response.content)
                if result is not None:
                    compressed_docs.append(result)

            except Exception as e:
                print(f"Compression error for doc: {e}")
                compressed_docs.append(doc)

        return compressed_docs
|
backend/app/rag/query_expander.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Query expansion for improved retrieval with multi-part question decomposition."""
|
| 2 |
+
from typing import List
|
| 3 |
+
from langchain_openai import ChatOpenAI
|
| 4 |
+
from langchain_core.prompts import ChatPromptTemplate
|
| 5 |
+
from app.config import settings
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class QueryExpander:
    """Expands queries into multiple variations and decomposes multi-part questions.

    Public entry points are ``expand`` (sync) and ``aexpand`` (async). Both
    return the original query first, followed by any decomposed sub-queries
    and LLM-generated rephrasings, deduplicated in order of appearance.
    """

    def __init__(self):
        """Initialize LLM and prompt templates for query expansion."""
        # Low temperature keeps rephrasings close to the original intent.
        self.llm = ChatOpenAI(
            model=settings.OPENAI_MODEL,
            temperature=0.3,
            openai_api_key=settings.OPENAI_API_KEY
        )

        # Prompt for detecting and decomposing multi-part questions
        self.decompose_prompt = ChatPromptTemplate.from_messages([
            ("system", """You are a financial query analyzer. Analyze if this is a multi-part question with distinct sub-questions.

If the query contains multiple DISTINCT questions (numbered or clearly separated topics), break it down into individual sub-queries.
If it's a single complex question, return it as-is.

Rules:
- Each sub-query should be standalone and answerable independently
- Preserve the ticker/company context in each sub-query
- Keep financial terminology intact
- Number sub-queries if there are multiple

Examples:
Input: "For ACM: 1. What was revenue? 2. What are the risks?"
Output:
1. What was ACM's revenue for the most recent fiscal year?
2. What are the major risks for ACM according to the latest 10-K?

Input: "What was ACM's revenue growth rate?"
Output:
What was ACM's revenue growth rate?

Return ONLY the decomposed queries, one per line. If single query, return it unchanged."""),
            ("user", "Query: {query}")
        ])

        # Prompt for expanding individual queries
        self.expansion_prompt = ChatPromptTemplate.from_messages([
            ("system", """You are a financial query expansion expert.
Generate {num_variations} alternative phrasings of the user's query that would help retrieve relevant financial information.

Focus on:
- Different financial terminology (e.g., "revenue" vs "sales" vs "contract revenue")
- Different ways to ask about financial metrics
- Explicit mention of financial statement types if relevant (balance sheet, income statement, cash flow, 10-K)
- Keeping the core intent of the original query

Return ONLY the query variations, one per line, without numbering or explanations."""),
            ("user", "Original query: {query}")
        ])

    # ------------------------------------------------------------------
    # Shared helpers (used by both the sync and async code paths)
    # ------------------------------------------------------------------

    @staticmethod
    def _append_unique(queries: List[str], candidates: List[str]) -> None:
        """Append candidates to queries in order, skipping any duplicates.

        Checks each candidate against the growing list, so duplicates
        *within* a candidate batch are also suppressed (the previous
        list-comprehension version could add the same variation twice).
        """
        for candidate in candidates:
            if candidate not in queries:
                queries.append(candidate)

    @staticmethod
    def _is_multi_part(query: str) -> bool:
        """Cheap heuristic: does the query look like several distinct questions?

        Used to avoid an LLM round-trip for obviously single-part queries.
        """
        return any([
            '1.' in query and '2.' in query,
            '1)' in query and '2)' in query,
            query.count('?') > 1,  # Multiple question marks
            ' and ' in query.lower() and len(query.split()) > 15  # Long query with 'and'
        ])

    @staticmethod
    def _split_lines(content: str) -> List[str]:
        """Split LLM output into stripped, non-empty lines."""
        return [line.strip() for line in content.strip().split('\n') if line.strip()]

    @staticmethod
    def _strip_numbering(lines: List[str]) -> List[str]:
        """Remove leading "1. " / "1) " style numbering from each line.

        Empty results after stripping are discarded.
        """
        import re  # local import mirrors the file's existing style
        cleaned = []
        for line in lines:
            text = re.sub(r'^\d+[\.\)]\s*', '', line)
            if text:
                cleaned.append(text)
        return cleaned

    # ------------------------------------------------------------------
    # Synchronous API
    # ------------------------------------------------------------------

    def expand(self, query: str, num_variations: int = 2) -> List[str]:
        """
        Expand a query into multiple variations.
        Handles multi-part questions by decomposing them first.

        Args:
            query: Original query text
            num_variations: Number of variations to generate per sub-query (default: 2)

        Returns:
            List of query variations including the original and decomposed parts
        """
        # Always include the original query first
        all_queries = [query]

        try:
            # Step 1: Check if this is a multi-part question and decompose
            sub_queries = self._decompose_query(query)

            # Step 2: If decomposed into multiple parts, expand each part
            if len(sub_queries) > 1:
                print(f"Decomposed into {len(sub_queries)} sub-queries")
                for sub_query in sub_queries:
                    self._append_unique(all_queries, [sub_query])
                    # Generate variations for this sub-query if it's complex enough
                    if self._should_expand(sub_query):
                        self._append_unique(
                            all_queries,
                            self._generate_variations(sub_query, num_variations)
                        )
            else:
                # Single query - just expand normally if complex enough
                if self._should_expand(query):
                    self._append_unique(
                        all_queries,
                        self._generate_variations(query, num_variations)
                    )

            return all_queries

        except Exception as e:
            print(f"Query expansion error: {e}")
            # Fallback to original query only
            return [query]

    def _decompose_query(self, query: str) -> List[str]:
        """
        Decompose a multi-part query into individual sub-queries.

        Args:
            query: Original query text

        Returns:
            List of sub-queries (or single query if not multi-part)
        """
        try:
            # Cheap heuristic gate before spending an LLM call
            if not self._is_multi_part(query):
                return [query]

            # Use LLM to decompose
            chain = self.decompose_prompt | self.llm
            response = chain.invoke({"query": query})

            cleaned = self._strip_numbering(self._split_lines(response.content))
            return cleaned if cleaned else [query]

        except Exception as e:
            print(f"Query decomposition error: {e}")
            return [query]

    def _generate_variations(self, query: str, num_variations: int) -> List[str]:
        """
        Generate variations of a single query.

        Args:
            query: Query text to expand
            num_variations: Number of variations to generate

        Returns:
            List of query variations (empty list on failure)
        """
        try:
            chain = self.expansion_prompt | self.llm
            response = chain.invoke({
                "query": query,
                "num_variations": num_variations
            })
            # Cap at the requested count even if the LLM over-produces
            return self._split_lines(response.content)[:num_variations]

        except Exception as e:
            print(f"Variation generation error: {e}")
            return []

    def _should_expand(self, query: str) -> bool:
        """
        Determine if a query should be expanded.

        Simple queries (< 5 words) or trivially-phrased questions typically
        don't benefit from LLM expansion.

        Args:
            query: Original query text

        Returns:
            True if the query should be expanded
        """
        # Don't expand very short queries
        if len(query.split()) < 5:
            return False

        # Don't expand simple what/when/where questions
        query_lower = query.lower().strip()
        simple_patterns = [
            query_lower.startswith("what is"),
            query_lower.startswith("what was"),
            query_lower.startswith("when did"),
            query_lower.startswith("where is"),
            "yes or no" in query_lower,
        ]
        if any(simple_patterns):
            return False

        # Expand complex queries
        return True

    # ------------------------------------------------------------------
    # Asynchronous API (mirrors the sync path via the shared helpers)
    # ------------------------------------------------------------------

    async def aexpand(self, query: str, num_variations: int = 2) -> List[str]:
        """
        Async version of expand method.

        Args:
            query: Original query text
            num_variations: Number of variations to generate per sub-query

        Returns:
            List of query variations including the original and decomposed parts
        """
        all_queries = [query]

        try:
            # Decompose if multi-part
            sub_queries = await self._adecompose_query(query)

            if len(sub_queries) > 1:
                print(f"Decomposed into {len(sub_queries)} sub-queries")
                for sub_query in sub_queries:
                    self._append_unique(all_queries, [sub_query])
                    if self._should_expand(sub_query):
                        self._append_unique(
                            all_queries,
                            await self._agenerate_variations(sub_query, num_variations)
                        )
            else:
                if self._should_expand(query):
                    self._append_unique(
                        all_queries,
                        await self._agenerate_variations(query, num_variations)
                    )

            return all_queries

        except Exception as e:
            print(f"Query expansion error: {e}")
            return [query]

    async def _adecompose_query(self, query: str) -> List[str]:
        """Async version of _decompose_query."""
        try:
            if not self._is_multi_part(query):
                return [query]

            chain = self.decompose_prompt | self.llm
            response = await chain.ainvoke({"query": query})

            cleaned = self._strip_numbering(self._split_lines(response.content))
            return cleaned if cleaned else [query]

        except Exception as e:
            print(f"Query decomposition error: {e}")
            return [query]

    async def _agenerate_variations(self, query: str, num_variations: int) -> List[str]:
        """Async version of _generate_variations."""
        try:
            chain = self.expansion_prompt | self.llm
            response = await chain.ainvoke({
                "query": query,
                "num_variations": num_variations
            })
            return self._split_lines(response.content)[:num_variations]

        except Exception as e:
            print(f"Variation generation error: {e}")
            return []
|
backend/app/rag/reranker.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reranking retrieved documents using MMR (Maximal Marginal Relevance)."""
|
| 2 |
+
from typing import List
|
| 3 |
+
import numpy as np
|
| 4 |
+
from langchain_core.documents import Document
|
| 5 |
+
from langchain_openai import OpenAIEmbeddings
|
| 6 |
+
from app.config import settings
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class MMRReranker:
    """Reranks documents using the Maximal Marginal Relevance algorithm."""

    def __init__(self):
        """Initialize embeddings for MMR computation."""
        self.embeddings = OpenAIEmbeddings(
            model=settings.OPENAI_EMBEDDING_MODEL,
            openai_api_key=settings.OPENAI_API_KEY,
            dimensions=settings.OPENAI_EMBEDDING_DIMENSION
        )

    def rerank(
        self,
        query: str,
        documents: List[Document],
        top_k: int = 10,
        diversity_score: float = 0.3
    ) -> List[Document]:
        """
        Rerank documents using MMR to balance relevance and diversity.

        MMR Formula:
            MMR = argmax [λ * Sim(D_i, Q) - (1-λ) * max Sim(D_i, D_j)]
            where D_j are already selected documents

        Args:
            query: Original query text
            documents: List of retrieved documents
            top_k: Number of documents to return
            diversity_score: Lambda parameter (0 = max diversity, 1 = max relevance)

        Returns:
            Reranked list of top_k documents
        """
        if not documents:
            return []

        # If we have fewer documents than top_k, there is nothing to prune
        if len(documents) <= top_k:
            return documents

        try:
            # Embed query and all documents once up front
            query_embedding = self.embeddings.embed_query(query)
            doc_texts = [doc.page_content for doc in documents]
            doc_embeddings = self.embeddings.embed_documents(doc_texts)

            query_vec = np.array(query_embedding)
            doc_vecs = np.array(doc_embeddings)

            # Relevance of every document to the query
            query_similarities = self._cosine_similarity(query_vec, doc_vecs)

            # Greedy MMR selection
            selected_indices = []
            remaining_indices = list(range(len(documents)))

            for _ in range(min(top_k, len(documents))):
                if not remaining_indices:
                    break

                mmr_scores = []

                for idx in remaining_indices:
                    # Relevance to query
                    relevance = query_similarities[idx]

                    # Redundancy: max similarity to anything already selected
                    if selected_indices:
                        selected_vecs = doc_vecs[selected_indices]
                        redundancy = np.max(
                            self._cosine_similarity(doc_vecs[idx], selected_vecs)
                        )
                    else:
                        redundancy = 0

                    # MMR score
                    mmr = diversity_score * relevance - (1 - diversity_score) * redundancy
                    mmr_scores.append((idx, mmr))

                # Select document with highest MMR score (first on ties)
                best_idx = max(mmr_scores, key=lambda x: x[1])[0]
                selected_indices.append(best_idx)
                remaining_indices.remove(best_idx)

            # Return reranked documents in selection order
            return [documents[i] for i in selected_indices]

        except Exception as e:
            print(f"Reranking error: {e}")
            # Fallback: return top_k by original similarity score
            return self._fallback_rerank(documents, top_k)

    def _cosine_similarity(
        self,
        vec1: np.ndarray,
        vec2: np.ndarray
    ) -> np.ndarray:
        """
        Compute cosine similarity between vectors.

        Zero vectors are handled explicitly: dividing by a zero norm would
        produce NaN (0/0) and poison every downstream MMR score, so zero
        vectors instead get similarity 0 to everything.

        Args:
            vec1: Single vector or array of vectors
            vec2: Array of vectors

        Returns:
            Similarity scores (flattened to 1-D when vec1 is a single vector)
        """
        if vec1.ndim == 1:
            vec1 = vec1.reshape(1, -1)
        if vec2.ndim == 1:
            vec2 = vec2.reshape(1, -1)

        # Normalize vectors, guarding against zero norms (0/0 -> NaN)
        norm1 = np.linalg.norm(vec1, axis=1, keepdims=True)
        norm2 = np.linalg.norm(vec2, axis=1, keepdims=True)
        vec1_norm = vec1 / np.where(norm1 == 0.0, 1.0, norm1)
        vec2_norm = vec2 / np.where(norm2 == 0.0, 1.0, norm2)

        # Cosine similarity is the dot product of unit vectors
        similarity = np.dot(vec1_norm, vec2_norm.T)

        return similarity.flatten() if similarity.shape[0] == 1 else similarity

    def _fallback_rerank(
        self,
        documents: List[Document],
        top_k: int
    ) -> List[Document]:
        """
        Fallback reranking using existing similarity scores.

        Args:
            documents: List of documents with similarity_score in metadata
            top_k: Number of documents to return

        Returns:
            Top-k documents sorted by similarity score
        """
        # Sort by similarity score (higher is better); missing scores sort last
        sorted_docs = sorted(
            documents,
            key=lambda d: d.metadata.get('similarity_score', 0),
            reverse=True
        )

        return sorted_docs[:top_k]

    async def arerank(
        self,
        query: str,
        documents: List[Document],
        top_k: int = 10,
        diversity_score: float = 0.3
    ) -> List[Document]:
        """
        Async version of rerank method.

        Runs the synchronous implementation on a worker thread so the event
        loop is not blocked by the embedding calls.

        Args:
            query: Original query text
            documents: List of retrieved documents
            top_k: Number of documents to return
            diversity_score: Lambda parameter for MMR

        Returns:
            Reranked list of top_k documents
        """
        import asyncio
        return await asyncio.to_thread(
            self.rerank,
            query,
            documents,
            top_k,
            diversity_score
        )
|
backend/app/rag/retriever.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Zilliz retriever with hybrid search capabilities."""
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
from langchain_core.documents import Document
|
| 4 |
+
from langchain_milvus import Milvus
|
| 5 |
+
from langchain_openai import OpenAIEmbeddings
|
| 6 |
+
from app.config import settings
|
| 7 |
+
from app.utils.cache import cache_manager
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CachedEmbeddings(OpenAIEmbeddings):
    """Wrapper for OpenAI embeddings with a read-through cache.

    When ``settings.ENABLE_EMBEDDING_CACHE`` is false, both methods delegate
    straight to the base class with no bookkeeping overhead.
    """

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query, consulting the cache first.

        Args:
            text: Query text to embed

        Returns:
            Embedding vector (from cache on a hit, else freshly computed)
        """
        if not settings.ENABLE_EMBEDDING_CACHE:
            return super().embed_query(text)

        cached = cache_manager.embedding_cache.get(text)
        if cached is not None:
            return cached

        # Cache miss: get embedding from OpenAI, then store it
        embedding = super().embed_query(text)
        cache_manager.embedding_cache.set(text, embedding)
        return embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed many texts, only calling OpenAI for cache misses.

        Args:
            texts: Texts to embed

        Returns:
            Embedding vectors in the same order as ``texts``
        """
        # Fast path: caching disabled, no need for the placeholder bookkeeping
        if not settings.ENABLE_EMBEDDING_CACHE:
            return super().embed_documents(texts)

        # Placeholders keep output order aligned with input order
        embeddings = [None] * len(texts)
        uncached_texts = []
        uncached_indices = []

        # Partition into cache hits and misses
        for i, text in enumerate(texts):
            cached = cache_manager.embedding_cache.get(text)
            if cached is not None:
                embeddings[i] = cached
            else:
                uncached_texts.append(text)
                uncached_indices.append(i)

        # One batched API call covers all misses
        if uncached_texts:
            new_embeddings = super().embed_documents(uncached_texts)
            for idx, embedding in zip(uncached_indices, new_embeddings):
                embeddings[idx] = embedding
                cache_manager.embedding_cache.set(texts[idx], embedding)

        return embeddings
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class ZillizRetriever:
    """Retriever for Zilliz vector database with metadata filtering."""

    def __init__(self):
        """Initialize Zilliz connection and embeddings."""
        # Embeddings are wrapped with a cache so repeated queries are cheap.
        self.embeddings = CachedEmbeddings(
            model=settings.OPENAI_EMBEDDING_MODEL,
            openai_api_key=settings.OPENAI_API_KEY,
            dimensions=settings.OPENAI_EMBEDDING_DIMENSION
        )

        # Milvus/Zilliz vector store over the configured collection.
        self.vector_store = Milvus(
            embedding_function=self.embeddings,
            collection_name=settings.COLLECTION_NAME,
            connection_args={
                "uri": settings.ZILLIZ_URI,
                "token": settings.ZILLIZ_TOKEN,
            },
            auto_id=True,
        )

    def retrieve(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None,
        top_k: int = 30
    ) -> List[Document]:
        """
        Retrieve documents using hybrid search (semantic + metadata filtering).

        Args:
            query: User query text
            ticker: Optional ticker symbol filter (e.g., 'ACM')
            doc_types: Optional list of document types to filter
            top_k: Number of documents to retrieve

        Returns:
            List of Document objects with metadata and similarity scores
        """
        # Serve from the document cache when possible
        if settings.ENABLE_DOCUMENT_CACHE:
            hit = cache_manager.document_cache.get(query, ticker, doc_types)
            if hit is not None:
                return hit[:top_k]  # Trim to the requested count

        # Translate the optional filters into a Milvus expression
        expr = self._build_filter_expression(ticker, doc_types)

        search_kwargs = {"query": query, "k": top_k}
        if expr:
            search_kwargs["expr"] = expr
        scored_results = self.vector_store.similarity_search_with_score(**search_kwargs)

        # Stamp each document with its similarity score before returning
        documents = []
        for doc, score in scored_results:
            doc.metadata['similarity_score'] = float(score)
            documents.append(doc)

        # Populate the cache for subsequent identical queries
        if settings.ENABLE_DOCUMENT_CACHE:
            cache_manager.document_cache.set(query, documents, ticker, doc_types)

        return documents

    def _build_filter_expression(
        self,
        ticker: Optional[str],
        doc_types: Optional[List[str]]
    ) -> Optional[str]:
        """
        Build a Milvus filter expression from the optional parameters.

        Args:
            ticker: Optional ticker symbol
            doc_types: Optional list of document types

        Returns:
            Filter expression string, or None when no filters apply
        """
        clauses = []

        if ticker:
            clauses.append(f'ticker == "{ticker}"')

        if doc_types:
            # Multiple doc types are OR-ed inside one parenthesized clause
            alternatives = " or ".join(f'doc_type == "{dt}"' for dt in doc_types)
            clauses.append(f'({alternatives})')

        # Individual clauses combine with AND
        return " and ".join(clauses) if clauses else None

    async def aretrieve(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None,
        top_k: int = 30
    ) -> List[Document]:
        """
        Async version of retrieve method.

        Note: the Milvus client in use is synchronous, so this offloads the
        sync call to a worker thread; a true async client would be needed for
        native async retrieval.
        """
        import asyncio
        return await asyncio.to_thread(
            self.retrieve,
            query,
            ticker,
            doc_types,
            top_k
        )

    def get_collection_stats(self) -> dict:
        """
        Get statistics about the collection.

        Returns:
            Dictionary with collection statistics (or an "error" key on failure)
        """
        try:
            target = self.vector_store.col
            return {
                "collection_name": settings.COLLECTION_NAME,
                "total_documents": target.num_entities,
                "embedding_dimension": settings.OPENAI_EMBEDDING_DIMENSION,
            }
        except Exception as e:
            return {
                "error": str(e),
                "collection_name": settings.COLLECTION_NAME
            }
|
backend/app/utils/__init__.py
ADDED
|
File without changes
|
backend/app/utils/cache.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Caching system for embeddings, queries, and responses."""
|
| 2 |
+
import hashlib
|
| 3 |
+
import time
|
| 4 |
+
from typing import Optional, Dict, Any, List
|
| 5 |
+
from datetime import datetime, timedelta
|
| 6 |
+
import json
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CacheEntry:
    """A single cached value plus the bookkeeping needed for expiry and eviction decisions."""

    def __init__(self, value: Any, ttl: int = 3600):
        """
        Wrap *value* with a creation timestamp and a time-to-live.

        Args:
            value: Payload to store.
            ttl: Lifetime in seconds before the entry is considered stale
                (default: 3600, i.e. one hour).
        """
        self.value = value              # cached payload
        self.created_at = time.time()   # wall-clock creation time
        self.ttl = ttl                  # seconds until expiry
        self.hit_count = 0              # number of successful lookups

    def is_expired(self) -> bool:
        """Return True once the entry's age exceeds its TTL."""
        age = time.time() - self.created_at
        return age > self.ttl

    def increment_hits(self):
        """Record one successful cache lookup against this entry."""
        self.hit_count = self.hit_count + 1
| 33 |
+
|
| 34 |
+
class EmbeddingCache:
    """TTL cache for query embeddings, keyed by normalized query text.

    Avoids recomputing embeddings for repeated queries. Entries expire after
    `ttl` seconds and are lazily removed on lookup; when the cache is full,
    the oldest ~10% of entries are evicted to make room for a new key.
    """

    def __init__(self, max_size: int = 1000, ttl: int = 86400):
        """
        Initialize embedding cache.

        Args:
            max_size: Maximum number of entries (default: 1000)
            ttl: Time to live in seconds (default: 24 hours)
        """
        self.cache: Dict[str, CacheEntry] = {}
        self.max_size = max_size
        self.ttl = ttl
        self.hits = 0    # lookups that returned a live entry
        self.misses = 0  # lookups that found nothing (or only an expired entry)

    def _generate_key(self, text: str) -> str:
        """Hash the normalized (lowercased, stripped) text into a stable cache key."""
        return hashlib.md5(text.lower().strip().encode()).hexdigest()

    def get(self, text: str) -> Optional[List[float]]:
        """
        Get cached embedding.

        Args:
            text: Query text

        Returns:
            Cached embedding vector, or None on a miss or expired entry.
        """
        key = self._generate_key(text)
        entry = self.cache.get(key)

        if entry is not None:
            if not entry.is_expired():
                entry.increment_hits()
                self.hits += 1
                return entry.value
            # Expired: drop it and fall through to a miss.
            del self.cache[key]

        self.misses += 1
        return None

    def set(self, text: str, embedding: List[float]):
        """
        Cache an embedding.

        Args:
            text: Query text
            embedding: Embedding vector
        """
        key = self._generate_key(text)

        # FIX: only evict when inserting a NEW key at capacity. Overwriting an
        # existing key does not grow the cache, so evicting in that case
        # needlessly discarded up to 10% of valid entries.
        if key not in self.cache and len(self.cache) >= self.max_size:
            self._evict_oldest()

        self.cache[key] = CacheEntry(embedding, ttl=self.ttl)

    def _evict_oldest(self):
        """Remove the oldest ~10% of entries (at least one), by creation time."""
        num_to_remove = max(1, self.max_size // 10)
        oldest_first = sorted(self.cache, key=lambda k: self.cache[k].created_at)
        for key in oldest_first[:num_to_remove]:
            del self.cache[key]

    def clear(self):
        """Clear all cached embeddings and reset hit/miss counters."""
        self.cache.clear()
        self.hits = 0
        self.misses = 0

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics (size, hit/miss counts, hit rate percentage)."""
        total_requests = self.hits + self.misses
        hit_rate = (self.hits / total_requests * 100) if total_requests > 0 else 0

        return {
            "size": len(self.cache),
            "max_size": self.max_size,
            "hits": self.hits,
            "misses": self.misses,
            "hit_rate": round(hit_rate, 2),
            "total_requests": total_requests
        }
| 129 |
+
|
| 130 |
+
class QueryResponseCache:
    """TTL cache for fully-formed query responses, keyed by query text plus filters.

    Eviction is frequency-based: entries with the lowest hit count (ties
    broken by age) are dropped first, so popular responses survive longest.
    """

    def __init__(self, max_size: int = 500, ttl: int = 3600):
        """
        Initialize response cache.

        Args:
            max_size: Maximum number of entries (default: 500)
            ttl: Time to live in seconds (default: 1 hour)
        """
        self.cache: Dict[str, CacheEntry] = {}
        self.max_size = max_size
        self.ttl = ttl
        self.hits = 0
        self.misses = 0

    def _generate_key(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None,
        top_k: int = 10
    ) -> str:
        """Build a stable MD5 key from the normalized query parameters."""
        # Normalize each component so equivalent requests share a key.
        parts = [
            query.lower().strip(),
            ticker.lower() if ticker else "",
            ",".join(sorted(doc_types) if doc_types else []),
            str(top_k),
        ]
        return hashlib.md5("|".join(parts).encode()).hexdigest()

    def get(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None,
        top_k: int = 10
    ) -> Optional[Dict[str, Any]]:
        """
        Get cached response.

        Args:
            query: Query text
            ticker: Ticker filter
            doc_types: Document type filters
            top_k: Number of results

        Returns:
            Cached response, or None on a miss or expired entry.
        """
        key = self._generate_key(query, ticker, doc_types, top_k)
        entry = self.cache.get(key)

        if entry is not None:
            if not entry.is_expired():
                entry.increment_hits()
                self.hits += 1
                return entry.value
            # Expired entry: discard it and treat the lookup as a miss.
            del self.cache[key]

        self.misses += 1
        return None

    def set(
        self,
        query: str,
        response: Dict[str, Any],
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None,
        top_k: int = 10
    ):
        """
        Cache a response.

        Args:
            query: Query text
            response: Response to cache
            ticker: Ticker filter
            doc_types: Document type filters
            top_k: Number of results
        """
        key = self._generate_key(query, ticker, doc_types, top_k)

        if len(self.cache) >= self.max_size:
            self._evict_lru()

        self.cache[key] = CacheEntry(response, ttl=self.ttl)

    def _evict_lru(self):
        """Drop the ~10% least-used entries (lowest hit count, then oldest)."""
        num_to_remove = max(1, self.max_size // 10)
        least_used_first = sorted(
            self.cache,
            key=lambda k: (self.cache[k].hit_count, self.cache[k].created_at)
        )
        for key in least_used_first[:num_to_remove]:
            del self.cache[key]

    def clear(self):
        """Clear all cached responses and reset counters."""
        self.cache.clear()
        self.hits = 0
        self.misses = 0

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics, including a rough LLM-cost-savings estimate."""
        total_requests = self.hits + self.misses
        hit_rate = (self.hits / total_requests * 100) if total_requests > 0 else 0

        # Each hit saved roughly one LLM call (~$0.0001).
        cost_per_query = 0.0001
        estimated_savings = self.hits * cost_per_query

        return {
            "size": len(self.cache),
            "max_size": self.max_size,
            "hits": self.hits,
            "misses": self.misses,
            "hit_rate": round(hit_rate, 2),
            "total_requests": total_requests,
            "estimated_savings_usd": round(estimated_savings, 4)
        }
| 267 |
+
|
| 268 |
+
class DocumentCache:
    """TTL cache for retrieved document lists, keyed by query text plus filters.

    Saves repeated vector-store searches for identical search parameters.
    """

    def __init__(self, max_size: int = 200, ttl: int = 7200):
        """
        Initialize document cache.

        Args:
            max_size: Maximum number of entries (default: 200)
            ttl: Time to live in seconds (default: 2 hours)
        """
        self.cache: Dict[str, CacheEntry] = {}
        self.max_size = max_size
        self.ttl = ttl
        self.hits = 0
        self.misses = 0

    def _generate_key(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None
    ) -> str:
        """Hash the normalized search parameters into a stable cache key."""
        q = query.lower().strip()
        t = ticker.lower() if ticker else ""
        types = ",".join(sorted(doc_types) if doc_types else [])
        return hashlib.md5(f"{q}|{t}|{types}".encode()).hexdigest()

    def get(
        self,
        query: str,
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None
    ) -> Optional[List[Any]]:
        """Return cached documents for these parameters, or None on a miss."""
        key = self._generate_key(query, ticker, doc_types)
        entry = self.cache.get(key)

        if entry is not None:
            if not entry.is_expired():
                entry.increment_hits()
                self.hits += 1
                return entry.value
            # Expired entry: discard it and treat the lookup as a miss.
            del self.cache[key]

        self.misses += 1
        return None

    def set(
        self,
        query: str,
        documents: List[Any],
        ticker: Optional[str] = None,
        doc_types: Optional[List[str]] = None
    ):
        """Cache the documents retrieved for these search parameters."""
        key = self._generate_key(query, ticker, doc_types)

        if len(self.cache) >= self.max_size:
            self._evict_oldest()

        self.cache[key] = CacheEntry(documents, ttl=self.ttl)

    def _evict_oldest(self):
        """Drop the oldest ~10% of entries (at least one), by creation time."""
        num_to_remove = max(1, self.max_size // 10)
        by_age = sorted(self.cache, key=lambda k: self.cache[k].created_at)
        for key in by_age[:num_to_remove]:
            del self.cache[key]

    def clear(self):
        """Clear all cached documents and reset counters."""
        self.cache.clear()
        self.hits = 0
        self.misses = 0

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics (size, hit/miss counts, hit rate percentage)."""
        total_requests = self.hits + self.misses
        hit_rate = (self.hits / total_requests * 100) if total_requests > 0 else 0

        return {
            "size": len(self.cache),
            "max_size": self.max_size,
            "hits": self.hits,
            "misses": self.misses,
            "hit_rate": round(hit_rate, 2),
            "total_requests": total_requests
        }
| 366 |
+
|
| 367 |
+
class CacheManager:
    """Single owner of the application's three caches (embeddings, responses, documents)."""

    def __init__(self):
        """Build the embedding, response, and document caches with their default budgets."""
        self.embedding_cache = EmbeddingCache(max_size=1000, ttl=86400)   # 24h
        self.response_cache = QueryResponseCache(max_size=500, ttl=3600)  # 1h
        self.document_cache = DocumentCache(max_size=200, ttl=7200)       # 2h

    def clear_all(self):
        """Empty every cache and reset its counters."""
        for cache in (self.embedding_cache, self.response_cache, self.document_cache):
            cache.clear()

    def get_all_stats(self) -> Dict[str, Any]:
        """Snapshot statistics for all caches, stamped with the current time."""
        return {
            "embedding_cache": self.embedding_cache.get_stats(),
            "response_cache": self.response_cache.get_stats(),
            "document_cache": self.document_cache.get_stats(),
            "timestamp": datetime.now().isoformat()
        }
| 391 |
+
|
| 392 |
+
# Global cache manager instance
|
| 393 |
+
cache_manager = CacheManager()
|
backend/app/utils/citations.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utilities for tracking and formatting source citations."""
|
| 2 |
+
from typing import List, Dict, Any
|
| 3 |
+
from langchain_core.documents import Document
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class CitationTracker:
    """Tracks retrieved source documents and assigns stable 1-indexed citation IDs."""

    def __init__(self):
        self.sources: List[Document] = []     # deduplicated documents, in first-seen order
        self.source_map: Dict[str, int] = {}  # document key -> assigned source ID

    def add_document(self, doc: Document) -> int:
        """
        Add a document and return its source ID.

        The same document (same filename + chunk_id) always receives the
        same ID, so repeated retrievals deduplicate cleanly.

        Args:
            doc: LangChain Document with metadata

        Returns:
            Source ID (1-indexed)
        """
        doc_key = self._create_doc_key(doc)

        # Return existing ID if already added
        if doc_key in self.source_map:
            return self.source_map[doc_key]

        # Add new source
        source_id = len(self.sources) + 1
        self.sources.append(doc)
        self.source_map[doc_key] = source_id

        return source_id

    def _create_doc_key(self, doc: Document) -> str:
        """
        Create unique key for document deduplication.

        FIX: include the filename in the key. Previously the key was a
        hard-coded placeholder plus the chunk_id, so chunks with the same
        chunk_id from *different* files collided and were wrongly collapsed
        into a single source (and the `filename` lookup was dead code).
        """
        metadata = doc.metadata
        filename = metadata.get('filename', 'unknown')
        chunk_id = metadata.get('chunk_id', 'unknown')
        return f"{filename}_{chunk_id}"

    def format_context_with_citations(self, documents: List[Document]) -> str:
        """
        Format documents into context string with source markers.

        Args:
            documents: List of LangChain Documents

        Returns:
            Formatted context string with [Source N] markers
        """
        context_parts = []

        for doc in documents:
            source_id = self.add_document(doc)
            # Format: [Source N] content
            context_parts.append(f"[Source {source_id}] {doc.page_content}")

        return "\n\n".join(context_parts)

    def get_sources_list(self) -> List[Dict[str, Any]]:
        """
        Get formatted list of all sources.

        Returns:
            List of source dictionaries with metadata
        """
        sources_list = []

        for idx, doc in enumerate(self.sources, start=1):
            metadata = doc.metadata

            # Preview: first 200 chars, with an ellipsis when truncated.
            text_preview = doc.page_content[:200]
            if len(doc.page_content) > 200:
                text_preview += "..."

            # chunk_id may be numeric in metadata; expose it as a string.
            chunk_id = metadata.get('chunk_id')
            if chunk_id is not None:
                chunk_id = str(chunk_id)

            source_info = {
                "source_id": idx,
                "filename": metadata.get('filename', 'unknown'),
                "doc_type": metadata.get('doc_type', 'unknown'),
                "ticker": metadata.get('ticker'),
                "similarity_score": float(metadata.get('similarity_score', 0.0)),
                "chunk_id": chunk_id,
                "text_preview": text_preview
            }

            sources_list.append(source_info)

        return sources_list

    def clear(self):
        """Clear all tracked sources."""
        self.sources.clear()
        self.source_map.clear()
| 105 |
+
|
| 106 |
+
def extract_citations_from_answer(answer: str) -> List[int]:
    """
    Extract citation numbers from answer text.

    Args:
        answer: Generated answer containing [Source N] markers.

    Returns:
        Sorted list of unique source IDs referenced in the answer.
    """
    import re

    # Collect every "[Source N]" marker, deduplicate, and sort numerically.
    source_ids = {int(num) for num in re.findall(r'\[Source (\d+)\]', answer)}
    return sorted(source_ids)
|
backend/app/utils/conversation.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Conversation memory management for multi-turn interactions."""
|
| 2 |
+
from typing import List, Dict, Any, Optional
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class ConversationMessage:
    """One turn in a conversation: who said it, what was said, and when."""

    def __init__(self, role: str, content: str, timestamp: Optional[datetime] = None):
        # role is expected to be 'user' or 'assistant'
        self.role = role
        self.content = content
        # Default to "now" when no explicit timestamp is supplied.
        self.timestamp = timestamp if timestamp is not None else datetime.now()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict with an ISO-8601 timestamp."""
        return {
            "role": self.role,
            "content": self.content,
            "timestamp": self.timestamp.isoformat()
        }
| 23 |
+
|
| 24 |
+
class ConversationHistory:
    """Rolling message log that trims itself to fit a rough token budget."""

    def __init__(self, max_tokens: int = 4000):
        """
        Initialize conversation history.

        Args:
            max_tokens: Maximum tokens to keep in history (rough estimate).
        """
        self.messages: List[ConversationMessage] = []
        self.max_tokens = max_tokens

    def add_message(self, role: str, content: str):
        """
        Append a message and trim older history if over budget.

        Args:
            role: 'user' or 'assistant'
            content: Message content
        """
        self.messages.append(ConversationMessage(role, content))
        self._trim_history()

    def get_messages(self) -> List[Dict[str, str]]:
        """Return the history as LangChain-style {'role', 'content'} dicts."""
        return [{"role": m.role, "content": m.content} for m in self.messages]

    def get_context_summary(self) -> str:
        """
        Render the last three exchanges (six messages) as a text summary.

        Returns:
            Formatted conversation history; empty string when there is none.
        """
        if not self.messages:
            return ""

        lines = []
        for m in self.messages[-6:]:
            speaker = "User" if m.role == "user" else "Assistant"
            # Each message is truncated to its first 200 characters.
            lines.append(f"{speaker}: {m.content[:200]}...")

        return "\n\n".join(lines)

    def _trim_history(self):
        """
        Drop the oldest messages so the estimated size stays within budget.

        Uses a crude ~4-characters-per-token heuristic, and always preserves
        at least the most recent exchange (two messages).
        """
        if not self.messages:
            return

        # Budget expressed in characters (~4 chars per token).
        budget_chars = self.max_tokens * 4
        if sum(len(m.content) for m in self.messages) <= budget_chars:
            return

        # Walk backwards from the newest message, keeping what fits.
        start = len(self.messages)
        used = 0
        for idx in range(len(self.messages) - 1, -1, -1):
            size = len(self.messages[idx].content)
            if used + size > budget_chars:
                start = idx + 1
                break
            used += size

        # Never trim away the most recent exchange (two messages).
        start = min(start, len(self.messages) - 2)
        if start > 0:
            self.messages = self.messages[start:]

    def clear(self):
        """Forget all messages."""
        self.messages.clear()

    def to_dict(self) -> Dict[str, Any]:
        """Export the history (messages + budget) as a plain dict."""
        return {
            "messages": [m.to_dict() for m in self.messages],
            "max_tokens": self.max_tokens
        }
|
| 129 |
+
|
| 130 |
+
class SessionManager:
    """Registry of conversation sessions keyed by session ID."""

    def __init__(self):
        # session_id -> ConversationHistory
        self.sessions: Dict[str, ConversationHistory] = {}

    def create_session(self, session_id: Optional[str] = None) -> str:
        """
        Create a new conversation session.

        Args:
            session_id: Optional explicit ID; a fresh UUID4 is generated otherwise.

        Returns:
            Session ID
        """
        sid = session_id if session_id is not None else str(uuid.uuid4())
        self.sessions[sid] = ConversationHistory()
        return sid

    def get_session(self, session_id: str) -> Optional[ConversationHistory]:
        """
        Get a conversation session.

        Args:
            session_id: Session identifier

        Returns:
            ConversationHistory, or None when the ID is unknown.
        """
        return self.sessions.get(session_id)

    def delete_session(self, session_id: str):
        """
        Delete a conversation session (no-op when the ID is unknown).

        Args:
            session_id: Session identifier
        """
        self.sessions.pop(session_id, None)

    def clear_all_sessions(self):
        """Drop every session."""
        self.sessions.clear()
| 178 |
+
|
| 179 |
+
# Global session manager instance
|
| 180 |
+
session_manager = SessionManager()
|
backend/requirements.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
python-dotenv
|
| 4 |
+
pydantic
|
| 5 |
+
pydantic-settings
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
langchain
|
| 9 |
+
langchain-openai
|
| 10 |
+
langchain-milvus
|
| 11 |
+
langchain-community
|
| 12 |
+
langchain-core
|
| 13 |
+
|
| 14 |
+
pymilvus
|
| 15 |
+
|
| 16 |
+
openai
|
| 17 |
+
|
| 18 |
+
python-multipart
|
| 19 |
+
aiohttp
|
| 20 |
+
tiktoken
|
| 21 |
+
numpy
|
frontend/index.html
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>FinSage Analytics - AI-Powered Financial Analysis</title>
|
| 7 |
+
<link rel="stylesheet" href="style.css">
|
| 8 |
+
</head>
|
| 9 |
+
<body>
|
| 10 |
+
<!-- Top Navigation Bar -->
|
| 11 |
+
<nav class="top-nav">
|
| 12 |
+
<div class="nav-left">
|
| 13 |
+
<div class="logo">
|
| 14 |
+
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
|
| 15 |
+
<path d="M12 2L2 7L12 12L22 7L12 2Z" fill="#3B82F6"/>
|
| 16 |
+
<path d="M2 17L12 22L22 17" stroke="#3B82F6" stroke-width="2"/>
|
| 17 |
+
</svg>
|
| 18 |
+
<div>
|
| 19 |
+
<div class="logo-title">FinSage Analytics</div>
|
| 20 |
+
<div class="logo-subtitle">AI-Powered Fundamental Analysis for S&P MidCap 400</div>
|
| 21 |
+
</div>
|
| 22 |
+
</div>
|
| 23 |
+
</div>
|
| 24 |
+
<div class="nav-right">
|
| 25 |
+
<button class="icon-btn" id="exportBtn" title="Export to PDF">
|
| 26 |
+
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
| 27 |
+
<path d="M21 15v4a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-4"/>
|
| 28 |
+
<polyline points="7 10 12 15 17 10"/>
|
| 29 |
+
<line x1="12" y1="15" x2="12" y2="3"/>
|
| 30 |
+
</svg>
|
| 31 |
+
</button>
|
| 32 |
+
<button class="icon-btn" id="githubBtn" title="View Documentation">
|
| 33 |
+
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
|
| 34 |
+
<path d="M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22"/>
|
| 35 |
+
</svg>
|
| 36 |
+
</button>
|
| 37 |
+
<a href="https://www.linkedin.com/in/adediran-adeyemi-17103b114/" target="_blank" class="icon-btn user-btn" title="Connect on LinkedIn">
|
| 38 |
+
<div class="user-avatar">A</div>
|
| 39 |
+
</a>
|
| 40 |
+
</div>
|
| 41 |
+
</nav>
|
| 42 |
+
|
| 43 |
+
<!-- Main Container -->
|
| 44 |
+
<div class="main-container">
|
| 45 |
+
<!-- Left Panel - Query Input -->
|
| 46 |
+
<div class="left-panel">
|
| 47 |
+
<h2 class="panel-title">Financial Query</h2>
|
| 48 |
+
|
| 49 |
+
<div class="query-box">
|
| 50 |
+
<textarea
|
| 51 |
+
id="queryInput"
|
| 52 |
+
placeholder="Ask about company financials..."
|
| 53 |
+
rows="6"
|
| 54 |
+
></textarea>
|
| 55 |
+
|
| 56 |
+
<button id="submitBtn" class="submit-button">
|
| 57 |
+
<span class="btn-text">Submit</span>
|
| 58 |
+
<span class="loader" style="display: none;"></span>
|
| 59 |
+
</button>
|
| 60 |
+
</div>
|
| 61 |
+
|
| 62 |
+
<!-- Example Queries Section -->
|
| 63 |
+
<div class="examples-section">
|
| 64 |
+
<h3>Example Queries</h3>
|
| 65 |
+
<div class="example-queries-list">
|
| 66 |
+
<div class="example-item" data-query="What is ACM's current ratio for 2023?">
|
| 67 |
+
e.g., What is ACM's current ratio for 2023?
|
| 68 |
+
</div>
|
| 69 |
+
<div class="example-item" data-query="Compare ACM's debt-to-equity ratio over 8 years.">
|
| 70 |
+
e.g., Compare ACM's debt-to-equity ratio over 8 years.
|
| 71 |
+
</div>
|
| 72 |
+
<div class="example-item" data-query="Show ACM's revenue growth trend.">
|
| 73 |
+
e.g., Show ACM's revenue growth trend.
|
| 74 |
+
</div>
|
| 75 |
+
</div>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
<!-- Recent Queries History -->
|
| 79 |
+
<div class="recent-queries">
|
| 80 |
+
<h3>Recent queries history</h3>
|
| 81 |
+
<select id="historySelect" class="history-dropdown">
|
| 82 |
+
<option value="">Select a recent query</option>
|
| 83 |
+
</select>
|
| 84 |
+
</div>
|
| 85 |
+
</div>
|
| 86 |
+
|
| 87 |
+
<!-- Right Panel - AI Answer -->
|
| 88 |
+
<div class="right-panel">
|
| 89 |
+
<!-- Loading State -->
|
| 90 |
+
<div id="loadingState" class="loading-state" style="display: none;">
|
| 91 |
+
<div class="loading-spinner"></div>
|
| 92 |
+
<span>Analyzing...</span>
|
| 93 |
+
</div>
|
| 94 |
+
|
| 95 |
+
<!-- Response Section -->
|
| 96 |
+
<div id="responseSection" style="display: none;">
|
| 97 |
+
<h2 class="panel-title">AI Answer</h2>
|
| 98 |
+
|
| 99 |
+
<div id="answer" class="answer-content"></div>
|
| 100 |
+
|
| 101 |
+
<!-- Query Variations -->
|
| 102 |
+
<div id="expandedQueries" class="expanded-section" style="display: none;">
|
| 103 |
+
<h3>Query Variations Used:</h3>
|
| 104 |
+
<ul id="queriesList"></ul>
|
| 105 |
+
</div>
|
| 106 |
+
|
| 107 |
+
<!-- Sources Section -->
|
| 108 |
+
<div class="sources-section">
|
| 109 |
+
<h3 class="sources-title">Sources</h3>
|
| 110 |
+
<div id="sources" class="sources-list"></div>
|
| 111 |
+
</div>
|
| 112 |
+
</div>
|
| 113 |
+
|
| 114 |
+
<!-- Initial State -->
|
| 115 |
+
<div id="initialState" class="initial-state">
|
| 116 |
+
<div class="empty-state">
|
| 117 |
+
<svg width="64" height="64" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1">
|
| 118 |
+
<path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/>
|
| 119 |
+
</svg>
|
| 120 |
+
<p>Ask a question to get started</p>
|
| 121 |
+
</div>
|
| 122 |
+
</div>
|
| 123 |
+
</div>
|
| 124 |
+
</div>
|
| 125 |
+
|
| 126 |
+
<!-- Error Toast -->
|
| 127 |
+
<div id="errorToast" class="error-toast" style="display: none;">
|
| 128 |
+
<span id="errorMessage"></span>
|
| 129 |
+
</div>
|
| 130 |
+
|
| 131 |
+
<!-- Include jsPDF library for PDF export -->
|
| 132 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js"></script>
|
| 133 |
+
<script src="script.js"></script>
|
| 134 |
+
</body>
|
| 135 |
+
</html>
|
frontend/script.js
ADDED
|
@@ -0,0 +1,524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// API Configuration - Auto-detect URL so the same bundle works locally
// and on Hugging Face Spaces.
const API_BASE_URL = window.location.origin;

// Session Management
// Module-level state shared across the handlers below.
let currentSessionId = null; // persisted to sessionStorage by init()
let currentAnswer = null;    // last answer text (used by exportToPDF)
let currentSources = null;   // last sources array (used by exportToPDF)
let currentQuery = null;     // last submitted query (used by exportToPDF)
let queryHistory = [];       // recent {query, answer, timestamp} entries, capped at 20

// DOM Elements
// Looked up once at load; each may be null if the markup changes, so all
// consumers guard before use. Optional chaining keeps the sub-element
// lookups from throwing when #submitBtn itself is absent (every other
// lookup in this file is already null-tolerant).
const queryInput = document.getElementById('queryInput');
const submitBtn = document.getElementById('submitBtn');
const btnText = submitBtn?.querySelector('.btn-text') ?? null;
const loader = submitBtn?.querySelector('.loader') ?? null;
const responseSection = document.getElementById('responseSection');
const initialState = document.getElementById('initialState');
const loadingState = document.getElementById('loadingState');
const answerDiv = document.getElementById('answer');
const sourcesDiv = document.getElementById('sources');
const errorToast = document.getElementById('errorToast');
const errorMessage = document.getElementById('errorMessage');
const expandedQueriesDiv = document.getElementById('expandedQueries');
const queriesList = document.getElementById('queriesList');
const exportBtn = document.getElementById('exportBtn');
const githubBtn = document.getElementById('githubBtn');
const historySelect = document.getElementById('historySelect');
// Bootstrap the app: restore (or mint) the session id, reload saved
// query history, wire up event handlers, and ping the API once.
function init() {
    const storedId = sessionStorage.getItem('sessionId');
    currentSessionId = storedId ? storedId : generateSessionId();
    sessionStorage.setItem('sessionId', currentSessionId);

    // Restore persisted history; a corrupt payload resets it to empty.
    const rawHistory = sessionStorage.getItem('queryHistory');
    if (rawHistory) {
        try {
            queryHistory = JSON.parse(rawHistory);
            updateHistoryDropdown();
        } catch (parseErr) {
            queryHistory = [];
        }
    }

    setupEventListeners();
    checkHealth();

    // Log API URL for debugging
    console.log('API Base URL:', API_BASE_URL);
}
/**
 * Create a reasonably-unique client-side session identifier.
 * Format: `session_<epoch ms>_<up to 9 base36 chars>`.
 * Uses slice() instead of the deprecated String.prototype.substr().
 */
function generateSessionId() {
    return `session_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
}
// Wire up all UI event handlers. Called once from init(); every element
// reference is guarded because the corresponding markup may be absent.
function setupEventListeners() {
    // Submit button
    if (submitBtn) {
        submitBtn.addEventListener('click', handleSubmit);
    }

    // Ctrl+Enter in the query textarea also submits (plain Enter is
    // left free for multi-line input).
    if (queryInput) {
        queryInput.addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && e.ctrlKey) {
                handleSubmit();
            }
        });
    }

    // Example queries: clicking one copies its data-query attribute
    // into the input and submits immediately.
    document.querySelectorAll('.example-item').forEach(item => {
        item.addEventListener('click', () => {
            const query = item.getAttribute('data-query');
            if (query && queryInput) {
                queryInput.value = query;
                handleSubmit();
            }
        });
    });

    // Export button (PDF download of the current answer)
    if (exportBtn) {
        exportBtn.addEventListener('click', exportToPDF);
    }

    // GitHub button opens the project repository in a new tab
    if (githubBtn) {
        githubBtn.addEventListener('click', () => {
            window.open('https://github.com/Adeyemi0/FinSight-RAG-Application-', '_blank');
        });
    }

    // History dropdown: selecting an entry only fills the input; it
    // deliberately does NOT auto-submit.
    if (historySelect) {
        historySelect.addEventListener('change', (e) => {
            const query = e.target.value;
            if (query && queryInput) {
                queryInput.value = query;
            }
        });
    }
}
// Rebuild the "recent queries" <select>, newest entries first.
// Long queries are truncated to 60 characters for the visible label,
// but the full query text is kept as the option value.
function updateHistoryDropdown() {
    if (!historySelect) return;

    historySelect.innerHTML = '<option value="">Select a recent query</option>';

    // Iterate a reversed copy so the stored chronological order is untouched.
    const newestFirst = queryHistory.slice().reverse();
    for (const entry of newestFirst) {
        const opt = document.createElement('option');
        opt.value = entry.query;
        const shortLabel = entry.query.substring(0, 60);
        opt.textContent = entry.query.length > 60 ? shortLabel + '...' : shortLabel;
        historySelect.appendChild(opt);
    }
}
// Append a query/answer pair to the in-memory history (capped at the
// 20 most recent entries) and persist it to sessionStorage. Storage
// failures (e.g. quota) are logged but never interrupt the UI flow.
function saveToHistory(query, answer) {
    const entry = {
        query,
        answer: answer.substring(0, 500), // keep the stored payload small
        timestamp: new Date().toISOString()
    };
    queryHistory.push(entry);

    // Retain only the last 20 entries.
    if (queryHistory.length > 20) {
        queryHistory = queryHistory.slice(-20);
    }

    try {
        sessionStorage.setItem('queryHistory', JSON.stringify(queryHistory));
        updateHistoryDropdown();
    } catch (storageErr) {
        console.error('Failed to save history:', storageErr);
    }
}
/**
 * Main submit handler: validate the input, POST it to the backend
 * /query endpoint, then render the response (or an error toast).
 * Manages the three right-panel states: initial / loading / response.
 */
async function handleSubmit() {
    if (!queryInput) return;

    const query = queryInput.value.trim();

    if (!query) {
        showError('Please enter a question');
        return;
    }

    // Disable the button and show its spinner while in flight.
    setLoading(true);
    hideError();

    // Hide initial state, show loading panel, hide any previous response.
    if (initialState) initialState.style.display = 'none';
    if (loadingState) loadingState.style.display = 'flex';
    if (responseSection) responseSection.style.display = 'none';

    try {
        const requestData = {
            query,
            ticker: 'ACM', // Fixed ticker — the app only indexes ACM filings
            doc_types: null, // No filter: search across all document types
            top_k: 10,
            session_id: currentSessionId
        };

        // Use API_BASE_URL which auto-detects for Hugging Face
        const response = await fetch(`${API_BASE_URL}/query`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(requestData)
        });

        if (!response.ok) {
            // Prefer the backend's "detail" message; fall back to the status.
            const errorData = await response.json().catch(() => ({}));
            throw new Error(errorData.detail || `HTTP ${response.status}`);
        }

        const data = await response.json();

        if (!data) {
            throw new Error('Empty response from server');
        }

        // Store current data for the PDF export feature.
        currentQuery = query;
        currentAnswer = data.answer;
        currentSources = data.sources;

        // Save to the session-scoped history dropdown.
        saveToHistory(query, data.answer || '');

        // Render answer, expanded queries, and sources.
        displayResults(data);

        // Clear input for the next question.
        queryInput.value = '';

    } catch (error) {
        console.error('Error:', error);
        showError(error.message || 'Failed to process query');

        // Roll the right panel back to its initial empty state.
        if (loadingState) loadingState.style.display = 'none';
        if (initialState) initialState.style.display = 'flex';

    } finally {
        // Always re-enable the submit button, success or failure.
        setLoading(false);
    }
}
/**
 * Render a successful /query response into the right panel:
 * formatted answer (with an optional cache badge), the list of
 * expanded query variations, and the source cards.
 */
function displayResults(data) {
    if (!data) return;

    // Swap the loading indicator for the response section.
    if (loadingState) loadingState.style.display = 'none';
    if (responseSection) responseSection.style.display = 'block';

    // Display answer, prefixed with a cache indicator when applicable.
    if (answerDiv) {
        let answerHTML = formatAnswer(data.answer || 'No answer available');

        // Add cache indicator if the backend served this from its cache.
        if (data.from_cache) {
            // cache_age_seconds -> whole minutes for the badge text.
            const cacheAge = Math.floor((data.cache_age_seconds || 0) / 60);
            const ageText = cacheAge < 1 ? 'just now' : `${cacheAge}m ago`;
            answerHTML = `
                <div class="cache-indicator">
                    <span class="cache-badge">⚡ Cached</span>
                    <span class="cache-details">Retrieved ${ageText} • Hit ${data.cache_hits || 1}x</span>
                </div>
            ` + answerHTML;
        }

        answerDiv.innerHTML = answerHTML;
    }

    // Display the expanded query variations, but only when expansion
    // actually produced more than the original query.
    if (expandedQueriesDiv && queriesList) {
        if (data.expanded_queries && data.expanded_queries.length > 1) {
            expandedQueriesDiv.style.display = 'block';
            queriesList.innerHTML = data.expanded_queries
                .map(q => `<li>${escapeHtml(q)}</li>`)
                .join('');
        } else {
            expandedQueriesDiv.style.display = 'none';
        }
    }

    // Display sources (empty array renders a "No sources" placeholder).
    if (sourcesDiv) {
        displaySources(data.sources || []);
    }

    // Scroll the right panel back to the top so the answer is visible.
    const rightPanel = document.querySelector('.right-panel');
    if (rightPanel) {
        rightPanel.scrollTop = 0;
    }
}
/**
 * Convert a plain-text answer into styled HTML: escaped text, <br> line
 * breaks, citation badges, highlighted figures, and bolded headers.
 *
 * Fix: the header-bolding regex (/^...$/gm) and the formula-box capture
 * ([^\n]+) previously ran AFTER every '\n' had been replaced by '<br>',
 * so line anchors never matched and formula capture ran to the end of
 * the string. Line-oriented transforms now run while newlines exist.
 */
function formatAnswer(answer) {
    if (!answer) return '';

    let formatted = escapeHtml(answer);

    // --- Line-oriented transforms (need real newlines) ---

    // Bold headers: whole lines ending with ':'.
    formatted = formatted.replace(/^(.+:)$/gm, '<strong>$1</strong>');

    // Calculation boxes for formulas ("Formula: ..." up to end of line).
    formatted = formatted.replace(/Formula: ([^\n]+)/g,
        '<div class="calculation-box"><h4>Formula</h4><div class="calculation-step">$1</div></div>');

    // --- Flat transforms ---

    // Line breaks.
    formatted = formatted.replace(/\n/g, '<br>');

    // Citations like "[Source 3]" become styled badges.
    formatted = formatted.replace(/\[Source (\d+)\]/g,
        '<span class="citation">[Source $1]</span>');

    // Highlight numbers (currency and percentages).
    formatted = formatted.replace(/\$[\d,]+\.?\d*[BM]?/g,
        match => `<span class="highlight-number">${match}</span>`);
    formatted = formatted.replace(/\d+\.?\d*%/g,
        match => `<span class="highlight-number">${match}</span>`);

    // Color-code ratio metrics like "2.5x" or "1.2:1".
    formatted = formatted.replace(/(\d+\.?\d+)(x|:1)/g,
        '<span class="metric-green">$1$2</span>');

    return formatted;
}
/**
 * Render retrieved source chunks as collapsible cards inside #sources.
 * Card headers are clickable via the global toggleSource(index).
 *
 * Fix: source.doc_type is optional elsewhere in this file
 * (exportToPDF uses `source.doc_type || 'N/A'`), but this function
 * called .replace() on it unconditionally and would throw a TypeError
 * for a source without a doc_type. It is now guarded.
 */
function displaySources(sources) {
    if (!sourcesDiv) return;

    if (!sources || sources.length === 0) {
        sourcesDiv.innerHTML = '<p style="color: var(--text-muted);">No sources available</p>';
        return;
    }

    sourcesDiv.innerHTML = sources.map((source, index) => {
        if (!source) return '';

        // '10k' gets a friendly label; any other type is title-cased
        // ('earnings_call' -> 'Earnings Call'). Missing -> empty badge.
        const docType = source.doc_type || '';
        const docTypeLabel = docType === '10k' ? '10-K Report' :
            docType.replace('_', ' ').replace(/\b\w/g, l => l.toUpperCase());

        return `
            <div class="source-card" id="source-${index}">
                <div class="source-header" onclick="toggleSource(${index})">
                    <div class="source-title">
                        <span class="source-badge">${docTypeLabel}</span>
                        ${escapeHtml(source.filename || 'Unknown')}
                    </div>
                    <div class="source-similarity">
                        Similarity Score: <strong>${source.similarity_score ? (source.similarity_score * 100).toFixed(0) + '%' : 'N/A'}</strong>
                    </div>
                </div>
                <div class="source-content">
                    <div class="source-details">
                        ${source.ticker ? `<span class="source-detail"><strong>Ticker:</strong> ${escapeHtml(source.ticker)}</span>` : ''}
                        ${source.chunk_id ? `<span class="source-detail"><strong>Chunk:</strong> ${escapeHtml(source.chunk_id)}</span>` : ''}
                    </div>
                    <div class="source-preview">
                        "${escapeHtml(source.text_preview || 'No preview available')}"
                    </div>
                </div>
            </div>
        `;
    }).filter(Boolean).join('');
}
// Expand/collapse the source card with the given index. Invoked from
// the inline onclick handlers generated in displaySources().
function toggleSource(index) {
    const card = document.getElementById(`source-${index}`);
    if (!card) return;
    card.classList.toggle('expanded');
}
/**
 * Export the current query/answer/sources to a downloadable PDF using
 * jsPDF (loaded from a CDN in index.html). Shows a toast and returns
 * early if there is no answer yet or the library failed to load.
 *
 * Fix: the HTML-entity decode chain had degraded into no-op replaces
 * (e.g. replacing '<' with '<'); it now decodes &nbsp;/&lt;/&gt;/&amp;
 * so escaped answers render as plain text in the PDF. &amp; is decoded
 * last so compound entities are not double-decoded.
 */
function exportToPDF() {
    if (!currentAnswer) {
        showError('No answer to export. Please ask a question first.');
        return;
    }

    try {
        // Check if jsPDF is loaded (CDN scripts can be blocked/offline).
        if (typeof window.jspdf === 'undefined') {
            showError('PDF library not loaded. Please refresh the page.');
            return;
        }

        const { jsPDF } = window.jspdf;
        const doc = new jsPDF();

        // Title
        doc.setFontSize(18);
        doc.setFont(undefined, 'bold');
        doc.text('FinSight Analytics Report', 20, 20);

        // Subtitle: generation timestamp
        doc.setFontSize(10);
        doc.setFont(undefined, 'normal');
        doc.setTextColor(100);
        doc.text(`Generated on ${new Date().toLocaleString()}`, 20, 28);

        // Query section
        doc.setFontSize(12);
        doc.setFont(undefined, 'bold');
        doc.setTextColor(0);
        doc.text('Query:', 20, 40);

        doc.setFont(undefined, 'normal');
        doc.setFontSize(10);
        const queryLines = doc.splitTextToSize(currentQuery || '', 170);
        doc.text(queryLines, 20, 48);

        // Answer section starts below however many query lines wrapped.
        let yPos = 48 + (queryLines.length * 7) + 10;
        doc.setFontSize(12);
        doc.setFont(undefined, 'bold');
        doc.text('Answer:', 20, yPos);

        yPos += 8;
        doc.setFont(undefined, 'normal');
        doc.setFontSize(10);

        // Strip markup/citation markers and decode HTML entities before
        // laying the answer out as plain text.
        let cleanAnswer = currentAnswer
            .replace(/<[^>]*>/g, '')        // remove HTML tags
            .replace(/\[Source \d+\]/g, '') // remove citation markers
            .replace(/&nbsp;/g, ' ')
            .replace(/&lt;/g, '<')
            .replace(/&gt;/g, '>')
            .replace(/&amp;/g, '&');        // must be decoded last

        const answerLines = doc.splitTextToSize(cleanAnswer, 170);

        // Write the answer, adding a page break near the bottom margin.
        answerLines.forEach((line) => {
            if (yPos > 270) {
                doc.addPage();
                yPos = 20;
            }
            doc.text(line, 20, yPos);
            yPos += 7;
        });

        // Sources section
        if (currentSources && currentSources.length > 0) {
            yPos += 10;
            if (yPos > 250) {
                doc.addPage();
                yPos = 20;
            }

            doc.setFontSize(12);
            doc.setFont(undefined, 'bold');
            doc.text('Sources:', 20, yPos);
            yPos += 8;

            doc.setFontSize(9);
            doc.setFont(undefined, 'normal');

            currentSources.forEach((source, index) => {
                if (yPos > 270) {
                    doc.addPage();
                    yPos = 20;
                }

                doc.setFont(undefined, 'bold');
                doc.text(`[${index + 1}] ${source.filename || 'Unknown'}`, 20, yPos);
                yPos += 5;

                doc.setFont(undefined, 'normal');
                doc.setTextColor(100);
                doc.text(`Type: ${source.doc_type || 'N/A'} | Similarity: ${source.similarity_score ? (source.similarity_score * 100).toFixed(0) + '%' : 'N/A'}`, 20, yPos);
                yPos += 8;
                doc.setTextColor(0);
            });
        }

        // Save with a timestamped filename.
        const filename = `FinSight_Analysis_${Date.now()}.pdf`;
        doc.save(filename);

    } catch (error) {
        console.error('PDF Export Error:', error);
        showError('Failed to export PDF. Please try again.');
    }
}
// Reflect the in-flight state on the submit button: disable it and
// swap its label text for the spinner while a request is running.
function setLoading(isLoading) {
    if (!submitBtn) return;

    submitBtn.disabled = isLoading;

    if (!btnText || !loader) return;
    btnText.style.display = isLoading ? 'none' : 'inline';
    loader.style.display = isLoading ? 'inline-block' : 'none';
}
// Show a transient error toast that auto-hides after 5 seconds.
// Falls back to a blocking alert() when the toast markup is missing.
function showError(message) {
    if (!errorToast || !errorMessage) {
        alert(message);
        return;
    }

    errorMessage.textContent = message;
    errorToast.style.display = 'block';

    setTimeout(() => {
        errorToast.style.display = 'none';
    }, 5000);
}
// Hide the error toast immediately (no-op if it is not in the DOM).
function hideError() {
    if (!errorToast) return;
    errorToast.style.display = 'none';
}
/**
 * Escape text for safe interpolation into HTML markup.
 * Pure-string implementation (no DOM element required), producing the
 * same entities as the previous div.textContent -> innerHTML
 * round-trip: & -> &amp;, < -> &lt;, > -> &gt;. This keeps the helper
 * usable (and testable) outside a browser environment.
 */
function escapeHtml(text) {
    if (!text) return '';
    return String(text)
        .replace(/&/g, '&amp;')   // must run first so entities aren't doubled
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;');
}
// Best-effort API reachability probe run once on page load. Failures
// are only logged, never surfaced: on Hugging Face the Space may still
// be cold-starting when the frontend appears.
async function checkHealth() {
    try {
        const response = await fetch(`${API_BASE_URL}/health`, {
            signal: AbortSignal.timeout(5000) // don't let the probe hang
        });

        if (response.ok) {
            console.log('API health check passed');
        } else {
            console.warn('API health check failed');
        }
    } catch (error) {
        console.error('Cannot connect to API:', error);
    }
}
// Make toggleSource available globally — it is referenced from the
// inline onclick attributes generated in displaySources().
window.toggleSource = toggleSource;

// Initialize on load. script.js is included at the end of <body>, so
// the DOM elements looked up above already exist at this point.
init();
frontend/style.css
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Reset and Base Styles */
|
| 2 |
+
* {
|
| 3 |
+
margin: 0;
|
| 4 |
+
padding: 0;
|
| 5 |
+
box-sizing: border-box;
|
| 6 |
+
}
|
| 7 |
+
|
| 8 |
+
:root {
|
| 9 |
+
--bg-primary: #0A0A0A;
|
| 10 |
+
--bg-secondary: #141414;
|
| 11 |
+
--bg-tertiary: #1E1E1E;
|
| 12 |
+
--bg-hover: #252525;
|
| 13 |
+
--border-color: #2A2A2A;
|
| 14 |
+
--text-primary: #E5E5E5;
|
| 15 |
+
--text-secondary: #A0A0A0;
|
| 16 |
+
--text-muted: #6B6B6B;
|
| 17 |
+
--accent-blue: #3B82F6;
|
| 18 |
+
--accent-blue-hover: #2563EB;
|
| 19 |
+
--accent-yellow: #F59E0B;
|
| 20 |
+
--accent-red: #EF4444;
|
| 21 |
+
--accent-green: #10B981;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
body {
|
| 25 |
+
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', sans-serif;
|
| 26 |
+
background-color: var(--bg-primary);
|
| 27 |
+
color: var(--text-primary);
|
| 28 |
+
min-height: 100vh;
|
| 29 |
+
overflow-x: hidden;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
/* Top Navigation */
|
| 33 |
+
.top-nav {
|
| 34 |
+
background-color: var(--bg-secondary);
|
| 35 |
+
border-bottom: 1px solid var(--border-color);
|
| 36 |
+
padding: 12px 24px;
|
| 37 |
+
display: flex;
|
| 38 |
+
justify-content: space-between;
|
| 39 |
+
align-items: center;
|
| 40 |
+
position: sticky;
|
| 41 |
+
top: 0;
|
| 42 |
+
z-index: 100;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
.nav-left .logo {
|
| 46 |
+
display: flex;
|
| 47 |
+
align-items: center;
|
| 48 |
+
gap: 12px;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
.logo-title {
|
| 52 |
+
font-size: 16px;
|
| 53 |
+
font-weight: 700;
|
| 54 |
+
color: var(--text-primary);
|
| 55 |
+
line-height: 1.2;
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
.logo-subtitle {
|
| 59 |
+
font-size: 11px;
|
| 60 |
+
font-weight: 400;
|
| 61 |
+
color: var(--text-secondary);
|
| 62 |
+
margin-top: 2px;
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
.nav-right {
|
| 66 |
+
display: flex;
|
| 67 |
+
align-items: center;
|
| 68 |
+
gap: 8px;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
.icon-btn {
|
| 72 |
+
background: transparent;
|
| 73 |
+
border: none;
|
| 74 |
+
color: var(--text-secondary);
|
| 75 |
+
padding: 8px;
|
| 76 |
+
border-radius: 6px;
|
| 77 |
+
cursor: pointer;
|
| 78 |
+
transition: all 0.2s;
|
| 79 |
+
display: flex;
|
| 80 |
+
align-items: center;
|
| 81 |
+
justify-content: center;
|
| 82 |
+
text-decoration: none;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
.icon-btn:hover {
|
| 86 |
+
background-color: var(--bg-hover);
|
| 87 |
+
color: var(--text-primary);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
.user-btn .user-avatar {
|
| 91 |
+
width: 28px;
|
| 92 |
+
height: 28px;
|
| 93 |
+
border-radius: 50%;
|
| 94 |
+
background: linear-gradient(135deg, #3B82F6 0%, #2563EB 100%);
|
| 95 |
+
display: flex;
|
| 96 |
+
align-items: center;
|
| 97 |
+
justify-content: center;
|
| 98 |
+
font-size: 13px;
|
| 99 |
+
font-weight: 600;
|
| 100 |
+
color: white;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
/* Main Container */
|
| 104 |
+
.main-container {
|
| 105 |
+
display: grid;
|
| 106 |
+
grid-template-columns: 490px 1fr;
|
| 107 |
+
height: calc(100vh - 57px);
|
| 108 |
+
gap: 0;
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
/* Left Panel */
|
| 112 |
+
.left-panel {
|
| 113 |
+
background-color: var(--bg-secondary);
|
| 114 |
+
border-right: 1px solid var(--border-color);
|
| 115 |
+
padding: 32px 24px;
|
| 116 |
+
overflow-y: auto;
|
| 117 |
+
display: flex;
|
| 118 |
+
flex-direction: column;
|
| 119 |
+
gap: 24px;
|
| 120 |
+
}
|
| 121 |
+
|
| 122 |
+
.panel-title {
|
| 123 |
+
font-size: 20px;
|
| 124 |
+
font-weight: 600;
|
| 125 |
+
color: var(--text-primary);
|
| 126 |
+
margin-bottom: 8px;
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
/* Query Box */
|
| 130 |
+
.query-box {
|
| 131 |
+
background-color: var(--bg-tertiary);
|
| 132 |
+
border: 1px solid var(--border-color);
|
| 133 |
+
border-radius: 12px;
|
| 134 |
+
padding: 16px;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
#queryInput {
|
| 138 |
+
width: 100%;
|
| 139 |
+
background-color: transparent;
|
| 140 |
+
border: none;
|
| 141 |
+
color: var(--text-primary);
|
| 142 |
+
font-size: 14px;
|
| 143 |
+
font-family: inherit;
|
| 144 |
+
resize: none;
|
| 145 |
+
outline: none;
|
| 146 |
+
line-height: 1.6;
|
| 147 |
+
margin-bottom: 16px;
|
| 148 |
+
min-height: 120px;
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
#queryInput::placeholder {
|
| 152 |
+
color: var(--text-muted);
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
.submit-button {
|
| 156 |
+
width: 100%;
|
| 157 |
+
background-color: var(--accent-blue);
|
| 158 |
+
color: white;
|
| 159 |
+
border: none;
|
| 160 |
+
padding: 12px;
|
| 161 |
+
border-radius: 8px;
|
| 162 |
+
font-size: 14px;
|
| 163 |
+
font-weight: 600;
|
| 164 |
+
cursor: pointer;
|
| 165 |
+
transition: background-color 0.2s;
|
| 166 |
+
display: flex;
|
| 167 |
+
align-items: center;
|
| 168 |
+
justify-content: center;
|
| 169 |
+
gap: 8px;
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
.submit-button:hover:not(:disabled) {
|
| 173 |
+
background-color: var(--accent-blue-hover);
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
.submit-button:disabled {
|
| 177 |
+
opacity: 0.5;
|
| 178 |
+
cursor: not-allowed;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
/* Examples Section */
|
| 182 |
+
.examples-section {
|
| 183 |
+
margin-top: 8px;
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
.examples-section h3 {
|
| 187 |
+
font-size: 14px;
|
| 188 |
+
color: var(--text-secondary);
|
| 189 |
+
margin-bottom: 12px;
|
| 190 |
+
font-weight: 600;
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
.example-queries-list {
|
| 194 |
+
display: flex;
|
| 195 |
+
flex-direction: column;
|
| 196 |
+
gap: 8px;
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
.example-item {
|
| 200 |
+
color: var(--text-muted);
|
| 201 |
+
font-size: 13px;
|
| 202 |
+
cursor: pointer;
|
| 203 |
+
padding: 10px 12px;
|
| 204 |
+
border-radius: 6px;
|
| 205 |
+
transition: all 0.2s;
|
| 206 |
+
background-color: var(--bg-tertiary);
|
| 207 |
+
border: 1px solid var(--border-color);
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
.example-item:hover {
|
| 211 |
+
background-color: var(--bg-hover);
|
| 212 |
+
color: var(--text-secondary);
|
| 213 |
+
border-color: var(--accent-blue);
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
/* Recent Queries */
|
| 217 |
+
.recent-queries {
|
| 218 |
+
margin-top: 16px;
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
.recent-queries h3 {
|
| 222 |
+
font-size: 14px;
|
| 223 |
+
color: var(--text-secondary);
|
| 224 |
+
margin-bottom: 12px;
|
| 225 |
+
font-weight: 600;
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
.history-dropdown {
|
| 229 |
+
width: 100%;
|
| 230 |
+
background-color: var(--bg-tertiary);
|
| 231 |
+
border: 1px solid var(--border-color);
|
| 232 |
+
color: var(--text-primary);
|
| 233 |
+
padding: 10px 12px;
|
| 234 |
+
border-radius: 8px;
|
| 235 |
+
font-size: 13px;
|
| 236 |
+
cursor: pointer;
|
| 237 |
+
outline: none;
|
| 238 |
+
transition: border-color 0.2s;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
.history-dropdown:focus {
|
| 242 |
+
border-color: var(--accent-blue);
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
.history-dropdown option {
|
| 246 |
+
background-color: var(--bg-tertiary);
|
| 247 |
+
color: var(--text-primary);
|
| 248 |
+
padding: 8px;
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
/* Right Panel */
|
| 252 |
+
.right-panel {
|
| 253 |
+
background-color: var(--bg-primary);
|
| 254 |
+
padding: 32px 48px;
|
| 255 |
+
overflow-y: auto;
|
| 256 |
+
position: relative;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
/* Loading State */
|
| 260 |
+
.loading-state {
|
| 261 |
+
display: flex;
|
| 262 |
+
align-items: center;
|
| 263 |
+
gap: 12px;
|
| 264 |
+
color: var(--text-secondary);
|
| 265 |
+
font-size: 14px;
|
| 266 |
+
margin-top: 24px;
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
.loading-spinner {
|
| 270 |
+
width: 20px;
|
| 271 |
+
height: 20px;
|
| 272 |
+
border: 2px solid var(--border-color);
|
| 273 |
+
border-top-color: var(--accent-blue);
|
| 274 |
+
border-radius: 50%;
|
| 275 |
+
animation: spin 0.8s linear infinite;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
@keyframes spin {
|
| 279 |
+
to { transform: rotate(360deg); }
|
| 280 |
+
}
|
| 281 |
+
|
| 282 |
+
/* Initial Empty State */
|
| 283 |
+
.initial-state {
|
| 284 |
+
display: flex;
|
| 285 |
+
align-items: center;
|
| 286 |
+
justify-content: center;
|
| 287 |
+
height: 100%;
|
| 288 |
+
min-height: 400px;
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
.empty-state {
|
| 292 |
+
text-align: center;
|
| 293 |
+
color: var(--text-muted);
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
.empty-state svg {
|
| 297 |
+
margin-bottom: 16px;
|
| 298 |
+
opacity: 0.5;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
.empty-state p {
|
| 302 |
+
font-size: 15px;
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
/* Answer Content */
|
| 306 |
+
.answer-content {
|
| 307 |
+
background-color: var(--bg-secondary);
|
| 308 |
+
border: 1px solid var(--border-color);
|
| 309 |
+
border-radius: 12px;
|
| 310 |
+
padding: 24px;
|
| 311 |
+
margin-top: 16px;
|
| 312 |
+
line-height: 1.8;
|
| 313 |
+
font-size: 14px;
|
| 314 |
+
color: var(--text-primary);
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
.answer-content strong {
|
| 318 |
+
color: var(--text-primary);
|
| 319 |
+
font-weight: 600;
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
.answer-content .highlight-number {
|
| 323 |
+
background-color: var(--accent-yellow);
|
| 324 |
+
color: var(--bg-primary);
|
| 325 |
+
padding: 2px 6px;
|
| 326 |
+
border-radius: 4px;
|
| 327 |
+
font-weight: 700;
|
| 328 |
+
font-size: 13px;
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
.answer-content .metric-red {
|
| 332 |
+
color: var(--accent-red);
|
| 333 |
+
font-weight: 600;
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
.answer-content .metric-green {
|
| 337 |
+
color: var(--accent-green);
|
| 338 |
+
font-weight: 600;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
/* Citation Styling */
|
| 342 |
+
.citation {
|
| 343 |
+
background-color: var(--accent-blue);
|
| 344 |
+
color: white;
|
| 345 |
+
padding: 2px 8px;
|
| 346 |
+
border-radius: 4px;
|
| 347 |
+
font-size: 12px;
|
| 348 |
+
font-weight: 600;
|
| 349 |
+
white-space: nowrap;
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
/* Calculation Boxes */
|
| 353 |
+
.calculation-box {
|
| 354 |
+
background-color: var(--bg-tertiary);
|
| 355 |
+
border-left: 3px solid var(--accent-blue);
|
| 356 |
+
padding: 16px;
|
| 357 |
+
margin: 16px 0;
|
| 358 |
+
border-radius: 6px;
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
.calculation-box h4 {
|
| 362 |
+
font-size: 13px;
|
| 363 |
+
color: var(--text-secondary);
|
| 364 |
+
margin-bottom: 8px;
|
| 365 |
+
font-weight: 600;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
.calculation-step {
|
| 369 |
+
font-family: 'Courier New', monospace;
|
| 370 |
+
color: var(--text-primary);
|
| 371 |
+
font-size: 13px;
|
| 372 |
+
margin: 4px 0;
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
/* Expanded Queries */
|
| 376 |
+
.expanded-section {
|
| 377 |
+
margin: 24px 0;
|
| 378 |
+
padding: 16px;
|
| 379 |
+
background-color: var(--bg-secondary);
|
| 380 |
+
border: 1px solid var(--border-color);
|
| 381 |
+
border-radius: 8px;
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
.expanded-section h3 {
|
| 385 |
+
font-size: 14px;
|
| 386 |
+
color: var(--text-secondary);
|
| 387 |
+
margin-bottom: 12px;
|
| 388 |
+
font-weight: 600;
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
.expanded-section ul {
|
| 392 |
+
list-style: none;
|
| 393 |
+
padding: 0;
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
.expanded-section li {
|
| 397 |
+
padding: 8px 0;
|
| 398 |
+
color: var(--text-secondary);
|
| 399 |
+
font-size: 13px;
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
.expanded-section li:before {
|
| 403 |
+
content: "→ ";
|
| 404 |
+
color: var(--accent-blue);
|
| 405 |
+
margin-right: 8px;
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
/* Sources Section */
|
| 409 |
+
.sources-section {
|
| 410 |
+
margin-top: 32px;
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
.sources-title {
|
| 414 |
+
font-size: 16px;
|
| 415 |
+
color: var(--text-primary);
|
| 416 |
+
margin-bottom: 16px;
|
| 417 |
+
font-weight: 600;
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
.sources-list {
|
| 421 |
+
display: flex;
|
| 422 |
+
flex-direction: column;
|
| 423 |
+
gap: 12px;
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
/* Source Card */
|
| 427 |
+
.source-card {
|
| 428 |
+
background-color: var(--bg-secondary);
|
| 429 |
+
border: 1px solid var(--border-color);
|
| 430 |
+
border-radius: 10px;
|
| 431 |
+
overflow: hidden;
|
| 432 |
+
transition: all 0.2s;
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
.source-card:hover {
|
| 436 |
+
border-color: var(--accent-blue);
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
.source-header {
|
| 440 |
+
padding: 16px;
|
| 441 |
+
display: flex;
|
| 442 |
+
justify-content: space-between;
|
| 443 |
+
align-items: center;
|
| 444 |
+
cursor: pointer;
|
| 445 |
+
user-select: none;
|
| 446 |
+
}
|
| 447 |
+
|
| 448 |
+
.source-title {
|
| 449 |
+
font-size: 14px;
|
| 450 |
+
font-weight: 600;
|
| 451 |
+
color: var(--text-primary);
|
| 452 |
+
display: flex;
|
| 453 |
+
align-items: center;
|
| 454 |
+
gap: 8px;
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
.source-badge {
|
| 458 |
+
background-color: var(--accent-blue);
|
| 459 |
+
color: white;
|
| 460 |
+
padding: 2px 8px;
|
| 461 |
+
border-radius: 4px;
|
| 462 |
+
font-size: 11px;
|
| 463 |
+
font-weight: 600;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
.source-similarity {
|
| 467 |
+
font-size: 13px;
|
| 468 |
+
color: var(--text-secondary);
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
.source-similarity strong {
|
| 472 |
+
color: var(--accent-green);
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
.source-content {
|
| 476 |
+
padding: 0 16px 16px 16px;
|
| 477 |
+
display: none;
|
| 478 |
+
}
|
| 479 |
+
|
| 480 |
+
.source-card.expanded .source-content {
|
| 481 |
+
display: block;
|
| 482 |
+
}
|
| 483 |
+
|
| 484 |
+
.source-details {
|
| 485 |
+
margin-bottom: 12px;
|
| 486 |
+
display: flex;
|
| 487 |
+
flex-wrap: wrap;
|
| 488 |
+
gap: 12px;
|
| 489 |
+
}
|
| 490 |
+
|
| 491 |
+
.source-detail {
|
| 492 |
+
font-size: 12px;
|
| 493 |
+
color: var(--text-muted);
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
.source-detail strong {
|
| 497 |
+
color: var(--text-secondary);
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
.source-preview {
|
| 501 |
+
background-color: var(--bg-tertiary);
|
| 502 |
+
padding: 12px;
|
| 503 |
+
border-radius: 6px;
|
| 504 |
+
font-size: 13px;
|
| 505 |
+
color: var(--text-secondary);
|
| 506 |
+
line-height: 1.6;
|
| 507 |
+
font-style: italic;
|
| 508 |
+
}
|
| 509 |
+
|
| 510 |
+
.view-context-btn {
|
| 511 |
+
margin-top: 8px;
|
| 512 |
+
color: var(--accent-blue);
|
| 513 |
+
background: none;
|
| 514 |
+
border: none;
|
| 515 |
+
font-size: 12px;
|
| 516 |
+
cursor: pointer;
|
| 517 |
+
padding: 4px 0;
|
| 518 |
+
display: inline-flex;
|
| 519 |
+
align-items: center;
|
| 520 |
+
gap: 4px;
|
| 521 |
+
}
|
| 522 |
+
|
| 523 |
+
.view-context-btn:hover {
|
| 524 |
+
text-decoration: underline;
|
| 525 |
+
}
|
| 526 |
+
|
| 527 |
+
/* Error Toast */
|
| 528 |
+
.error-toast {
|
| 529 |
+
position: fixed;
|
| 530 |
+
bottom: 24px;
|
| 531 |
+
right: 24px;
|
| 532 |
+
background-color: var(--accent-red);
|
| 533 |
+
color: white;
|
| 534 |
+
padding: 16px 24px;
|
| 535 |
+
border-radius: 8px;
|
| 536 |
+
box-shadow: 0 10px 40px rgba(0, 0, 0, 0.3);
|
| 537 |
+
z-index: 1000;
|
| 538 |
+
animation: slideIn 0.3s ease-out;
|
| 539 |
+
}
|
| 540 |
+
|
| 541 |
+
@keyframes slideIn {
|
| 542 |
+
from {
|
| 543 |
+
transform: translateY(100px);
|
| 544 |
+
opacity: 0;
|
| 545 |
+
}
|
| 546 |
+
to {
|
| 547 |
+
transform: translateY(0);
|
| 548 |
+
opacity: 1;
|
| 549 |
+
}
|
| 550 |
+
}
|
| 551 |
+
|
| 552 |
+
/* Loader Spinner */
|
| 553 |
+
.loader {
|
| 554 |
+
width: 16px;
|
| 555 |
+
height: 16px;
|
| 556 |
+
border: 2px solid rgba(255, 255, 255, 0.3);
|
| 557 |
+
border-top-color: white;
|
| 558 |
+
border-radius: 50%;
|
| 559 |
+
animation: spin 0.8s linear infinite;
|
| 560 |
+
}
|
| 561 |
+
|
| 562 |
+
/* Scrollbar Styling */
|
| 563 |
+
::-webkit-scrollbar {
|
| 564 |
+
width: 8px;
|
| 565 |
+
height: 8px;
|
| 566 |
+
}
|
| 567 |
+
|
| 568 |
+
::-webkit-scrollbar-track {
|
| 569 |
+
background: var(--bg-primary);
|
| 570 |
+
}
|
| 571 |
+
|
| 572 |
+
::-webkit-scrollbar-thumb {
|
| 573 |
+
background: var(--border-color);
|
| 574 |
+
border-radius: 4px;
|
| 575 |
+
}
|
| 576 |
+
|
| 577 |
+
::-webkit-scrollbar-thumb:hover {
|
| 578 |
+
background: var(--bg-hover);
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
/* Responsive Design */
|
| 582 |
+
@media (max-width: 1200px) {
|
| 583 |
+
.main-container {
|
| 584 |
+
grid-template-columns: 420px 1fr;
|
| 585 |
+
}
|
| 586 |
+
|
| 587 |
+
.right-panel {
|
| 588 |
+
padding: 24px 32px;
|
| 589 |
+
}
|
| 590 |
+
}
|
| 591 |
+
|
| 592 |
+
@media (max-width: 968px) {
|
| 593 |
+
.main-container {
|
| 594 |
+
grid-template-columns: 1fr;
|
| 595 |
+
height: auto;
|
| 596 |
+
}
|
| 597 |
+
|
| 598 |
+
.left-panel {
|
| 599 |
+
border-right: none;
|
| 600 |
+
border-bottom: 1px solid var(--border-color);
|
| 601 |
+
}
|
| 602 |
+
|
| 603 |
+
.right-panel {
|
| 604 |
+
min-height: calc(100vh - 57px);
|
| 605 |
+
}
|
| 606 |
+
|
| 607 |
+
.logo-subtitle {
|
| 608 |
+
display: none;
|
| 609 |
+
}
|
| 610 |
+
}
|