# utils/database.py
# Update the imports first
from langchain_community.chat_models import ChatOpenAI
from langchain_core.messages import (
    HumanMessage,
    AIMessage,
    SystemMessage,
    BaseMessage
)
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough
from langchain.chains import ConversationalRetrievalChain
from langchain.agents import AgentExecutor, Tool, create_openai_tools_agent
from langchain.agents.format_scratchpad.tools import format_to_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS

import os
import json  # used below to serialize/deserialize chat message metadata
import streamlit as st
import sqlite3
import traceback
import time
import io
import tempfile
from sqlite3 import Error
from threading import Lock
from typing import Dict, List, Optional, Any
from datetime import datetime

# Create a lock for database connection
conn_lock = Lock()


def create_connection(db_file: str) -> Optional[sqlite3.Connection]:
    """
    Create a database connection to the SQLite database.

    Args:
        db_file (str): Path to the SQLite database file.

    Returns:
        sqlite3.Connection: Database connection object or None if an error occurs.
    """
    try:
        conn = sqlite3.connect(db_file, check_same_thread=False)
        return conn
    except sqlite3.Error as e:
        st.error(f"Error connecting to database: {e}")
        return None
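# Illustrative only (not part of the original module): most helpers below open a
# cursor inside `with conn_lock:` so the single check_same_thread=False connection
# can be shared safely across Streamlit reruns. A typical call site looks like
# this sketch, where the "documents.db" path is an assumption for the example:
#
#     conn = create_connection("documents.db")
#     if conn is not None:
#         with conn_lock:
#             cursor = conn.cursor()
#             cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
#             existing_tables = [row[0] for row in cursor.fetchall()]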
# utils/database.py
# Add this version of create_tables (replacing the existing one)
def create_tables(conn: sqlite3.Connection) -> None:
    """Create all necessary tables in the database."""
    try:
        with conn_lock:
            cursor = conn.cursor()

            # Force create collections tables first
            collections_tables = [
                '''
                CREATE TABLE IF NOT EXISTS collections (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT NOT NULL UNIQUE,
                    description TEXT,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
                ''',
                '''
                CREATE TABLE IF NOT EXISTS document_collections (
                    document_id INTEGER,
                    collection_id INTEGER,
                    added_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    PRIMARY KEY (document_id, collection_id),
                    FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE,
                    FOREIGN KEY (collection_id) REFERENCES collections (id) ON DELETE CASCADE
                )
                '''
            ]

            # Execute collections tables creation separately
            for table_sql in collections_tables:
                try:
                    cursor.execute(table_sql)
                    conn.commit()
                except sqlite3.Error as e:
                    st.error(f"Error creating collections table: {e}")
                    st.error(f"SQL that failed: {table_sql}")
                    raise

            # Create other tables
            other_tables = [
                '''
                CREATE TABLE IF NOT EXISTS documents (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT NOT NULL,
                    content TEXT NOT NULL,
                    upload_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
                ''',
                '''
                CREATE TABLE IF NOT EXISTS queries (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    query TEXT NOT NULL,
                    response TEXT NOT NULL,
                    document_id INTEGER,
                    query_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE
                )
                ''',
                '''
                CREATE TABLE IF NOT EXISTS annotations (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    document_id INTEGER NOT NULL,
                    annotation TEXT NOT NULL,
                    page_number INTEGER,
                    annotation_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE
                )
                '''
            ]

            # Execute other tables creation
            for table_sql in other_tables:
                try:
                    cursor.execute(table_sql)
                    conn.commit()
                except sqlite3.Error as e:
                    st.error(f"Error creating table: {e}")
                    st.error(f"SQL that failed: {table_sql}")
                    raise

            # Create indices
            indices = [
                'CREATE INDEX IF NOT EXISTS idx_doc_name ON documents(name)',
                'CREATE INDEX IF NOT EXISTS idx_collection_name ON collections(name)',
                'CREATE INDEX IF NOT EXISTS idx_doc_collections ON document_collections(collection_id)'
            ]

            # Execute indices creation
            for index_sql in indices:
                try:
                    cursor.execute(index_sql)
                    conn.commit()
                except sqlite3.Error as e:
                    st.error(f"Error creating index: {e}")
                    st.error(f"SQL that failed: {index_sql}")

            # Verify collections table was created
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='collections'")
            if not cursor.fetchone():
                st.error("Failed to create collections table despite no errors")
                raise Exception("Collections table creation failed silently")

            conn.commit()

    except sqlite3.Error as e:
        st.error(f"Error in create_tables: {e}")
        raise
    except Exception as e:
        st.error(f"Unexpected error in create_tables: {e}")
        raise


def create_chat_tables(conn: sqlite3.Connection) -> None:
    """Create necessary tables for chat management."""
    try:
        with conn_lock:
            cursor = conn.cursor()

            # Create tags table first
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS document_tags (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    document_id INTEGER NOT NULL,
                    tag TEXT NOT NULL,
                    confidence FLOAT,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE,
                    UNIQUE(document_id, tag)
                )
            ''')

            # Create chats table with collection_id
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS chats (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    title TEXT NOT NULL,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    collection_id INTEGER,
                    FOREIGN KEY (collection_id) REFERENCES collections (id) ON DELETE SET NULL
                )
            ''')

            # Create chat messages table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS chat_messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    chat_id INTEGER NOT NULL,
                    role TEXT NOT NULL,
                    content TEXT NOT NULL,
                    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                    metadata TEXT,
                    FOREIGN KEY (chat_id) REFERENCES chats (id) ON DELETE CASCADE
                )
            ''')

            conn.commit()
    except sqlite3.Error as e:
        st.error(f"Error creating chat tables: {e}")
        raise


def generate_document_tags(content: str) -> List[str]:
    """Generate tags for a document using AI."""
    try:
        llm = ChatOpenAI(temperature=0.2, model="gpt-3.5-turbo")
        prompt = """Analyze the following document content and generate relevant tags/keywords.
        Focus on key themes, topics, and important terminology.
        Return only the tags as a comma-separated list.
Content: {content}""" response = llm.invoke([ SystemMessage(content="You are a document analysis assistant. Generate relevant tags as a comma-separated list only."), HumanMessage(content=prompt.format(content=content[:2000])) ]) # Extract content from the AI message tags_text = response.content # Split the comma-separated string into a list tags = [tag.strip() for tag in tags_text.split(',')] return tags except Exception as e: st.error(f"Error generating tags: {e}") return [] def add_document_to_collection(conn: sqlite3.Connection, document_id: int, collection_id: int) -> bool: """Link a document to a collection.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT OR IGNORE INTO document_collections (document_id, collection_id) VALUES (?, ?) ''', (document_id, collection_id)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error linking document to collection: {e}") return False def get_collection_documents(conn: sqlite3.Connection, collection_id: int) -> List[Dict]: """Get all documents in a collection.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date FROM documents d JOIN document_collections dc ON d.id = dc.document_id WHERE dc.collection_id = ? ORDER BY d.upload_date DESC ''', (collection_id,)) documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3] }) return documents except sqlite3.Error as e: st.error(f"Error retrieving collection documents: {e}") return [] def get_collections(conn: sqlite3.Connection) -> List[Dict]: """Get all collections with document counts.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT c.id, c.name, c.description, c.created_at, COUNT(DISTINCT dc.document_id) as doc_count FROM collections c LEFT JOIN document_collections dc ON c.id = dc.collection_id GROUP BY c.id ORDER BY c.name ''') collections = [] for row in cursor.fetchall(): collections.append({ 'id': row[0], 'name': row[1], 'description': row[2], 'created_at': row[3], 'doc_count': row[4] }) return collections except sqlite3.Error as e: st.error(f"Error retrieving collections: {e}") return [] def add_document_tags(conn: sqlite3.Connection, document_id: int, tags: List[str]) -> bool: """Add tags to a document.""" try: with conn_lock: cursor = conn.cursor() for tag in tags: cursor.execute(''' INSERT OR IGNORE INTO document_tags (document_id, tag) VALUES (?, ?) ''', (document_id, tag)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error adding tags: {e}") return False def get_document_tags(conn: sqlite3.Connection, document_id: int) -> List[str]: """Get all tags for a document.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT tag FROM document_tags WHERE document_id = ? ORDER BY tag ''', (document_id,)) return [row[0] for row in cursor.fetchall()] except sqlite3.Error as e: st.error(f"Error retrieving tags: {e}") return [] def search_documents_in_collection(conn: sqlite3.Connection, collection_id: int, query: str) -> List[Dict]: """Search for documents within a collection.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.* FROM documents d JOIN document_collections dc ON d.id = dc.document_id WHERE dc.collection_id = ? AND (d.name LIKE ? OR d.content LIKE ?) 
''', (collection_id, f'%{query}%', f'%{query}%')) return [dict(row) for row in cursor.fetchall()] except sqlite3.Error as e: st.error(f"Error searching documents: {e}") return [] def force_recreate_collections_tables(conn: sqlite3.Connection) -> bool: """Force recreate collections tables if they're missing.""" try: with conn_lock: cursor = conn.cursor() # Drop existing tables if they exist cursor.execute("DROP TABLE IF EXISTS document_collections") cursor.execute("DROP TABLE IF EXISTS collections") # Create collections table cursor.execute(''' CREATE TABLE collections ( id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL UNIQUE, description TEXT, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) ''') # Create document_collections table cursor.execute(''' CREATE TABLE document_collections ( document_id INTEGER, collection_id INTEGER, added_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (document_id, collection_id), FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE, FOREIGN KEY (collection_id) REFERENCES collections (id) ON DELETE CASCADE ) ''') # Create indices cursor.execute('CREATE INDEX IF NOT EXISTS idx_collection_name ON collections(name)') cursor.execute('CREATE INDEX IF NOT EXISTS idx_doc_collections ON document_collections(collection_id)') conn.commit() return True except sqlite3.Error as e: st.error(f"Error recreating collections tables: {e}") return False def get_existing_vector_store(document_ids: List[int]) -> Optional[FAISS]: """Retrieve existing vector store if available.""" try: if st.session_state.get('vector_store'): current_docs = set(document_ids) stored_docs = set( metadata['document_id'] for metadata in st.session_state.vector_store.docstore.get_all_metadatas() ) # If the document sets match, reuse existing vector store if current_docs == stored_docs: return st.session_state.vector_store return None except Exception as e: st.error(f"Error checking vector store: {e}") return None def get_uncategorized_documents(conn: sqlite3.Connection) -> List[Dict]: """Get documents that aren't in any collection.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id WHERE dc.collection_id IS NULL ORDER BY d.upload_date DESC ''') return [{ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': [] } for row in cursor.fetchall()] except sqlite3.Error as e: st.error(f"Error retrieving uncategorized documents: {e}") return [] def get_documents_for_chat(conn: sqlite3.Connection, collection_id: Optional[int] = None) -> List[Dict]: """Get documents for chat, either from a collection or all documents.""" try: with conn_lock: cursor = conn.cursor() if collection_id: cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date FROM documents d JOIN document_collections dc ON d.id = dc.document_id WHERE dc.collection_id = ? 
ORDER BY d.upload_date DESC ''', (collection_id,)) else: cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date FROM documents d ORDER BY d.upload_date DESC ''') return [{ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3] } for row in cursor.fetchall()] except sqlite3.Error as e: st.error(f"Error retrieving documents for chat: {e}") return [] def get_all_documents(conn: sqlite3.Connection) -> List[Dict]: """Get all documents with their metadata and collections.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date, GROUP_CONCAT(c.name) as collections FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id LEFT JOIN collections c ON dc.collection_id = c.id GROUP BY d.id ORDER BY d.upload_date DESC ''') documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': row[4].split(',') if row[4] else [] }) return documents except sqlite3.Error as e: st.error(f"Error retrieving documents: {e}") return [] def get_document_queries(conn: sqlite3.Connection, document_id: int) -> List[Dict]: """Get all queries associated with a document.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT id, query, response, query_date FROM queries WHERE document_id = ? ORDER BY query_date DESC ''', (document_id,)) queries = [] for row in cursor.fetchall(): queries.append({ 'id': row[0], 'query': row[1], 'response': row[2], 'query_date': row[3] }) return queries except sqlite3.Error as e: st.error(f"Error retrieving document queries: {e}") return [] def add_query(conn: sqlite3.Connection, query: str, response: str, document_id: Optional[int] = None) -> bool: """Add a new query and its response.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT INTO queries (query, response, document_id) VALUES (?, ?, ?) 
''', (query, response, document_id)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error adding query: {e}") return False def create_chat_tables(conn: sqlite3.Connection) -> None: """Create necessary tables for chat management.""" try: with conn_lock: cursor = conn.cursor() # Create chats table cursor.execute(''' CREATE TABLE IF NOT EXISTS chats ( id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT NOT NULL, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, collection_id INTEGER, FOREIGN KEY (collection_id) REFERENCES collections (id) ON DELETE SET NULL ) ''') # Create chat messages table cursor.execute(''' CREATE TABLE IF NOT EXISTS chat_messages ( id INTEGER PRIMARY KEY AUTOINCREMENT, chat_id INTEGER NOT NULL, role TEXT NOT NULL, content TEXT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, metadata TEXT, -- Store metadata as JSON string FOREIGN KEY (chat_id) REFERENCES chats (id) ON DELETE CASCADE ) ''') conn.commit() except sqlite3.Error as e: st.error(f"Error creating chat tables: {e}") raise def create_new_chat(conn: sqlite3.Connection, title: str, collection_id: Optional[int] = None) -> Optional[int]: """Create a new chat session.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT INTO chats (title, collection_id, created_at, last_updated) VALUES (?, ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP) ''', (title, collection_id)) conn.commit() return cursor.lastrowid except sqlite3.Error as e: st.error(f"Error creating new chat: {e}") return None def save_chat_message(conn: sqlite3.Connection, chat_id: int, role: str, content: str, metadata: Optional[Dict] = None) -> Optional[int]: """Save a chat message to the database.""" try: with conn_lock: cursor = conn.cursor() # Convert metadata to JSON string if present metadata_str = json.dumps(metadata) if metadata else None # Insert message cursor.execute(''' INSERT INTO chat_messages (chat_id, role, content, metadata, timestamp) VALUES (?, ?, ?, ?, CURRENT_TIMESTAMP) ''', (chat_id, role, content, metadata_str)) # Update chat last_updated timestamp cursor.execute(''' UPDATE chats SET last_updated = CURRENT_TIMESTAMP WHERE id = ? ''', (chat_id,)) conn.commit() return cursor.lastrowid except sqlite3.Error as e: st.error(f"Error saving chat message: {e}") return None def get_all_chats(conn: sqlite3.Connection) -> List[Dict]: """Retrieve all chat sessions.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT c.id, c.title, c.created_at, c.last_updated, c.collection_id, COUNT(m.id) as message_count, MAX(m.timestamp) as last_message FROM chats c LEFT JOIN chat_messages m ON c.id = m.chat_id GROUP BY c.id ORDER BY c.last_updated DESC ''') chats = [] for row in cursor.fetchall(): chats.append({ 'id': row[0], 'title': row[1], 'created_at': row[2], 'last_updated': row[3], 'collection_id': row[4], 'message_count': row[5], 'last_message': row[6] }) return chats except sqlite3.Error as e: st.error(f"Error retrieving chats: {e}") return [] def get_chat_messages(conn: sqlite3.Connection, chat_id: int) -> List[Dict]: """Retrieve all messages for a specific chat.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT id, role, content, metadata, timestamp FROM chat_messages WHERE chat_id = ? 
ORDER BY timestamp ''', (chat_id,)) messages = [] for row in cursor.fetchall(): # Parse metadata JSON if present metadata = json.loads(row[3]) if row[3] else None # Convert to appropriate message type if row[1] == 'human': message = HumanMessage(content=row[2]) else: message = AIMessage(content=row[2], additional_kwargs={'metadata': metadata}) messages.append(message) return messages except sqlite3.Error as e: st.error(f"Error retrieving chat messages: {e}") return [] def delete_chat(conn: sqlite3.Connection, chat_id: int) -> bool: """Delete a chat session and all its messages.""" try: with conn_lock: cursor = conn.cursor() # Messages will be automatically deleted due to CASCADE cursor.execute('DELETE FROM chats WHERE id = ?', (chat_id,)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error deleting chat: {e}") return False def update_chat_title(conn: sqlite3.Connection, chat_id: int, new_title: str) -> bool: """Update the title of a chat session.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' UPDATE chats SET title = ?, last_updated = CURRENT_TIMESTAMP WHERE id = ? ''', (new_title, chat_id)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error updating chat title: {e}") return False def get_chat_by_id(conn: sqlite3.Connection, chat_id: int) -> Optional[Dict]: """Retrieve a specific chat session by ID.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT c.id, c.title, c.created_at, c.last_updated, c.collection_id, COUNT(m.id) as message_count FROM chats c LEFT JOIN chat_messages m ON c.id = m.chat_id WHERE c.id = ? GROUP BY c.id ''', (chat_id,)) row = cursor.fetchone() if row: return { 'id': row[0], 'title': row[1], 'created_at': row[2], 'last_updated': row[3], 'collection_id': row[4], 'message_count': row[5] } return None except sqlite3.Error as e: st.error(f"Error retrieving chat: {e}") return None def export_chat_history(conn: sqlite3.Connection, chat_id: int) -> Optional[Dict]: """Export a chat session with all its messages.""" try: chat = get_chat_by_id(conn, chat_id) if not chat: return None messages = get_chat_messages(conn, chat_id) return { 'chat_info': chat, 'messages': [ { 'role': 'human' if isinstance(msg, HumanMessage) else 'assistant', 'content': msg.content, 'metadata': msg.additional_kwargs.get('metadata') if isinstance(msg, AIMessage) else None } for msg in messages ] } except Exception as e: st.error(f"Error exporting chat history: {e}") return None def import_chat_history(conn: sqlite3.Connection, chat_data: Dict) -> Optional[int]: """Import a chat session from exported data.""" try: with conn_lock: # Create new chat chat_id = create_new_chat( conn, chat_data['chat_info']['title'], chat_data['chat_info'].get('collection_id') ) if not chat_id: return None # Import messages for msg in chat_data['messages']: save_chat_message( conn, chat_id, msg['role'], msg['content'], msg.get('metadata') ) return chat_id except Exception as e: st.error(f"Error importing chat history: {e}") return None # utils/database.py def create_chat_tables(conn): """Create tables for chat management.""" cursor = conn.cursor() cursor.execute(''' CREATE TABLE IF NOT EXISTS chats ( id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT NOT NULL, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) ''') cursor.execute(''' CREATE TABLE IF NOT EXISTS chat_messages ( id INTEGER PRIMARY KEY AUTOINCREMENT, chat_id INTEGER, role TEXT NOT NULL, content TEXT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (chat_id) REFERENCES 
chats (id) ON DELETE CASCADE ) ''') conn.commit() def save_chat(conn, chat_title: str, messages: List[Dict]): """Save chat history.""" cursor = conn.cursor() cursor.execute('INSERT INTO chats (title) VALUES (?)', (chat_title,)) chat_id = cursor.lastrowid for msg in messages: cursor.execute(''' INSERT INTO chat_messages (chat_id, role, content) VALUES (?, ?, ?) ''', (chat_id, msg['role'], msg['content'])) conn.commit() return chat_id # components/chat.py def display_chat_manager(): """Display chat management interface.""" st.sidebar.markdown("### Chat Management") # New chat button if st.sidebar.button("New Chat"): st.session_state.messages = [] st.session_state.current_chat_id = None # Save current chat if st.session_state.messages and st.sidebar.button("Save Chat"): chat_title = st.sidebar.text_input("Chat Title", value=f"Chat {datetime.now().strftime('%Y-%m-%d %H:%M')}") if chat_title: save_chat(st.session_state.db_conn, chat_title, st.session_state.messages) st.sidebar.success("Chat saved!") # Load previous chats chats = get_all_chats(st.session_state.db_conn) if chats: st.sidebar.markdown("### Previous Chats") for chat in chats: if st.sidebar.button(f"📜 {chat['title']}", key=f"chat_{chat['id']}"): st.session_state.messages = get_chat_messages(st.session_state.db_conn, chat['id']) st.session_state.current_chat_id = chat['id'] st.rerun() def add_annotation(conn: sqlite3.Connection, document_id: int, annotation: str, page_number: Optional[int] = None) -> bool: """Add an annotation to a document.""" try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT INTO annotations (document_id, annotation, page_number) VALUES (?, ?, ?) ''', (document_id, annotation, page_number)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error adding annotation: {e}") return False def create_tables(conn): """ Create necessary tables in the database. Args: conn (sqlite3.Connection): SQLite database connection. """ try: sql_create_documents_table = ''' CREATE TABLE IF NOT EXISTS documents ( id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, content TEXT NOT NULL, upload_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); ''' sql_create_queries_table = ''' CREATE TABLE IF NOT EXISTS queries ( id INTEGER PRIMARY KEY AUTOINCREMENT, query TEXT NOT NULL, response TEXT NOT NULL, document_id INTEGER, query_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (document_id) REFERENCES documents (id) ); ''' sql_create_annotations_table = ''' CREATE TABLE IF NOT EXISTS annotations ( id INTEGER PRIMARY KEY AUTOINCREMENT, document_id INTEGER NOT NULL, annotation TEXT NOT NULL, page_number INTEGER, annotation_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (document_id) REFERENCES documents (id) ); ''' conn.execute(sql_create_documents_table) conn.execute(sql_create_queries_table) conn.execute(sql_create_annotations_table) except Error as e: st.error(f"Error: {e}") def get_documents(conn): """ Retrieve all documents from the database. Args: conn (sqlite3.Connection): SQLite database connection. Returns: tuple: (list of document contents, list of document names). 
""" try: with conn_lock: cursor = conn.cursor() cursor.execute("SELECT content, name FROM documents") results = cursor.fetchall() if not results: return [], [] # Separate contents and names document_contents = [row[0] for row in results] document_names = [row[1] for row in results] return document_contents, document_names except Error as e: st.error(f"Error retrieving documents: {e}") return [], [] def insert_document(conn, name, content): """ Insert a new document into the database. Args: conn (sqlite3.Connection): SQLite database connection. name (str): Name of the document. content (str): Content of the document. Returns: int: ID of the inserted document, or None if insertion failed. """ try: with conn_lock: cursor = conn.cursor() sql = '''INSERT INTO documents (name, content) VALUES (?, ?)''' cursor.execute(sql, (name, content)) conn.commit() return cursor.lastrowid except Error as e: st.error(f"Error inserting document: {e}") return None def verify_database_tables(conn): """Verify that all required tables exist.""" try: cursor = conn.cursor() cursor.execute("SELECT name FROM sqlite_master WHERE type='table'") tables = [table[0] for table in cursor.fetchall()] # If collections table doesn't exist, force recreate it if 'collections' not in tables: if not force_recreate_collections_tables(conn): st.error("Failed to recreate collections tables!") return 'collections' in tables except Exception as e: st.error(f"Error verifying tables: {e}") return False def verify_vector_store(vector_store): """ Verify that the vector store has documents loaded. Args: vector_store (FAISS): FAISS vector store instance. Returns: bool: True if vector store is properly initialized with documents. """ try: # Try to perform a simple similarity search test_results = vector_store.similarity_search("test", k=1) return len(test_results) > 0 except Exception as e: st.error(f"Vector store verification failed: {e}") return False def handle_document_upload(uploaded_files, **kwargs): """ Handle document upload with progress tracking and collection support. 
Args: uploaded_files (list): List of uploaded files **kwargs: Additional arguments including: - collection_id (int, optional): ID of the collection to add documents to """ try: # Initialize session state variables if they don't exist if 'qa_system' not in st.session_state: st.session_state.qa_system = None if 'vector_store' not in st.session_state: st.session_state.vector_store = None # Create progress containers progress_container = st.empty() status_container = st.empty() details_container = st.empty() # Initialize progress bar progress_bar = progress_container.progress(0) status_container.info("🔄 Initializing document processing...") # Reset existing states st.session_state.vector_store = None st.session_state.qa_system = None # Initialize embeddings (10% progress) status_container.info("🔄 Initializing embeddings model...") embeddings = get_embeddings_model() if not embeddings: status_container.error("❌ Failed to initialize embeddings model") return False progress_bar.progress(10) # Process documents all_chunks = [] documents = [] document_names = [] progress_per_file = 70 / len(uploaded_files) current_progress = 10 collection_id = kwargs.get('collection_id') for idx, uploaded_file in enumerate(uploaded_files): file_name = uploaded_file.name status_container.info(f"🔄 Processing document {idx + 1}/{len(uploaded_files)}: {file_name}") # Create temporary file with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file: tmp_file.write(uploaded_file.getvalue()) tmp_file.flush() # Process document with chunking chunks, content = process_document(tmp_file.name) # Store in database doc_id = insert_document(st.session_state.db_conn, file_name, content) if not doc_id: status_container.error(f"❌ Failed to store document: {file_name}") continue # Add to collection if specified if collection_id: if not add_document_to_collection(st.session_state.db_conn, doc_id, collection_id): status_container.warning(f"âš ī¸ Failed to add document to collection: {file_name}") # Add chunks with metadata for chunk in chunks: chunk.metadata.update({ "source": file_name, "document_id": doc_id, "collection_id": collection_id if collection_id else None }) all_chunks.extend(chunks) documents.append(content) document_names.append(file_name) current_progress += progress_per_file progress_bar.progress(int(current_progress)) # Initialize vector store with chunks status_container.info("🔄 Initializing vector store...") vector_store = FAISS.from_documents( all_chunks, embeddings ) # Verify vector store status_container.info("🔄 Verifying document indexing...") details_container.text("✨ Performing final checks...") if not verify_vector_store(vector_store): status_container.error("❌ Vector store verification failed") return False # Initialize QA system (90-100% progress) status_container.info("🔄 Setting up QA system...") qa_system = initialize_qa_system(vector_store) if not qa_system: status_container.error("❌ Failed to initialize QA system") return False # Store in session state if collection_id: if 'vector_stores' not in st.session_state: st.session_state.vector_stores = {} st.session_state.vector_stores[collection_id] = vector_store else: st.session_state.vector_store = vector_store st.session_state.qa_system = qa_system # Complete! 
progress_bar.progress(100) status_container.success("✅ Documents processed successfully!") details_container.markdown( f""" 🎉 **Ready to chat!** - Documents processed: {len(documents)} - Total content size: {sum(len(doc) for doc in documents) / 1024:.2f} KB - {"Added to collection" if collection_id else "Processed as standalone documents"} You can now start asking questions about your documents! """ ) # Add notification st.balloons() # Clean up progress display after 3 seconds time.sleep(3) progress_container.empty() status_container.empty() details_container.empty() return True except Exception as e: st.error(f"❌ Error processing documents: {str(e)}") if status_container: status_container.error(traceback.format_exc()) # Reset states on error st.session_state.vector_store = None st.session_state.qa_system = None st.session_state.chat_ready = False return False # Add these to your database.py file def remove_from_collection(conn: sqlite3.Connection, document_id: int, collection_id: int) -> bool: """ Remove a document from a collection. Args: conn (sqlite3.Connection): Database connection document_id (int): ID of the document to remove collection_id (int): ID of the collection Returns: bool: True if successful """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' DELETE FROM document_collections WHERE document_id = ? AND collection_id = ? ''', (document_id, collection_id)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error removing document from collection: {e}") return False def update_collection(conn: sqlite3.Connection, collection_id: int, name: Optional[str] = None, description: Optional[str] = None) -> bool: """ Update collection details. Args: conn (sqlite3.Connection): Database connection collection_id (int): ID of the collection to update name (Optional[str]): New name for the collection description (Optional[str]): New description for the collection Returns: bool: True if successful """ try: with conn_lock: updates = [] params = [] if name is not None: updates.append("name = ?") params.append(name) if description is not None: updates.append("description = ?") params.append(description) if not updates: return True # Nothing to update params.append(collection_id) cursor = conn.cursor() cursor.execute(f''' UPDATE collections SET {", ".join(updates)} WHERE id = ? ''', params) conn.commit() return True except sqlite3.Error as e: st.error(f"Error updating collection: {e}") return False def search_documents(conn: sqlite3.Connection, query: str, collection_id: Optional[int] = None, filters: Optional[Dict] = None) -> List[Dict]: """ Search documents using fuzzy matching and filters. Args: conn (sqlite3.Connection): Database connection query (str): Search query collection_id (Optional[int]): Filter by collection filters (Optional[Dict]): Additional filters Returns: List[Dict]: List of matching documents """ try: with conn_lock: cursor = conn.cursor() # Base query sql = """ SELECT DISTINCT d.id, d.name, d.content, d.upload_date, GROUP_CONCAT(c.name) as collections FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id LEFT JOIN collections c ON dc.collection_id = c.id """ params = [] where_clauses = [] # Add collection filter if specified if collection_id: where_clauses.append("dc.collection_id = ?") params.append(collection_id) # Add date filters if specified if filters and 'date_range' in filters: start_date, end_date = filters['date_range'] where_clauses.append("d.upload_date BETWEEN ? 
AND ?") params.extend([start_date, end_date]) # Add text search if query: where_clauses.append("(d.name LIKE ? OR d.content LIKE ?)") search_term = f"%{query}%" params.extend([search_term, search_term]) # Combine WHERE clauses if where_clauses: sql += " WHERE " + " AND ".join(where_clauses) sql += " GROUP BY d.id ORDER BY d.upload_date DESC" # Execute query cursor.execute(sql, params) documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': row[4].split(',') if row[4] else [] }) return documents except sqlite3.Error as e: st.error(f"Error searching documents: {e}") return [] def get_all_documents(conn: sqlite3.Connection) -> List[Dict]: """ Get all documents with their metadata and collection info. Args: conn (sqlite3.Connection): Database connection Returns: List[Dict]: List of documents with their metadata """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date, GROUP_CONCAT(c.name) as collections FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id LEFT JOIN collections c ON dc.collection_id = c.id GROUP BY d.id ORDER BY d.upload_date DESC ''') documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': row[4].split(',') if row[4] else [] }) return documents except sqlite3.Error as e: st.error(f"Error retrieving documents: {e}") return [] def get_document_by_id(conn: sqlite3.Connection, document_id: int) -> Optional[Dict]: """ Get a single document by its ID. Args: conn (sqlite3.Connection): Database connection document_id (int): ID of the document to retrieve Returns: Optional[Dict]: Document data if found, None otherwise """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date, GROUP_CONCAT(c.name) as collections FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id LEFT JOIN collections c ON dc.collection_id = c.id WHERE d.id = ? GROUP BY d.id ''', (document_id,)) row = cursor.fetchone() if row: return { 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': row[4].split(',') if row[4] else [] } return None except sqlite3.Error as e: st.error(f"Error retrieving document: {e}") return None def get_recent_documents(conn: sqlite3.Connection, limit: int = 5) -> List[Dict]: """ Get most recently uploaded documents. Args: conn (sqlite3.Connection): Database connection limit (int): Maximum number of documents to return Returns: List[Dict]: List of recent documents """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date, GROUP_CONCAT(c.name) as collections FROM documents d LEFT JOIN document_collections dc ON d.id = dc.document_id LEFT JOIN collections c ON dc.collection_id = c.id GROUP BY d.id ORDER BY d.upload_date DESC LIMIT ? ''', (limit,)) documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3], 'collections': row[4].split(',') if row[4] else [] }) return documents except sqlite3.Error as e: st.error(f"Error retrieving recent documents: {e}") return [] def get_collections(conn: sqlite3.Connection) -> List[Dict]: """ Get all collections with their document counts. 
Args: conn (sqlite3.Connection): Database connection Returns: List[Dict]: List of collections with metadata """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT c.id, c.name, c.description, c.created_at, COUNT(DISTINCT dc.document_id) as doc_count FROM collections c LEFT JOIN document_collections dc ON c.id = dc.collection_id GROUP BY c.id ORDER BY c.name ''') collections = [] for row in cursor.fetchall(): collections.append({ 'id': row[0], 'name': row[1], 'description': row[2], 'created_at': row[3], 'doc_count': row[4] }) return collections except sqlite3.Error as e: st.error(f"Error retrieving collections: {e}") return [] def get_collection_documents(conn: sqlite3.Connection, collection_id: int) -> List[Dict]: """ Get all documents in a specific collection. Args: conn (sqlite3.Connection): Database connection collection_id (int): ID of the collection Returns: List[Dict]: List of documents in the collection """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' SELECT d.id, d.name, d.content, d.upload_date FROM documents d JOIN document_collections dc ON d.id = dc.document_id WHERE dc.collection_id = ? ORDER BY d.upload_date DESC ''', (collection_id,)) documents = [] for row in cursor.fetchall(): documents.append({ 'id': row[0], 'name': row[1], 'content': row[2], 'upload_date': row[3] }) return documents except sqlite3.Error as e: st.error(f"Error retrieving collection documents: {e}") return [] def create_collection(conn: sqlite3.Connection, name: str, description: str = "") -> Optional[int]: """ Create a new collection. Args: conn (sqlite3.Connection): Database connection name (str): Name of the collection description (str): Optional description Returns: Optional[int]: ID of the created collection if successful """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT INTO collections (name, description) VALUES (?, ?) ''', (name, description)) conn.commit() return cursor.lastrowid except sqlite3.Error as e: st.error(f"Error creating collection: {e}") return None def add_document_to_collection(conn: sqlite3.Connection, document_id: int, collection_id: int) -> bool: """ Add a document to a collection. Args: conn (sqlite3.Connection): Database connection document_id (int): ID of the document collection_id (int): ID of the collection Returns: bool: True if successful """ try: with conn_lock: cursor = conn.cursor() cursor.execute(''' INSERT OR IGNORE INTO document_collections (document_id, collection_id) VALUES (?, ?) ''', (document_id, collection_id)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error adding document to collection: {e}") return False def process_document(file_path): """ Process a PDF document with proper chunking. 
Args: file_path (str): Path to the PDF file Returns: tuple: (list of document chunks, full content of the document) """ # Load PDF loader = PyPDFLoader(file_path) documents = loader.load() # Create text splitter text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=200, length_function=len, separators=["\n\n", "\n", " ", ""] ) # Split documents into chunks chunks = text_splitter.split_documents(documents) # Extract full content for database storage full_content = "\n".join(doc.page_content for doc in documents) return chunks, full_content def delete_collection(conn: sqlite3.Connection, collection_id: int) -> bool: """Delete a collection and its associations.""" try: with conn_lock: cursor = conn.cursor() # Delete the collection's document associations first cursor.execute(''' DELETE FROM document_collections WHERE collection_id = ? ''', (collection_id,)) # Then delete the collection itself cursor.execute(''' DELETE FROM collections WHERE id = ? ''', (collection_id,)) conn.commit() return True except sqlite3.Error as e: st.error(f"Error deleting collection: {e}") return False def display_vector_store_info(): """ Display information about the current vector store state. """ if 'vector_store' not in st.session_state: st.info("â„šī¸ No documents loaded yet.") return try: # Get the vector store from session state vector_store = st.session_state.vector_store # Get basic stats test_query = vector_store.similarity_search("test", k=1) doc_count = len(test_query) # Create an expander for detailed info with st.expander("📊 Knowledge Base Status"): col1, col2 = st.columns(2) with col1: st.metric( label="Documents Loaded", value=doc_count ) with col2: st.metric( label="System Status", value="Ready" if verify_vector_store(vector_store) else "Not Ready" ) # Display sample queries if verify_vector_store(vector_store): st.markdown("### 🔍 Sample Document Snippets") sample_docs = vector_store.similarity_search("", k=3) for i, doc in enumerate(sample_docs, 1): with st.container(): st.markdown(f"**Snippet {i}:**") st.text(doc.page_content[:200] + "...") except Exception as e: st.error(f"Error displaying vector store info: {e}") st.error(traceback.format_exc()) def process_and_store_document(uploaded_file) -> Optional[int]: """ Process an uploaded document and store it in the database. Args: uploaded_file: Streamlit's UploadedFile object Returns: Optional[int]: The ID of the stored document if successful, None otherwise """ try: # Create a temporary file to store the uploaded content with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file: tmp_file.write(uploaded_file.getvalue()) tmp_file.flush() # Load and process the PDF loader = PyPDFLoader(tmp_file.name) documents = loader.load() # Create text splitter for processing text_splitter = RecursiveCharacterTextSplitter( chunk_size=1000, chunk_overlap=200, length_function=len, separators=["\n\n", "\n", " ", ""] ) # Split documents into chunks chunks = text_splitter.split_documents(documents) # Extract full content for database storage full_content = "\n".join(doc.page_content for doc in documents) # Store in database with st.session_state.db_conn as conn: cursor = conn.cursor() # Insert document cursor.execute(''' INSERT INTO documents (name, content, upload_date) VALUES (?, ?, ?) 
''', (uploaded_file.name, full_content, datetime.now())) # Get the document ID document_id = cursor.lastrowid conn.commit() return document_id except Exception as e: st.error(f"Error processing document {uploaded_file.name}: {str(e)}") import traceback st.error(traceback.format_exc()) return None finally: # Clean up temporary file import os try: os.unlink(tmp_file.name) except: pass def get_document_content(conn: sqlite3.Connection, document_id: int) -> Optional[str]: """ Retrieve the content of a specific document. Args: conn: Database connection document_id: ID of the document to retrieve Returns: Optional[str]: The document content if found, None otherwise """ try: cursor = conn.cursor() cursor.execute(''' SELECT content FROM documents WHERE id = ? ''', (document_id,)) result = cursor.fetchone() return result[0] if result else None except sqlite3.Error as e: st.error(f"Error retrieving document content: {e}") return None def get_context_with_sources(retriever, query): """Get context with source documents.""" docs = retriever.get_relevant_documents(query) formatted_docs = [] for doc in docs: source = doc.metadata.get('source', 'Unknown source') formatted_docs.append(f"\nFrom {source}:\n{doc.page_content}") return "\n".join(formatted_docs) def format_chat_history(chat_history): """Format chat history for the prompt.""" if not chat_history or not isinstance(chat_history, list): return [] return [msg for msg in chat_history if isinstance(msg, (HumanMessage, AIMessage))] def initialize_qa_system(vector_store): """Initialize QA system with optimized retrieval.""" try: llm = ChatOpenAI( temperature=0.5, model_name="gpt-4", max_tokens=4000, api_key=os.environ.get("OPENAI_API_KEY") ) # Optimize retriever settings retriever = vector_store.as_retriever( search_kwargs={ "k": 3, "fetch_k": 5, "include_metadata": True } ) # Create system prompt template prompt = ChatPromptTemplate.from_messages([ ("system", """ You are an expert consultant specializing in analyzing Request for Proposal (RFP) documents. Your goal is to assist users by providing clear, concise, and professional insights based on the content provided. Please adhere to the following guidelines: Begin with a summary that highlights the key findings or answers the main query. Use clear section headers to organize information logically. Utilize bullet points for lists or complex information. Cite specific sections or page numbers from the RFP document when referencing information. Maintain professional formatting using Markdown. Keep responses focused and directly related to the query. Acknowledge when information falls outside the provided context. Use formal and professional language. Ensure accuracy and completeness in responses. """), MessagesPlaceholder(variable_name="chat_history"), ("human", "{input}\n\nContext: {context}") ]) # Create the chain chain = ( { "context": lambda x: get_context_with_sources(retriever, x["input"]), "chat_history": lambda x: format_chat_history(x["chat_history"]), "input": lambda x: x["input"] } | prompt | llm ) return chain except Exception as e: st.error(f"Error initializing QA system: {e}") return None # FAISS vector store initialization def initialize_faiss(embeddings, documents, document_names): """ Initialize FAISS vector store. Args: embeddings (Embeddings): Embeddings model to use. documents (list): List of document contents. document_names (list): List of document names. Returns: FAISS: FAISS vector store instance or None if initialization fails. 
""" try: from langchain.vectorstores import FAISS vector_store = FAISS.from_texts( documents, embeddings, metadatas=[{"source": name} for name in document_names], ) return vector_store except Exception as e: st.error(f"Error initializing FAISS: {e}") return None # Embeddings model retrieval @st.cache_resource def get_embeddings_model(): """ Get the embeddings model. Returns: Embeddings: Embeddings model instance or None if loading fails. """ try: from langchain.embeddings import HuggingFaceEmbeddings model_name = "sentence-transformers/all-MiniLM-L6-v2" embeddings = HuggingFaceEmbeddings(model_name=model_name) return embeddings except Exception as e: st.error(f"Error loading embeddings model: {e}") return None