{ "cells": [ { "cell_type": "code", "execution_count": 2, "id": "9304576f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import os\n", "from dotenv import load_dotenv\n", "load_dotenv()" ] }, { "cell_type": "code", "execution_count": 3, "id": "bbaf9dea", "metadata": {}, "outputs": [], "source": [ "from langchain_aws import ChatBedrockConverse\n", "LLM_MODEL_ID = \"us.meta.llama3-3-70b-instruct-v1:0\"\n", "LLM_REGION = \"us-east-1\"\n", "\n", "import logging\n", "llm = ChatBedrockConverse(\n", " model_id=LLM_MODEL_ID,\n", " region_name=LLM_REGION\n", ")\n", "logging.info(f\"LLM initialized with model_id={LLM_MODEL_ID}, region_name={LLM_REGION}\")" ] }, { "cell_type": "markdown", "id": "751de1c5", "metadata": {}, "source": [ "# Data Ingestion" ] }, { "cell_type": "code", "execution_count": 13, "id": "6dba7a05", "metadata": {}, "outputs": [], "source": [ "from langchain_community.document_loaders import TextLoader,DirectoryLoader\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "\n", "from langchain_chroma import Chroma\n", "\n", "from langchain_ollama.embeddings import OllamaEmbeddings" ] }, { "cell_type": "code", "execution_count": 7, "id": "68a36c6c", "metadata": {}, "outputs": [], "source": [ "def load_documents(docs_path=\"docs\"):\n", " \"\"\"Load all text files from the docs directory\"\"\"\n", " print(f\"Loading documents from {docs_path}...\")\n", " \n", " # Check if docs directory exists\n", " if not os.path.exists(docs_path):\n", " raise FileNotFoundError(f\"The directory {docs_path} does not exist. Please create it and add your company files.\")\n", " \n", " # Load all .txt files from the docs directory\n", " loader = DirectoryLoader(\n", " path=docs_path,\n", " glob=\"*.txt\",\n", " loader_cls=TextLoader\n", " )\n", " \n", " documents = loader.load()\n", " \n", " if len(documents) == 0:\n", " raise FileNotFoundError(f\"No .txt files found in {docs_path}. Please add your company documents.\")\n", " \n", " \n", " for i, doc in enumerate(documents[:2]): # Show first 2 documents\n", " print(f\"\\nDocument {i+1}:\")\n", " print(f\" Source: {doc.metadata['source']}\")\n", " print(f\" Content length: {len(doc.page_content)} characters\")\n", " print(f\" Content preview: {doc.page_content[:100]}...\")\n", " print(f\" metadata: {doc.metadata}\")\n", "\n", " return documents" ] }, { "cell_type": "code", "execution_count": null, "id": "d4b05f9c", "metadata": {}, "outputs": [], "source": [ "def split_documents(documents, chunk_size=1000, chunk_overlap=0):\n", " \"\"\"Split documents into smaller chunks with overlap\"\"\"\n", " print(\"Splitting documents into chunks...\")\n", " \n", " text_splitter = RecursiveCharacterTextSplitter(\n", " chunk_size=chunk_size, \n", " chunk_overlap=chunk_overlap\n", " )\n", " \n", " chunks = text_splitter.split_documents(documents)\n", " \n", " if chunks:\n", " \n", " for i, chunk in enumerate(chunks[:5]):\n", " print(f\"\\n--- Chunk {i+1} ---\")\n", " print(f\"Source: {chunk.metadata['source']}\")\n", " print(f\"Length: {len(chunk.page_content)} characters\")\n", " print(f\"Content:\")\n", " print(chunk.page_content)\n", " print(\"-\" * 50)\n", " \n", " if len(chunks) > 5:\n", " print(f\"\\n... and {len(chunks) - 5} more chunks\")\n", " \n", " return chunks" ] }, { "cell_type": "code", "execution_count": 14, "id": "e9d2c201", "metadata": {}, "outputs": [], "source": [ "def create_vector_store(chunks, persist_directory=\"db/chroma_db\"):\n", " \"\"\"Create and persist ChromaDB vector store\"\"\"\n", " print(\"Creating embeddings and storing in ChromaDB...\")\n", " \n", " embedding_model = OllamaEmbeddings(model=\"gemma2:2b\")\n", " \n", " # Create ChromaDB vector store\n", " print(\"--- Creating vector store ---\")\n", " vectorstore = Chroma.from_documents(\n", " documents=chunks,\n", " embedding=embedding_model,\n", " persist_directory=persist_directory, \n", " collection_metadata={\"hnsw:space\": \"cosine\"}\n", " )\n", " print(\"--- Finished creating vector store ---\")\n", " \n", " print(f\"Vector store created and saved to {persist_directory}\")\n", " return vectorstore" ] }, { "cell_type": "code", "execution_count": 16, "id": "28dd89fe", "metadata": {}, "outputs": [], "source": [ "def main():\n", " \"\"\"Main ingestion pipeline\"\"\"\n", " print(\"=== RAG Document Ingestion Pipeline ===\\n\")\n", " \n", " # Define paths\n", " docs_path = \"data\"\n", " persistent_directory = \"db/chroma_db\"\n", " \n", " # Check if vector store already exists\n", " if os.path.exists(persistent_directory):\n", " print(\"āœ… Vector store already exists. No need to re-process documents.\")\n", " \n", " embedding_model = OllamaEmbeddings(model=\"gemma2:2b\")\n", " vectorstore = Chroma(\n", " persist_directory=persistent_directory,\n", " embedding_function=embedding_model, \n", " collection_metadata={\"hnsw:space\": \"cosine\"}\n", " )\n", " print(f\"Loaded existing vector store with {vectorstore._collection.count()} documents\")\n", " return vectorstore\n", " \n", " print(\"Persistent directory does not exist. Initializing vector store...\\n\")\n", " \n", " # Step 1: Load documents\n", " documents = load_documents(docs_path) \n", "\n", " # Step 2: Split into chunks\n", " chunks = split_documents(documents)\n", " \n", " # # Step 3: Create vector store\n", " vectorstore = create_vector_store(chunks, persistent_directory)\n", " \n", " print(\"\\nāœ… Ingestion complete! Your documents are now ready for RAG queries.\")\n", " return vectorstore" ] }, { "cell_type": "code", "execution_count": 17, "id": "0bbabc0a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "=== RAG Document Ingestion Pipeline ===\n", "\n", "Persistent directory does not exist. Initializing vector store...\n", "\n", "Loading documents from data...\n" ] }, { "ename": "FileNotFoundError", "evalue": "The directory data does not exist. Please create it and add your company files.", "output_type": "error", "traceback": [ "\u001b[31m---------------------------------------------------------------------------\u001b[39m", "\u001b[31mFileNotFoundError\u001b[39m Traceback (most recent call last)", "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[17]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m vectordb=\u001b[43mmain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 2\u001b[39m vectordb\n", "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[16]\u001b[39m\u001b[32m, line 25\u001b[39m, in \u001b[36mmain\u001b[39m\u001b[34m()\u001b[39m\n\u001b[32m 22\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mPersistent directory does not exist. Initializing vector store...\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 24\u001b[39m \u001b[38;5;66;03m# Step 1: Load documents\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m25\u001b[39m documents = \u001b[43mload_documents\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdocs_path\u001b[49m\u001b[43m)\u001b[49m \n\u001b[32m 27\u001b[39m \u001b[38;5;66;03m# Step 2: Split into chunks\u001b[39;00m\n\u001b[32m 28\u001b[39m chunks = split_documents(documents)\n", "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 7\u001b[39m, in \u001b[36mload_documents\u001b[39m\u001b[34m(docs_path)\u001b[39m\n\u001b[32m 5\u001b[39m \u001b[38;5;66;03m# Check if docs directory exists\u001b[39;00m\n\u001b[32m 6\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m os.path.exists(docs_path):\n\u001b[32m----> \u001b[39m\u001b[32m7\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mThe directory \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdocs_path\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m does not exist. Please create it and add your company files.\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 9\u001b[39m \u001b[38;5;66;03m# Load all .txt files from the docs directory\u001b[39;00m\n\u001b[32m 10\u001b[39m loader = DirectoryLoader(\n\u001b[32m 11\u001b[39m path=docs_path,\n\u001b[32m 12\u001b[39m glob=\u001b[33m\"\u001b[39m\u001b[33m*.txt\u001b[39m\u001b[33m\"\u001b[39m,\n\u001b[32m 13\u001b[39m loader_cls=TextLoader\n\u001b[32m 14\u001b[39m )\n", "\u001b[31mFileNotFoundError\u001b[39m: The directory data does not exist. Please create it and add your company files." ] } ], "source": [ "vectordb=main()\n", "vectordb" ] }, { "cell_type": "code", "execution_count": 18, "id": "cb14033a", "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'embedding_model' is not defined", "output_type": "error", "traceback": [ "\u001b[31m---------------------------------------------------------------------------\u001b[39m", "\u001b[31mNameError\u001b[39m Traceback (most recent call last)", "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[18]\u001b[39m\u001b[32m, line 11\u001b[39m\n\u001b[32m 4\u001b[39m load_dotenv()\n\u001b[32m 6\u001b[39m persistent_directory = \u001b[33m\"\u001b[39m\u001b[33mdb/chroma_db\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 9\u001b[39m db = Chroma(\n\u001b[32m 10\u001b[39m persist_directory=persistent_directory,\n\u001b[32m---> \u001b[39m\u001b[32m11\u001b[39m embedding_function=\u001b[43membedding_model\u001b[49m,\n\u001b[32m 12\u001b[39m collection_metadata={\u001b[33m\"\u001b[39m\u001b[33mhnsw:space\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33mcosine\u001b[39m\u001b[33m\"\u001b[39m} \n\u001b[32m 13\u001b[39m )\n\u001b[32m 15\u001b[39m \u001b[38;5;66;03m# Search for relevant documents\u001b[39;00m\n\u001b[32m 16\u001b[39m query = \u001b[33m\"\u001b[39m\u001b[33mHow much did Microsoft pay to acquire GitHub?\u001b[39m\u001b[33m\"\u001b[39m\n", "\u001b[31mNameError\u001b[39m: name 'embedding_model' is not defined" ] } ], "source": [ "from langchain_chroma import Chroma\n", "from dotenv import load_dotenv\n", "\n", "load_dotenv()\n", "\n", "persistent_directory = \"db/chroma_db\"\n", "\n", "\n", "db = Chroma(\n", " persist_directory=persistent_directory,\n", " embedding_function=embedding_model,\n", " collection_metadata={\"hnsw:space\": \"cosine\"} \n", ")\n", "\n", "# Search for relevant documents\n", "query = \"How much did Microsoft pay to acquire GitHub?\"\n", "\n", "retriever = db.as_retriever(search_kwargs={\"k\": 5})\n", "\n", "# retriever = db.as_retriever(\n", "# search_type=\"similarity_score_threshold\",\n", "# search_kwargs={\n", "# \"k\": 5,\n", "# \"score_threshold\": 0.3 # Only return chunks with cosine similarity ≄ 0.3\n", "# }\n", "# )\n", "\n", "relevant_docs = retriever.invoke(query)\n", "\n", "print(f\"User Query: {query}\")\n", "# Display results\n", "print(\"--- Context ---\")\n", "for i, doc in enumerate(relevant_docs, 1):\n", " print(f\"Document {i}:\\n{doc.page_content}\\n\")\n", "\n", "\n", "# Synthetic Questions: \n", "\n", "# 1. \"What was NVIDIA's first graphics accelerator called?\"\n", "# 2. \"Which company did NVIDIA acquire to enter the mobile processor market?\"\n", "# 3. \"What was Microsoft's first hardware product release?\"\n", "# 4. \"How much did Microsoft pay to acquire GitHub?\"\n", "# 5. \"In what year did Tesla begin production of the Roadster?\"\n", "# 6. \"Who succeeded Ze'ev Drori as CEO in October 2008?\"\n", "# 7. \"What was the name of the autonomous spaceport drone ship that achieved the first successful sea landing?\"\n", "# 8. \"What was the original name of Microsoft before it became Microsoft?\"" ] }, { "cell_type": "code", "execution_count": 2, "id": "9b97a0c6", "metadata": {}, "outputs": [ { "ename": "ImportError", "evalue": "cannot import name 'TransfoXLModel' from 'transformers' (c:\\Users\\vansh\\Projects\\AIAgents\\.venv\\Lib\\site-packages\\transformers\\__init__.py)", "output_type": "error", "traceback": [ "\u001b[31m---------------------------------------------------------------------------\u001b[39m", "\u001b[31mImportError\u001b[39m Traceback (most recent call last)", "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msummarizer\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Summarizer\n\u001b[32m 3\u001b[39m document = \u001b[33m\"\"\"\u001b[39m\n\u001b[32m 4\u001b[39m \u001b[33mDeep learning is a subset of machine learning that uses neural networks.\u001b[39m\n\u001b[32m 5\u001b[39m \u001b[33mThese networks consist of multiple layers that can learn complex patterns.\u001b[39m\n\u001b[32m 6\u001b[39m \u001b[33mConvolutional neural networks are widely used in image recognition tasks.\u001b[39m\n\u001b[32m 7\u001b[39m \u001b[33m\"\"\"\u001b[39m\n\u001b[32m 9\u001b[39m model = Summarizer()\n", "\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\vansh\\Projects\\AIAgents\\.venv\\Lib\\site-packages\\summarizer\\__init__.py:1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msummarizer\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mbert\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Summarizer, TransformerSummarizer\n\u001b[32m 3\u001b[39m __all__ = [\u001b[33m\"\u001b[39m\u001b[33mSummarizer\u001b[39m\u001b[33m\"\u001b[39m, \u001b[33m\"\u001b[39m\u001b[33mTransformerSummarizer\u001b[39m\u001b[33m\"\u001b[39m]\n", "\u001b[36mFile \u001b[39m\u001b[32mc:\\Users\\vansh\\Projects\\AIAgents\\.venv\\Lib\\site-packages\\summarizer\\bert.py:4\u001b[39m\n\u001b[32m 1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mfunctools\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m partial\n\u001b[32m 2\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mtyping\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m List, Optional, Union\n\u001b[32m----> \u001b[39m\u001b[32m4\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mtransformers\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m (AlbertModel, AlbertTokenizer, BartModel, BigBirdModel, BigBirdTokenizer,\n\u001b[32m 5\u001b[39m BartTokenizer, BertModel, BertTokenizer,\n\u001b[32m 6\u001b[39m CamembertModel, CamembertTokenizer, CTRLModel,\n\u001b[32m 7\u001b[39m CTRLTokenizer, DistilBertModel, DistilBertTokenizer,\n\u001b[32m 8\u001b[39m GPT2Model, GPT2Tokenizer, LongformerModel,\n\u001b[32m 9\u001b[39m LongformerTokenizer, OpenAIGPTModel,\n\u001b[32m 10\u001b[39m OpenAIGPTTokenizer, PreTrainedModel,\n\u001b[32m 11\u001b[39m PreTrainedTokenizer, RobertaModel, RobertaTokenizer,\n\u001b[32m 12\u001b[39m TransfoXLModel, TransfoXLTokenizer, XLMModel,\n\u001b[32m 13\u001b[39m XLMTokenizer, XLNetModel, XLNetTokenizer)\n\u001b[32m 15\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msummarizer\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01msummary_processor\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m SummaryProcessor\n\u001b[32m 16\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msummarizer\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mtext_processors\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01msentence_handler\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m SentenceHandler\n", "\u001b[31mImportError\u001b[39m: cannot import name 'TransfoXLModel' from 'transformers' (c:\\Users\\vansh\\Projects\\AIAgents\\.venv\\Lib\\site-packages\\transformers\\__init__.py)" ] } ], "source": [ "from summarizer import Summarizer\n", "\n", "document = \"\"\"\n", "Deep learning is a subset of machine learning that uses neural networks.\n", "These networks consist of multiple layers that can learn complex patterns.\n", "Convolutional neural networks are widely used in image recognition tasks.\n", "\"\"\"\n", "\n", "model = Summarizer()\n", "\n", "summary = model(document, ratio=0.3)\n", "\n", "print(summary)" ] }, { "cell_type": "code", "execution_count": null, "id": "b538f506", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "AIAgents (3.12.12)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.12" } }, "nbformat": 4, "nbformat_minor": 5 }