JARVISXIRONMAN committed on
Commit
54f64f1
·
verified ·
1 Parent(s): d4f2501

Create rag/ingest.py

Browse files
Files changed (1) hide show
  1. rag/ingest.py +43 -0
rag/ingest.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import fitz # PyMuPDF
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain_community.vectorstores import Chroma
5
+ from langchain_community.embeddings import HuggingFaceEmbeddings
6
+
7
+ CHROMA_PATH = "data/chroma"
8
+ DOCS_PATH = "data/user_docs"
9
+
10
def extract_text_from_pdfs(folder_path):
    """Extract and concatenate the text of every ``.pdf`` file in *folder_path*.

    Args:
        folder_path: Directory scanned (non-recursively) for files ending
            in ``.pdf``.

    Returns:
        One string with the text of every page of every matching PDF,
        in sorted-filename order.

    Raises:
        FileNotFoundError: If *folder_path* does not exist.
    """
    pages = []
    # sorted() gives a stable, reproducible ingest order; os.listdir alone
    # returns entries in arbitrary, platform-dependent order.
    for filename in sorted(os.listdir(folder_path)):
        if not filename.endswith(".pdf"):
            continue
        file_path = os.path.join(folder_path, filename)
        doc = fitz.open(file_path)
        try:
            for page in doc:
                pages.append(page.get_text())
        finally:
            # Release the document handle even if text extraction raises.
            doc.close()
    # Single join instead of repeated ``+=`` (quadratic on large corpora).
    return "".join(pages)
20
+
21
def chunk_text(text, chunk_size=500, chunk_overlap=100):
    """Split *text* into overlapping chunks wrapped as LangChain documents.

    Args:
        text: Raw text to split.
        chunk_size: Target chunk length, in characters.
        chunk_overlap: Characters shared between consecutive chunks.

    Returns:
        The list of ``Document`` objects produced by the splitter.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    documents = text_splitter.create_documents([text])
    return documents
24
+
25
def embed_and_store(chunks):
    """Embed *chunks* and persist them into the on-disk Chroma store.

    Args:
        chunks: LangChain ``Document`` objects to embed and index under
            ``CHROMA_PATH``.
    """
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    vector_store = Chroma.from_documents(
        documents=chunks,
        embedding=embeddings,
        persist_directory=CHROMA_PATH,
    )
    vector_store.persist()
    print("✅ Embeddings stored successfully in ChromaDB.")
30
+
31
def run_ingest_pipeline():
    """Run the full ingest: PDFs -> raw text -> chunks -> Chroma embeddings."""
    print("📥 Extracting text from PDFs...")
    extracted_text = extract_text_from_pdfs(DOCS_PATH)

    print("✂️ Splitting into chunks...")
    document_chunks = chunk_text(extracted_text)

    print(f"🧠 Total chunks created: {len(document_chunks)}")
    print("🔗 Embedding & saving into Chroma...")
    embed_and_store(document_chunks)
41
+
42
# Script entry point: run the ingest pipeline only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    run_ingest_pipeline()