Zubaish committed
Commit: ebecac1 · Parent: f6f60e8
update

Browse files:
- Dockerfile +3 -3
- rag.py +11 -45
- requirements.txt +3 -2
Dockerfile
CHANGED
@@ -1,7 +1,5 @@
 FROM python:3.10-slim
-
 WORKDIR /app
-
 RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*
 
 COPY requirements.txt .
@@ -10,6 +8,8 @@ RUN pip install --no-cache-dir -r requirements.txt
 COPY app.py rag.py ingest.py config.py ./
 COPY frontend ./frontend
 
-
+# This will now succeed because requirements.txt has langchain-chroma
+RUN python ingest.py
 
+EXPOSE 7860
 CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
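With this change the vector index is built when the image is built, not when the container starts: RUN python ingest.py executes after the source files are copied, so a broken ingest now fails the build rather than the running Space. ingest.py itself is not part of this commit; the sketch below is only a guess at its shape, inferred from the packages pinned in requirements.txt (pypdf, langchain-text-splitters, langchain-chroma) and the config.py names that rag.py imports. The docs/ folder, the loader choice, and the chunk sizes are all assumptions.

# Hypothetical sketch of ingest.py -- this file is not shown in the commit.
# Assumptions: source PDFs sit in a docs/ folder available at build time,
# and config.py exports EMBEDDING_MODEL and CHROMA_DIR as rag.py implies.
import os

from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

from config import EMBEDDING_MODEL, CHROMA_DIR

DOCS_DIR = "docs"  # hypothetical location of the source documents

# Load every PDF found in the docs folder.
documents = []
for name in sorted(os.listdir(DOCS_DIR)):
    if name.lower().endswith(".pdf"):
        documents.extend(PyPDFLoader(os.path.join(DOCS_DIR, name)).load())

# Chunk sizes are illustrative, not taken from the repo.
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(documents)

# Persisting to CHROMA_DIR is what lets rag.py reopen the index at runtime.
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
Chroma.from_documents(chunks, embedding=embeddings, persist_directory=CHROMA_DIR)
print(f"Ingested {len(chunks)} chunks into {CHROMA_DIR}")

Note that the COPY lines above only bring in the four Python files and frontend/, so whatever data ingest.py reads must be fetched or copied by a step not shown in this diff.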
rag.py
CHANGED
@@ -2,68 +2,34 @@
 import os
 from transformers import pipeline
 from langchain_huggingface import HuggingFaceEmbeddings
-from langchain_chroma import Chroma
+from langchain_chroma import Chroma  # This requires langchain-chroma package
 from config import EMBEDDING_MODEL, LLM_MODEL, CHROMA_DIR
 
-# -----------------------------
-# 1. Initialize Embeddings
-# -----------------------------
-embeddings = HuggingFaceEmbeddings(
-    model_name=EMBEDDING_MODEL
-)
+# 1. Initialize Embeddings
+embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
 
-# -----------------------------
-# 2. Load Vector DB (Safe Loading)
-# -----------------------------
-# We expect the DB to be pre-built by ingest.py during Docker build
+# 2. Load Vector DB
 if os.path.exists(CHROMA_DIR) and os.listdir(CHROMA_DIR):
     vectordb = Chroma(
         persist_directory=CHROMA_DIR,
         embedding_function=embeddings
     )
-    print(f"✅ Vector DB loaded
+    print(f"✅ Vector DB loaded")
 else:
-    print(f"⚠️ Vector DB not found at {CHROMA_DIR}. Please check ingestion.")
     vectordb = None
+    print(f"⚠️ Vector DB not found")
 
-# -----------------------------
-# 3. LLM Pipeline
-# -----------------------------
-qa_pipeline = pipeline(
-    task="text-generation",
-    model=LLM_MODEL,
-    max_new_tokens=256
-)
+# 3. LLM Pipeline
+qa_pipeline = pipeline(task="text-generation", model=LLM_MODEL, max_new_tokens=256)
 
-# -----------------------------
-# 4. RAG Query Function
-# -----------------------------
 def ask_rag_with_status(question: str):
     if vectordb is None:
-        return "Knowledge base is empty.
+        return "Knowledge base is empty.", "NO_KB"
 
-    # Search for relevant context
     docs = vectordb.similarity_search(question, k=3)
-
-    if not docs:
-        return "No relevant documents found in the knowledge base.", "NO_MATCH"
-
     context = "\n\n".join(d.page_content for d in docs)
 
-    prompt = f"""
-
-    Context:
-    {context}
-
-    Question:
-    {question}
-
-    Answer:"""
-
+    prompt = f"Use the context to answer.\nContext:\n{context}\nQuestion:\n{question}\nAnswer:"
    result = qa_pipeline(prompt)
 
-
-    full_text = result[0]["generated_text"]
-    answer = full_text.split("Answer:")[-1].strip()
-
-    return answer, ["Context retrieved", "LLM processed"]
+    return result[0]["generated_text"].split("Answer:")[-1].strip(), "OK"
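The rewritten ask_rag_with_status returns a (text, status) pair whose status is now a plain string ("NO_KB" or "OK") rather than the old list of progress messages, so a caller can branch on it directly. app.py, the module that uvicorn serves per the Dockerfile CMD, is not shown in this commit; below is a minimal sketch of how it might call the function, with the route path and request model as assumptions.

# Hypothetical app.py wiring -- app.py is not shown in this commit.
from fastapi import FastAPI
from pydantic import BaseModel

from rag import ask_rag_with_status

app = FastAPI()

class Question(BaseModel):
    question: str

@app.post("/ask")  # the route path is an assumption
def ask(payload: Question):
    # ask_rag_with_status returns (answer_text, status_string)
    answer, status = ask_rag_with_status(payload.question)
    return {"answer": answer, "status": status}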
requirements.txt
CHANGED
@@ -4,12 +4,13 @@ pydantic
 python-dotenv
 langchain>=0.2.17
 langchain-community>=0.2.17
-langchain-huggingface
+langchain-huggingface
+langchain-chroma
 langchain-text-splitters==0.2.4
 chromadb==0.5.5
 sentence-transformers
 pypdf
-pdfplumber
+pdfplumber
 transformers>=4.39.0
 huggingface_hub
 datasets
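The new langchain-chroma entry is what the Dockerfile comment refers to: without the package, the from langchain_chroma import Chroma line raises ModuleNotFoundError and the RUN python ingest.py build step fails. A quick smoke test, runnable after pip install -r requirements.txt and before a full image build:

# Smoke test: confirm the imports rag.py needs resolve with the updated pins.
from langchain_chroma import Chroma                      # needs the new langchain-chroma entry
from langchain_huggingface import HuggingFaceEmbeddings  # needs langchain-huggingface

print("imports OK:", Chroma.__name__, HuggingFaceEmbeddings.__name__)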