Fix LangChain import paths; restore full prompt filters and human-agent fallback
Browse files
rag.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
# rag.py (
|
| 2 |
from __future__ import annotations
|
| 3 |
import os, uuid, tempfile, requests, shutil, re
|
| 4 |
from pathlib import Path
|
|
@@ -7,10 +7,9 @@ from typing import List, Tuple
|
|
| 7 |
from datasets import load_dataset
|
| 8 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
| 9 |
from langchain_community.vectorstores import FAISS
|
| 10 |
-
from langchain_huggingface import HuggingFaceEmbeddings
|
| 11 |
from langchain_core.prompts import PromptTemplate
|
| 12 |
-
from langchain.chains import RetrievalQA
|
| 13 |
-
from langchain_huggingface import HuggingFaceEndpoint
|
| 14 |
from supabase import create_client
|
| 15 |
|
| 16 |
# ---------- config ----------
|
|
@@ -120,7 +119,7 @@ def get_llm():
|
|
| 120 |
)
|
| 121 |
|
| 122 |
PROMPT = PromptTemplate.from_template("""You are Amina, assistant for {company}.
|
| 123 |
-
Use only the context below. If unsure, say:
|
| 124 |
Context: {context}
|
| 125 |
Question: {question}
|
| 126 |
Answer:""")
|
|
|
|
| 1 |
+
# rag.py (v3 – imports fixed, no more ModuleNotFoundError)
|
| 2 |
from __future__ import annotations
|
| 3 |
import os, uuid, tempfile, requests, shutil, re
|
| 4 |
from pathlib import Path
|
|
|
|
| 7 |
from datasets import load_dataset
|
| 8 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
| 9 |
from langchain_community.vectorstores import FAISS
|
| 10 |
+
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint
|
| 11 |
from langchain_core.prompts import PromptTemplate
|
| 12 |
+
from langchain.chains.retrieval_qa.base import RetrievalQA # ← correct path
|
|
|
|
| 13 |
from supabase import create_client
|
| 14 |
|
| 15 |
# ---------- config ----------
|
|
|
|
| 119 |
)
|
| 120 |
|
| 121 |
PROMPT = PromptTemplate.from_template("""You are Amina, assistant for {company}.
|
| 122 |
+
Use only the context below. If unsure, say: "A human agent will follow up."
|
| 123 |
Context: {context}
|
| 124 |
Question: {question}
|
| 125 |
Answer:""")
|