clean final: HF Inference API embeddings (no disk)
rag.py CHANGED

```diff
@@ -1,3 +1,4 @@
+# rag.py – bullet-proof: online fetch with fallback on any error
 from __future__ import annotations
 import os, re, json, requests
 from functools import lru_cache
@@ -5,7 +6,7 @@ from typing import List, Tuple
 
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
-from langchain_huggingface import HuggingFaceEndpoint
+from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
 from langchain.chains import RetrievalQA
 from supabase import create_client
```
```diff
@@ -24,7 +25,6 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 supabase = create_client(SUPABASE_URL, SUPABASE_KEY)
 
 # ------------------------------------------------------------------ INTENT
-import re
 GREETING_RE = re.compile(r"\b(hi|hello|hey|good morning|good afternoon|good evening)\b", re.I)
 THANKS_RE = re.compile(r"\b(thank|thanks|appreciate)\b", re.I)
 BYE_RE = re.compile(r"\b(bye|goodbye|see you|later)\b", re.I)
```
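The removed `import re` was a duplicate; `re` is already imported at the top of the file. These patterns gate small talk so it never reaches retrieval. The dispatch itself lives in `ask_question`, whose body this diff does not show; a hypothetical sketch of how such regexes are typically applied (the canned replies are illustrative, not from the source):

```python
# Hypothetical helper – the real dispatch is inside ask_question(),
# which is not shown in this diff.
def quick_intent_reply(text: str) -> str | None:
    """Return a canned reply for small talk, or None to fall through to RAG."""
    if GREETING_RE.search(text):
        return "Hi! How can I help you today?"
    if THANKS_RE.search(text):
        return "You're welcome!"
    if BYE_RE.search(text):
        return "Goodbye!"
    return None
```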
```diff
@@ -94,14 +94,28 @@ def get_texts() -> List[str]:
 @lru_cache(maxsize=1)
 def get_vectorstore() -> FAISS:
     texts = get_texts()
+    embeddings = None
 
-
-
-
-
+    try:
+        # Try new API first
+        from langchain_huggingface import HuggingFaceInferenceAPIEmbeddings
+        embeddings = HuggingFaceInferenceAPIEmbeddings(
+            api_key=HF_TOKEN,
+            model_name="sentence-transformers/all-MiniLM-L6-v2"
+        )
+    except ImportError:
+        try:
+            # Fallback for updated version
+            from langchain_huggingface import HuggingFaceEmbeddings
+            embeddings = HuggingFaceEmbeddings(
+                model_name="sentence-transformers/all-MiniLM-L6-v2"
+            )
+            print("⚙️ Using HuggingFaceEmbeddings fallback")
+        except Exception as e:
+            raise RuntimeError(f"❌ Failed to load embeddings: {e}")
 
     if not texts:
-        return FAISS.from_texts([""], embeddings)  # dummy
+        return FAISS.from_texts([""], embeddings)  # dummy FAISS instance
 
     splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=50)
     docs = splitter.create_documents(texts, metadatas=[{"source": DATASET}] * len(texts))
```

(The content of the four removed lines, old lines 98–101, was not captured in the page rendering.)
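This block is what the commit title refers to: embeddings are computed through the HF Inference API, so no model weights land on disk, and the local `HuggingFaceEmbeddings` path is only a fallback. One caveat worth hedging: in the releases I'm aware of, `HuggingFaceInferenceAPIEmbeddings` is exported from `langchain_community.embeddings` rather than `langchain_huggingface`, in which case the `ImportError` branch always fires and the fallback model gets downloaded anyway. A minimal standalone sketch of the same pattern that also probes the community location (the factoring into a helper, and every name other than `HF_TOKEN` and the model id, are assumptions):

```python
import os

def make_embeddings(model: str = "sentence-transformers/all-MiniLM-L6-v2"):
    """Prefer remote HF Inference API embeddings (nothing on disk); fall back to local."""
    try:
        # Import location used by the diff above; not present in every release.
        from langchain_huggingface import HuggingFaceInferenceAPIEmbeddings
    except ImportError:
        try:
            # Historical home of the same class.
            from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
        except ImportError:
            # Last resort: local model via sentence-transformers (downloads weights).
            from langchain_huggingface import HuggingFaceEmbeddings
            return HuggingFaceEmbeddings(model_name=model)
    return HuggingFaceInferenceAPIEmbeddings(api_key=os.getenv("HF_TOKEN"), model_name=model)
```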
```diff
@@ -111,17 +125,19 @@ def get_vectorstore() -> FAISS:
 @lru_cache(maxsize=1)
 def get_llm():
     return HuggingFaceEndpoint(
-        repo_id=
+        repo_id=LLM_MODEL,
         temperature=0.1,
         max_new_tokens=150,
         huggingfacehub_api_token=HF_TOKEN
     )
 
-PROMPT = PromptTemplate.from_template("""
+PROMPT = PromptTemplate.from_template("""
+You are Amina, assistant for {company}.
 Use only the context below. If unsure, say: "A human agent will follow up."
 Context: {context}
 Question: {question}
-Answer:
+Answer:
+""")
```

(The old value of `repo_id=` was truncated in the page rendering.)
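Note that `PROMPT` now declares three variables (`company`, `context`, `question`) while `RetrievalQA` supplies only `context` and `question`, so `company` has to be bound before the chain runs, for example via `PromptTemplate.partial`. A hypothetical wiring; the real chain construction sits in `ask_question`, which this diff does not show, and `COMPANY_NAME` and `k=4` are assumptions:

```python
# Hypothetical wiring of the pieces defined above (sketch, not the source).
qa = RetrievalQA.from_chain_type(
    llm=get_llm(),
    retriever=get_vectorstore().as_retriever(search_kwargs={"k": 4}),
    chain_type_kwargs={"prompt": PROMPT.partial(company=COMPANY_NAME)},
    return_source_documents=True,
)
result = qa.invoke({"query": question})
answer, sources = result["result"], result["source_documents"]
```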
```diff
@@ -153,4 +169,4 @@ def ask_question(phone: str, question: str) -> Tuple[str, List]:
 
 def _save_chat(phone: str, q: str, a: str) -> None:
     supabase.table("chat_memory").insert({"user_phone": phone, "role": "user", "message": q}).execute()
-    supabase.table("chat_memory").insert({"user_phone": phone, "role": "assistant", "message": a}).execute()
+    supabase.table("chat_memory").insert({"user_phone": phone, "role": "assistant", "message": a}).execute()
```
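`_save_chat` writes both turns of the exchange to `chat_memory`, one row per role. A hypothetical read-side counterpart, assuming the table has a `created_at` timestamp column (not shown in this diff):

```python
# Hypothetical counterpart to _save_chat() – the created_at column is assumed.
def _recent_history(phone: str, limit: int = 10) -> List[dict]:
    rows = (
        supabase.table("chat_memory")
        .select("role, message")
        .eq("user_phone", phone)
        .order("created_at", desc=True)
        .limit(limit)
        .execute()
    )
    return list(reversed(rows.data))  # oldest turn first
```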