Commit: "Update src/qa.py"
File changed: src/qa.py
|
@@ -1,16 +1,24 @@
|
|
| 1 |
import os
|
| 2 |
-
from sentence_transformers import SentenceTransformer
|
| 3 |
-
from transformers import pipeline
|
| 4 |
-
from vectorstore import search_faiss
|
| 5 |
-
|
| 6 |
-
print("✅ qa.py loaded from:", __file__)
|
| 7 |
|
|
|
|
| 8 |
# Force Hugging Face to use /tmp for cache
|
|
|
|
| 9 |
CACHE_DIR = "/tmp/huggingface"
|
|
|
|
|
|
|
| 10 |
os.environ["HF_HOME"] = CACHE_DIR
|
| 11 |
os.environ["TRANSFORMERS_CACHE"] = CACHE_DIR
|
| 12 |
os.environ["HF_DATASETS_CACHE"] = CACHE_DIR
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
# ----------------------------
|
| 15 |
# Query embedding model
|
| 16 |
# ----------------------------
|
|
@@ -24,7 +32,6 @@ _query_model = SentenceTransformer(
|
|
| 24 |
# ----------------------------
|
| 25 |
MODEL_NAME = "google/flan-t5-small"
|
| 26 |
|
| 27 |
-
# Make sure model downloads into /tmp
|
| 28 |
_answer_model = pipeline(
|
| 29 |
"text2text-generation",
|
| 30 |
model=MODEL_NAME,
|
|
|
|
| 1 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
# ----------------------------
# Force Hugging Face to use /tmp for cache
# ----------------------------
# Point every Hugging Face cache location at a writable /tmp directory
# (the default ~/.cache may be read-only in serverless/containerized hosts).
CACHE_DIR = "/tmp/huggingface"
os.makedirs(CACHE_DIR, exist_ok=True)

# All three variables are set before transformers/sentence-transformers are
# imported further down, so the libraries pick them up at import time.
for _cache_var in ("HF_HOME", "TRANSFORMERS_CACHE", "HF_DATASETS_CACHE"):
    os.environ[_cache_var] = CACHE_DIR
|
| 12 |
|
| 13 |
+
# ----------------------------
|
| 14 |
+
# Now import libraries (they'll respect env vars)
|
| 15 |
+
# ----------------------------
|
| 16 |
+
from sentence_transformers import SentenceTransformer
|
| 17 |
+
from transformers import pipeline
|
| 18 |
+
from vectorstore import search_faiss
|
| 19 |
+
|
# Debug breadcrumb: logs which on-disk copy of this module was actually
# imported (helps catch stale deployments / shadowed module paths).
print("✅ qa.py loaded from:", __file__)
|
| 21 |
+
|
| 22 |
# ----------------------------
|
| 23 |
# Query embedding model
|
| 24 |
# ----------------------------
|
|
|
|
| 32 |
# ----------------------------
|
# Hugging Face model id used by the text2text-generation pipeline below.
MODEL_NAME = "google/flan-t5-small"
|
| 34 |
|
|
|
|
| 35 |
_answer_model = pipeline(
|
| 36 |
"text2text-generation",
|
| 37 |
model=MODEL_NAME,
|