Update src/embeddings.py
Browse files- src/embeddings.py +29 -18
src/embeddings.py
CHANGED
|
@@ -2,6 +2,8 @@
|
|
| 2 |
# Hugging Face cache bootstrap
|
| 3 |
# ----------------------------
|
| 4 |
import os
|
|
|
|
|
|
|
| 5 |
|
| 6 |
CACHE_DIR = "/tmp/hf_cache"
|
| 7 |
os.makedirs(CACHE_DIR, exist_ok=True)
|
|
@@ -14,28 +16,37 @@ os.environ["HF_MODULES_CACHE"] = CACHE_DIR
|
|
| 14 |
print(f"β
Using Hugging Face cache at {CACHE_DIR}")
|
| 15 |
|
| 16 |
# ----------------------------
|
| 17 |
-
#
|
| 18 |
-
# ----------------------------
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
#
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# ----------------------------
|
| 30 |
# Function: generate embeddings
|
| 31 |
# ----------------------------
|
| 32 |
def generate_embeddings(chunks: list) -> list:
|
| 33 |
"""
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
chunks (list): List of text chunks.
|
| 37 |
-
Returns:
|
| 38 |
-
list: List of embedding vectors (plain Python lists).
|
| 39 |
"""
|
| 40 |
-
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ----------------------------
# Hugging Face cache bootstrap
# ----------------------------
import os

# The cache location MUST be configured before importing
# sentence_transformers/transformers: those libraries read
# HF_HOME / TRANSFORMERS_CACHE at import time, so setting the
# variables after the import silently has no effect.
CACHE_DIR = "/tmp/hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
# setdefault: a later explicit assignment elsewhere in this file
# (same value) remains harmless, and pre-set user overrides win.
os.environ.setdefault("HF_HOME", CACHE_DIR)
os.environ.setdefault("TRANSFORMERS_CACHE", CACHE_DIR)
os.environ.setdefault("SENTENCE_TRANSFORMERS_HOME", CACHE_DIR)

import numpy as np  # noqa: E402  (import after env setup is intentional)
from sentence_transformers import SentenceTransformer  # noqa: E402
|
|
|
| 16 |
print(f"β
Using Hugging Face cache at {CACHE_DIR}")
|
| 17 |
|
| 18 |
# ----------------------------
# Load embedding model once (with fallback)
# ----------------------------
def _load_embedding_model():
    """Load the preferred e5 retrieval model, falling back to MiniLM.

    Returns:
        SentenceTransformer: the loaded model, cached under CACHE_DIR.
    """
    try:
        model = SentenceTransformer(
            "intfloat/e5-small-v2",  # better suited for document QA retrieval
            cache_folder=CACHE_DIR,
        )
        print("✅ Loaded model: intfloat/e5-small-v2")
    except Exception as e:
        # Deliberate best-effort: any load failure (download, disk, config)
        # falls back to the smaller MiniLM model rather than crashing import.
        print(f"⚠️ Model load failed ({e}), falling back to MiniLM.")
        model = SentenceTransformer(
            "sentence-transformers/all-MiniLM-L6-v2",
            cache_folder=CACHE_DIR,
        )
        print("✅ Loaded fallback model: all-MiniLM-L6-v2")
    return model


# Loaded once at import time so every generate_embeddings() call reuses it.
_model = _load_embedding_model()
| 34 |
|
| 35 |
# ----------------------------
# Function: generate embeddings
# ----------------------------
def generate_embeddings(chunks: list) -> list:
    """Generate normalized embeddings for a list of text chunks.

    Normalization improves FAISS retrieval accuracy (cosine-based):
    with unit-length vectors, inner product equals cosine similarity.

    Args:
        chunks (list): List of text chunks (strings).

    Returns:
        list: One embedding vector (plain Python list of floats) per chunk.
    """
    # Guard: nothing to encode — avoid calling the model on an empty batch.
    if not chunks:
        return []

    # e5-family models expect a semantic prefix so they can distinguish
    # indexed passages from search queries.
    # NOTE(review): if the MiniLM fallback model was loaded instead, the
    # "passage: " prefix is not part of its training format — confirm
    # whether the prefix should be conditional on the loaded model.
    prepared_chunks = [f"passage: {chunk.strip()}" for chunk in chunks]

    vectors = _model.encode(
        prepared_chunks,
        convert_to_numpy=True,
        normalize_embeddings=True,  # unit-length output for FAISS matching
    )

    # tolist() converts the numpy matrix into plain Python lists so callers
    # can serialize the result (e.g. JSON) without a numpy dependency.
    return vectors.tolist()
|