# rag-mixedbread / build_index.py
# NOTE: the lines above/below were scraped from the Hugging Face file page
# (uploaded by Kushalkhemka, commit c980379, verified); kept as a comment so
# the file below remains valid Python.
#!/usr/bin/env python3
"""
Build a Chroma DB index of CVE chunks using Mixedbread embeddings.
"""
from __future__ import annotations
import argparse
import json
from pathlib import Path
from typing import Iterable, List, Sequence, Any
import numpy as np
import chromadb
from chromadb.config import Settings as ChromaSettings
from chromadb import errors as chroma_errors
from sentence_transformers import SentenceTransformer
from .config import Settings, load_settings
def parse_args() -> argparse.Namespace:
    """Define and evaluate the CLI options for the index builder.

    Defaults for ``--corpus`` and ``--batch-size`` come from the project
    settings so the script runs with no arguments in a configured checkout.
    """
    defaults = load_settings()
    cli = argparse.ArgumentParser(description="Build the Mixedbread CVE index.")
    cli.add_argument(
        "--corpus",
        default=str(defaults.corpus_path),
        help="Path to the prepared CVE corpus JSONL.",
    )
    cli.add_argument(
        "--batch-size",
        type=int,
        default=defaults.embed_batch_size,
        help="Number of texts per embedding batch.",
    )
    cli.add_argument(
        "--normalize",
        action="store_true",
        help="L2-normalize embeddings before indexing.",
    )
    cli.add_argument(
        "--reset",
        action="store_true",
        help="Drop the existing Chroma collection before re-building.",
    )
    return cli.parse_args()
def read_corpus(path: Path) -> Iterable[dict]:
    """Yield one decoded JSON record per non-blank line of a JSONL file."""
    with path.open("r", encoding="utf-8") as handle:
        for raw in handle:
            # Skip blank / whitespace-only lines rather than failing on them.
            if not raw.strip():
                continue
            yield json.loads(raw)
def chunk_iterable(items: Sequence[str], size: int) -> Iterable[Sequence[str]]:
    """Yield consecutive slices of *items*, each at most *size* elements long."""
    offsets = range(0, len(items), size)
    for lo in offsets:
        yield items[lo : lo + size]
def load_embedding_model(settings: Settings) -> SentenceTransformer:
    """Instantiate the configured Mixedbread embedding model on the chosen device."""
    print(f"Loading embedding model: {settings.embed_model}")
    # Pass the HF token only when one is configured; otherwise stay anonymous.
    # NOTE(review): `use_auth_token` is deprecated in newer sentence-transformers
    # releases in favor of `token` — confirm against the pinned version.
    auth = settings.hf_token if settings.hf_token else None
    model = SentenceTransformer(
        settings.embed_model,
        device=settings.device,
        use_auth_token=auth,
    )
    print(f"Model loaded on {settings.device}")
    return model
def embed_batch(
    texts: Sequence[str], model: SentenceTransformer, normalize: bool
) -> List[List[float]]:
    """Embed one batch of texts and return the vectors as plain float lists."""
    batch = list(texts)
    vectors = model.encode(
        batch,
        # The caller already batches, so embed the whole slice in one pass.
        batch_size=len(batch),
        show_progress_bar=False,
        normalize_embeddings=normalize,
        convert_to_numpy=True,
    )
    return vectors.tolist()
def get_collection(settings: Settings, reset: bool):
    """Open the persistent Chroma client and its CVE collection.

    When *reset* is true, the existing collection is dropped first so the
    index is rebuilt from scratch. Returns ``(client, collection)``.
    """
    client = chromadb.Client(
        ChromaSettings(
            is_persistent=True,
            persist_directory=str(settings.chroma_dir),
        )
    )
    if reset:
        try:
            client.delete_collection(settings.chroma_collection)
        except chroma_errors.NotFoundError:
            # Nothing to drop on a first build — that's fine.
            pass
    collection = client.get_or_create_collection(
        settings.chroma_collection,
        # Cosine distance for the HNSW index.
        metadata={"hnsw:space": "cosine"},
    )
    return client, collection
def coerce_metadata(meta: dict[str, Any]) -> dict[str, Any]:
    """Normalize metadata values into scalar types Chroma accepts.

    ``None`` values are dropped, scalars pass through, sequences become
    comma-joined strings, dicts become sorted JSON, and anything else is
    stringified. A falsy *meta* yields an empty dict.
    """
    out: dict[str, Any] = {}
    for key, raw in (meta or {}).items():
        if raw is None:
            continue
        if isinstance(raw, (str, int, float, bool)):
            out[key] = raw
        elif isinstance(raw, dict):
            # Sorted keys keep the serialized form deterministic.
            out[key] = json.dumps(raw, sort_keys=True)
        elif isinstance(raw, (list, tuple, set)):
            out[key] = ", ".join(str(item) for item in raw if item is not None)
        else:
            out[key] = str(raw)
    return out
def build_index(
    corpus_path: Path,
    settings: Settings,
    batch_size: int,
    normalize: bool,
    reset: bool,
) -> None:
    """Embed every corpus chunk and store the vectors in Chroma.

    Raises FileNotFoundError when the corpus JSONL is absent and
    RuntimeError when it contains no records.
    """
    if not corpus_path.exists():
        raise FileNotFoundError(
            f"Corpus not found at {corpus_path}. Run rag_mixedbread/prepare_cve_corpus.py first."
        )
    docs: List[str] = []
    doc_meta: List[dict] = []
    print("Loading corpus into memory...")
    for record in read_corpus(corpus_path):
        body = record["text"]
        docs.append(body)
        doc_meta.append(
            {
                "cve_id": record["cve_id"],
                "chunk_id": record["chunk_id"],
                # The raw text is duplicated into metadata alongside the document.
                "text": body,
                **coerce_metadata(record.get("metadata") or {}),
            }
        )
    if not docs:
        raise RuntimeError("Corpus is empty; nothing to embed.")
    model = load_embedding_model(settings)
    client, collection = get_collection(settings, reset=reset)
    total = len(docs)
    added = 0
    for lo in range(0, total, batch_size):
        hi = lo + batch_size
        batch_texts = docs[lo:hi]
        batch_meta = doc_meta[lo:hi]
        # Normalization, if requested, is applied inside the encoder call.
        vectors = embed_batch(batch_texts, model, normalize=normalize)
        matrix = np.array(vectors, dtype="float32")
        ids = [f"{meta['cve_id']}:{meta['chunk_id']}" for meta in batch_meta]
        collection.add(
            ids=ids,
            documents=batch_texts,
            metadatas=batch_meta,
            embeddings=matrix.tolist(),
        )
        added += len(batch_texts)
        # Carriage return keeps the progress counter on one console line.
        print(f"Embedded & stored {added} / {total} chunks", end="\r")
    print()
    # NOTE(review): Client.persist() was removed in chromadb >= 0.4 where
    # persistent clients flush automatically — confirm against the pinned version.
    client.persist()
    print(
        f"Indexed {added} vectors into Chroma at {settings.chroma_dir} "
        f"(collection: {settings.chroma_collection})."
    )
def main() -> None:
    """CLI entry point: parse options, load settings, and build the index."""
    options = parse_args()
    build_index(
        corpus_path=Path(options.corpus),
        settings=load_settings(),
        batch_size=options.batch_size,
        normalize=options.normalize,
        reset=options.reset,
    )


if __name__ == "__main__":
    main()