bible-embeddings / main.py
Ken Powers
Add bge-m3
ea657c1
#!/usr/bin/env python3
"""
Interactive Bible verse embedding generator.
This script generates embeddings for Bible verses using various embedding models.
It supports both commercial APIs and open-source models.
"""
import os
# Fix OpenMP duplicate library issue
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Note: Removed global offline mode to allow models with custom code (like nomic-ai) to work
# These models need to download custom tokenizers and configurations that aren't in standard transformers
# os.environ["HF_HUB_OFFLINE"] = "1"
# os.environ["TRANSFORMERS_OFFLINE"] = "1"
import json
import asyncio
from asyncio import Semaphore
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from abc import ABC, abstractmethod
import argparse
# Lazy import heavy libraries to avoid conflicts
# import numpy as np # Will be imported in functions that need it
# import yaml # Will be imported in functions that need it
# import faiss # Will be imported in functions that need it
import csv
from datetime import datetime
class EmbeddingProvider(ABC):
"""Abstract base class for embedding providers."""
@abstractmethod
def get_name(self) -> str:
"""Return the model name for file naming."""
pass
@abstractmethod
async def embed_text(self, text: str) -> List[float]:
"""Generate embedding for a single text."""
pass
@abstractmethod
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for a batch of texts."""
pass
async def embed_query(self, query: str) -> List[float]:
"""Generate embedding for a query. Base implementation falls back to embed_text."""
return await self.embed_text(query)
class OpenAIProvider(EmbeddingProvider):
"""OpenAI embedding provider."""
def __init__(self, model_name: str = "text-embedding-3-small"):
self.model_name = model_name
self.client = None
def get_name(self) -> str:
return self.model_name
async def _get_client(self):
if self.client is None:
from openai import AsyncOpenAI
self.client = AsyncOpenAI()
return self.client
async def embed_text(self, text: str) -> List[float]:
embeddings = await self.embed_batch([text])
return embeddings[0]
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
client = await self._get_client()
response = await client.embeddings.create(input=texts, model=self.model_name)
# OpenAI embeddings are automatically normalized to unit length
return [data.embedding for data in response.data]
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
"""Efficiently embed multiple queries using OpenAI's batch API capability."""
all_embeddings = []
# Process in batches to respect API limits
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
batch_embeddings = await self.embed_batch(batch)
all_embeddings.extend(batch_embeddings)
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
embedding = await self.embed_text(query)
all_embeddings.append(embedding)
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder
all_embeddings.append([0.0] * 1536) # Default dimension for text-embedding models
return all_embeddings
class GeminiProvider(EmbeddingProvider):
"""Google Gemini embedding provider."""
def __init__(self, model_name: str = "text-embedding-004"):
self.model_name = model_name
self.client = None
def get_name(self) -> str:
return self.model_name
async def _get_client(self):
if self.client is None:
import google.generativeai as genai
self.client = genai
genai.configure()
return self.client
async def embed_text(self, text: str) -> List[float]:
embeddings = await self.embed_batch([text])
return embeddings[0]
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
client = await self._get_client()
result = client.embed_content(model=f"models/{self.model_name}", content=texts)
# Gemini embeddings are automatically normalized to unit length
# The result is a dictionary with an 'embedding' key containing the embedding vectors
if isinstance(result, dict) and 'embedding' in result:
embeddings = result['embedding']
# Handle case where embeddings is a list of embedding vectors
if isinstance(embeddings, list):
# Check if we have multiple embeddings (batch) or a single embedding
if len(embeddings) > 0:
# If first element is a list of numbers, we have direct embedding vectors
if isinstance(embeddings[0], (list, tuple)) and all(isinstance(x, (int, float)) for x in embeddings[0][:5]):
return embeddings
# If first element is a dict with 'embedding' key, extract them
elif isinstance(embeddings[0], dict) and 'embedding' in embeddings[0]:
return [emb['embedding'] for emb in embeddings]
# If first element is an object with embedding attribute
elif hasattr(embeddings[0], 'embedding'):
return [emb.embedding for emb in embeddings]
else:
# Assume it's a single embedding vector for all texts (unlikely but handle it)
return [embeddings] * len(texts)
else:
return []
# Handle case where embeddings is a single vector
elif isinstance(embeddings, (list, tuple)):
return [embeddings]
else:
return []
else:
raise ValueError(f"Unexpected Gemini API response format: {type(result)}")
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
"""Efficiently embed multiple queries using Gemini's batch capability."""
all_embeddings = []
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
batch_embeddings = await self.embed_batch(batch)
all_embeddings.extend(batch_embeddings)
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
embedding = await self.embed_text(query)
all_embeddings.append(embedding)
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder
all_embeddings.append([0.0] * 768) # Default dimension for Gemini models
return all_embeddings
class VoyageProvider(EmbeddingProvider):
"""Voyage AI embedding provider."""
def __init__(self, model_name: str = "voyage-3"):
self.model_name = model_name
self.client = None
def get_name(self) -> str:
return self.model_name
async def _get_client(self):
if self.client is None:
import voyageai
self.client = voyageai.AsyncClient()
return self.client
async def embed_text(self, text: str) -> List[float]:
embeddings = await self.embed_batch([text])
return embeddings[0]
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
client = await self._get_client()
response = await client.embed(texts, model=self.model_name)
# Voyage AI embeddings are automatically normalized to unit length
return response.embeddings
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
"""Efficiently embed multiple queries using Voyage's batch capability."""
all_embeddings = []
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
batch_embeddings = await self.embed_batch(batch)
all_embeddings.extend(batch_embeddings)
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
embedding = await self.embed_text(query)
all_embeddings.append(embedding)
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder
all_embeddings.append([0.0] * 1024) # Default dimension for Voyage models
return all_embeddings
class HuggingFaceProvider(EmbeddingProvider):
"""HuggingFace sentence-transformers provider."""
def __init__(self, model_name: str):
self.model_name = model_name
self.model = None
self.query_prompt_config = self._get_query_prompt_config()
def get_name(self) -> str:
# Convert model path to filename-safe format
return self.model_name.replace("/", "-").replace("_", "-")
def _get_query_prompt_config(self) -> dict:
"""Get query prompt configuration for specific models."""
# Model-specific query prompt configurations
if "Qwen3-Embedding" in self.model_name:
return {
"enabled": True,
"prompt_name": "query",
"instruction_template": "Instruct: Given a biblical passage, retrieve verses that are semantically related or answer the query\nQuery: {query}"
}
elif "inf-retriever-v1" in self.model_name:
return {
"enabled": True,
"prompt_name": "query"
}
elif "jina-embeddings-v4" in self.model_name:
return {
"enabled": True,
"prompt_name": "query",
"task": "retrieval"
}
elif "e5-" in self.model_name.lower():
# E5 models use "query: " and "passage: " prefixes for retrieval tasks
return {
"enabled": True,
"query_prefix": "query: ",
"document_prefix": "passage: "
}
# Add other model-specific configurations here as needed
return {"enabled": False}
def _get_model(self):
if self.model is None:
import os
from sentence_transformers import SentenceTransformer
# Get HuggingFace token from environment if available
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
# Some models require trust_remote_code=True and can't use local_files_only
# because they need to load custom tokenizers and configurations
if "jinaai" in self.model_name or "nomic-ai" in self.model_name:
self.model = SentenceTransformer(
self.model_name,
trust_remote_code=True,
token=hf_token
# Note: Cannot use local_files_only=True for models with custom code
)
else:
# For standard models, try offline first, then online with token if needed
try:
self.model = SentenceTransformer(
self.model_name,
local_files_only=True # Force offline mode for standard models
)
except Exception:
# If offline fails, try online with token
print(f"Model {self.model_name} not found offline, downloading...")
self.model = SentenceTransformer(
self.model_name,
token=hf_token
)
return self.model
async def embed_text(self, text: str) -> List[float]:
model = self._get_model()
# Apply document prefix for E5 models
if self.query_prompt_config.get("enabled") and "document_prefix" in self.query_prompt_config:
prefixed_text = self.query_prompt_config["document_prefix"] + text
embedding = model.encode([prefixed_text], normalize_embeddings=True)[0]
elif self.query_prompt_config.get("enabled") and "prompt_name" in self.query_prompt_config and "jina-embeddings-v4" in self.model_name:
# For jina v4, use prompt_name="passage" for documents
encode_kwargs = {
"normalize_embeddings": True,
"prompt_name": "passage"
}
if "task" in self.query_prompt_config:
encode_kwargs["task"] = self.query_prompt_config["task"]
embedding = model.encode([text], **encode_kwargs)[0]
else:
embedding = model.encode([text], normalize_embeddings=True)[0]
return embedding.tolist()
async def embed_query(self, query: str) -> List[float]:
"""Embed a query with model-specific prompting if needed."""
model = self._get_model()
if self.query_prompt_config["enabled"]:
# Apply model-specific query prompting
if "prompt_name" in self.query_prompt_config:
# Use prompt_name parameter (for Qwen3 and Jina v4)
encode_kwargs = {
"normalize_embeddings": True,
"prompt_name": self.query_prompt_config["prompt_name"]
}
if "task" in self.query_prompt_config:
encode_kwargs["task"] = self.query_prompt_config["task"]
embedding = model.encode([query], **encode_kwargs)[0]
elif "instruction_template" in self.query_prompt_config:
# Use instruction template formatting
formatted_query = self.query_prompt_config["instruction_template"].format(query=query)
embedding = model.encode([formatted_query], normalize_embeddings=True)[0]
elif "query_prefix" in self.query_prompt_config:
# Use prefix formatting (for E5 models)
prefixed_query = self.query_prompt_config["query_prefix"] + query
embedding = model.encode([prefixed_query], normalize_embeddings=True)[0]
else:
# Fallback to regular embedding
embedding = model.encode([query], normalize_embeddings=True)[0]
else:
# No special prompting needed
embedding = model.encode([query], normalize_embeddings=True)[0]
return embedding.tolist()
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
model = self._get_model()
# Apply document prefix for E5 models
if self.query_prompt_config.get("enabled") and "document_prefix" in self.query_prompt_config:
prefixed_texts = [
self.query_prompt_config["document_prefix"] + text
for text in texts
]
embeddings = model.encode(prefixed_texts, normalize_embeddings=True)
elif self.query_prompt_config.get("enabled") and "prompt_name" in self.query_prompt_config and "jina-embeddings-v4" in self.model_name:
# For jina v4, use prompt_name="passage" for documents
encode_kwargs = {
"normalize_embeddings": True,
"prompt_name": "passage"
}
if "task" in self.query_prompt_config:
encode_kwargs["task"] = self.query_prompt_config["task"]
embeddings = model.encode(texts, **encode_kwargs)
else:
embeddings = model.encode(texts, normalize_embeddings=True)
return [emb.tolist() for emb in embeddings]
async def embed_queries_batch_internal(self, queries: List[str]) -> List[List[float]]:
"""Embed a batch of queries with model-specific prompting if needed."""
model = self._get_model()
if self.query_prompt_config["enabled"]:
# Apply model-specific query prompting
if "prompt_name" in self.query_prompt_config:
# Use prompt_name parameter (for Qwen3 and Jina v4)
encode_kwargs = {
"normalize_embeddings": True,
"prompt_name": self.query_prompt_config["prompt_name"]
}
if "task" in self.query_prompt_config:
encode_kwargs["task"] = self.query_prompt_config["task"]
embeddings = model.encode(queries, **encode_kwargs)
elif "instruction_template" in self.query_prompt_config:
# Use instruction template formatting
formatted_queries = [
self.query_prompt_config["instruction_template"].format(query=query)
for query in queries
]
embeddings = model.encode(formatted_queries, normalize_embeddings=True)
elif "query_prefix" in self.query_prompt_config:
# Use prefix formatting (for E5 models)
prefixed_queries = [
self.query_prompt_config["query_prefix"] + query
for query in queries
]
embeddings = model.encode(prefixed_queries, normalize_embeddings=True)
else:
# Fallback to regular embedding
embeddings = model.encode(queries, normalize_embeddings=True)
else:
# No special prompting needed
embeddings = model.encode(queries, normalize_embeddings=True)
return [emb.tolist() for emb in embeddings]
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]:
"""Efficiently embed multiple queries using HuggingFace's batch capability with model-specific prompting."""
all_embeddings = []
# HuggingFace can handle larger batches efficiently since it's local
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
batch_embeddings = await self.embed_queries_batch_internal(batch)
all_embeddings.extend(batch_embeddings)
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
embedding = await self.embed_query(query)
all_embeddings.append(embedding)
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder based on model
model = self._get_model()
dim = getattr(model, 'get_sentence_embedding_dimension', lambda: 384)()
all_embeddings.append([0.0] * dim)
return all_embeddings
class EmbeddingGemmaProvider(EmbeddingProvider):
"""Google EmbeddingGemma provider with specialized encode_document method."""
def __init__(self, model_name: str):
self.model_name = model_name
self.model = None
def get_name(self) -> str:
# Convert model path to filename-safe format
return self.model_name.replace("/", "-").replace("_", "-")
def _get_model(self):
if self.model is None:
import os
from sentence_transformers import SentenceTransformer
# Get HuggingFace token from environment if available
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
# EmbeddingGemma models require trust_remote_code=True
self.model = SentenceTransformer(
self.model_name,
trust_remote_code=True,
token=hf_token
)
return self.model
async def embed_text(self, text: str) -> List[float]:
model = self._get_model()
# For Bible verses, treat them as documents to be searched
embedding = model.encode_document([text])[0]
return embedding.tolist()
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
model = self._get_model()
# Use encode_document for Bible verses (the content we want to search)
embeddings = model.encode_document(texts)
return [emb.tolist() for emb in embeddings]
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]:
"""Efficiently embed multiple queries using EmbeddingGemma's encode_query method."""
all_embeddings = []
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
model = self._get_model()
# Use encode_query for search queries
batch_embeddings = model.encode_query(batch)
all_embeddings.extend([emb.tolist() for emb in batch_embeddings])
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
model = self._get_model()
embedding = model.encode_query([query])[0]
all_embeddings.append(embedding.tolist())
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder
all_embeddings.append([0.0] * 768) # EmbeddingGemma default dimension
return all_embeddings
class JinaProvider(EmbeddingProvider):
"""Jina AI embedding provider."""
def __init__(self, model_name: str = "jina-embeddings-v3"):
self.model_name = model_name
self.client = None
def get_name(self) -> str:
return self.model_name
async def _get_client(self):
if self.client is None:
from jina import Client
self.client = Client()
return self.client
async def embed_text(self, text: str) -> List[float]:
embeddings = await self.embed_batch([text])
return embeddings[0]
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
# For now, fall back to HuggingFace provider for jina models
# as they're available on HuggingFace
hf_provider = HuggingFaceProvider(f"jinaai/{self.model_name}")
return await hf_provider.embed_batch(texts)
class BGEM3Provider(EmbeddingProvider):
"""BGE-M3 provider supporting dense mode."""
def __init__(self, model_name: str, mode: str = "dense"):
self.model_name = model_name
self.mode = mode # "dense" only
self.model = None
if mode != "dense":
raise ValueError(f"Unsupported mode: {mode}. Only 'dense' mode is supported.")
def get_name(self) -> str:
# Include mode in the name for file organization
return f"{self.model_name.replace('/', '-')}-{self.mode}"
def _get_model(self):
if self.model is None:
try:
from FlagEmbedding import BGEM3FlagModel
except ImportError:
raise ImportError(
"FlagEmbedding library is required for BGE-M3. "
"Install with: pip install FlagEmbedding"
)
import os
# Get HuggingFace token from environment if available
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
self.model = BGEM3FlagModel(
self.model_name,
use_fp16=True,
# Pass token if available for private models
**({'token': hf_token} if hf_token else {})
)
return self.model
async def embed_text(self, text: str) -> List[float]:
model = self._get_model()
result = model.encode([text])
return result['dense_vecs'][0].tolist()
async def embed_query(self, query: str) -> List[float]:
# BGE-M3 doesn't need special query prompting
return await self.embed_text(query)
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
model = self._get_model()
result = model.encode(texts)
return [vec.tolist() for vec in result['dense_vecs']]
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]:
"""Efficiently embed multiple queries using BGE-M3's batch capability."""
all_embeddings = []
# BGE-M3 can handle larger batches efficiently since it's local
for i in range(0, len(queries), batch_size):
batch = queries[i:i + batch_size]
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
try:
batch_embeddings = await self.embed_batch(batch)
all_embeddings.extend(batch_embeddings)
except Exception as e:
print(f"Error in batch embedding: {e}")
# Fall back to individual embedding for this batch
for query in batch:
try:
embedding = await self.embed_query(query)
all_embeddings.append(embedding)
except Exception as e2:
print(f"Error embedding query '{query}': {e2}")
# Add zero embedding as placeholder
dim = 1024 # BGE-M3 dense dimension
all_embeddings.append([0.0] * dim)
return all_embeddings
class BGEM3HybridProvider(EmbeddingProvider):
"""BGE-M3 hybrid provider that generates both dense and sparse embeddings for hybrid search."""
def __init__(self, model_name: str):
self.model_name = model_name
self.model = None
def get_name(self) -> str:
return f"{self.model_name.replace('/', '-')}-hybrid"
def _get_model(self):
if self.model is None:
try:
from FlagEmbedding import BGEM3FlagModel
except ImportError:
raise ImportError(
"FlagEmbedding library is required for BGE-M3. "
"Install with: pip install FlagEmbedding"
)
import os
# Get HuggingFace token from environment if available
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN")
self.model = BGEM3FlagModel(
self.model_name,
use_fp16=True,
# Pass token if available for private models
**({'token': hf_token} if hf_token else {})
)
return self.model
async def embed_text(self, text: str) -> List[float]:
"""For hybrid provider, return dense embedding by default for compatibility."""
model = self._get_model()
result = model.encode([text], return_dense=True, return_sparse=True, return_colbert_vecs=False)
return result['dense_vecs'][0].tolist()
async def embed_query(self, query: str) -> List[float]:
return await self.embed_text(query)
async def embed_batch(self, texts: List[str]) -> List[List[float]]:
"""For hybrid provider, return dense embeddings by default for compatibility."""
model = self._get_model()
result = model.encode(texts, return_dense=True, return_sparse=True, return_colbert_vecs=False)
return [vec.tolist() for vec in result['dense_vecs']]
async def embed_hybrid_text(self, text: str) -> Dict[str, Any]:
"""Generate both dense and sparse embeddings for hybrid search."""
model = self._get_model()
result = model.encode([text], return_dense=True, return_sparse=True, return_colbert_vecs=False)
# Process sparse embeddings
sparse_dict = result['lexical_weights'][0]
max_vocab_size = 30522 # BERT vocabulary size
dense_sparse = [0.0] * max_vocab_size
for token_id, weight in sparse_dict.items():
try:
token_id_int = int(token_id)
if token_id_int < max_vocab_size:
dense_sparse[token_id_int] = float(weight)
except (ValueError, TypeError):
continue
return {
'dense': result['dense_vecs'][0].tolist(),
'sparse': dense_sparse,
'sparse_dict': sparse_dict # Keep original sparse dict for better sparse search
}
async def embed_hybrid_batch(self, texts: List[str]) -> List[Dict[str, Any]]:
"""Generate both dense and sparse embeddings for multiple texts."""
model = self._get_model()
result = model.encode(texts, return_dense=True, return_sparse=True, return_colbert_vecs=False)
embeddings = []
max_vocab_size = 30522
for i, sparse_dict in enumerate(result['lexical_weights']):
dense_sparse = [0.0] * max_vocab_size
for token_id, weight in sparse_dict.items():
try:
token_id_int = int(token_id)
if token_id_int < max_vocab_size:
dense_sparse[token_id_int] = float(weight)
except (ValueError, TypeError):
continue
embeddings.append({
'dense': result['dense_vecs'][i].tolist(),
'sparse': dense_sparse,
'sparse_dict': sparse_dict
})
return embeddings
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]:
"""For compatibility, return dense embeddings."""
return await self.embed_batch(queries)
def _create_bgem3_provider(model_name: str) -> BGEM3Provider:
"""Create a BGE-M3 provider with the appropriate mode based on model name suffix."""
if model_name.endswith(":dense"):
base_model = model_name.replace(":dense", "")
return BGEM3Provider(base_model, "dense")
elif model_name.endswith(":hybrid"):
base_model = model_name.replace(":hybrid", "")
return BGEM3HybridProvider(base_model)
else:
# Default to dense mode if no suffix specified
return BGEM3Provider(model_name, "dense")
def get_available_models() -> Dict[str, Dict[str, Any]]:
"""Return a dictionary of available models organized by provider."""
return {
"OpenAI": {
"models": [
"text-embedding-3-small",
"text-embedding-3-large",
"text-embedding-ada-002",
],
"provider_class": OpenAIProvider,
},
"Google Gemini": {
"models": ["text-embedding-004"],
"provider_class": GeminiProvider,
},
"Voyage AI": {"models": ["voyage-3"], "provider_class": VoyageProvider},
"HuggingFace": {
"models": [
"answerdotai/ModernBERT-base",
"answerdotai/ModernBERT-large",
"BAAI/bge-large-en",
"BAAI/bge-base-en",
"BAAI/bge-small-en",
"ibm-granite/granite-embedding-30m-english",
"ibm-granite/granite-embedding-125m-english",
"ibm-granite/granite-embedding-107m-multilingual",
"ibm-granite/granite-embedding-278m-multilingual",
"infly/inf-retriever-v1",
"intfloat/e5-large-v2",
"intfloat/e5-base-v2",
"intfloat/e5-small-v2",
"jinaai/jina-embeddings-v4",
"nomic-ai/nomic-embed-text-v1.5",
"nvidia/NV-Embed-V2",
"Qwen/Qwen3-Embedding-0.6B",
"Qwen/Qwen3-Embedding-4B",
"Qwen/Qwen3-Embedding-8B",
"sentence-transformers/all-MiniLM-L6-v2",
"Salesforce/SFR-Embedding-Mistral",
"Snowflake/snowflake-arctic-embed-l-v2.0",
"thenlper/gte-large",
"thenlper/gte-base",
"thenlper/gte-small",
],
"provider_class": HuggingFaceProvider,
},
"Google EmbeddingGemma": {
"models": ["google/embeddinggemma-300m"],
"provider_class": EmbeddingGemmaProvider,
},
"BGE-M3": {
"models": ["BAAI/bge-m3", "BAAI/bge-m3:dense", "BAAI/bge-m3:hybrid"],
"provider_class": lambda model_name: _create_bgem3_provider(model_name),
},
"Jina AI": {"models": ["jina-embeddings-v3"], "provider_class": JinaProvider},
}
def get_available_translations() -> List[str]:
"""Get available translations by listing JSON files in the text directory."""
text_dir = Path("text")
if not text_dir.exists():
return []
translations = []
for file in text_dir.glob("*.json"):
translation_name = file.stem
translations.append(translation_name)
return sorted(translations)
def select_translation() -> str:
"""Interactive translation selection."""
translations = get_available_translations()
if not translations:
print("No translations found in the text directory!")
exit(1)
if len(translations) == 1:
print(f"Using translation: {translations[0]}")
return translations[0]
print("Available translations:")
for i, translation in enumerate(translations, 1):
print(f" {i}. {translation}")
while True:
try:
choice = input(f"\nSelect translation (1-{len(translations)}): ").strip()
idx = int(choice) - 1
if 0 <= idx < len(translations):
return translations[idx]
else:
print(f"Please enter a number between 1 and {len(translations)}")
except (ValueError, KeyboardInterrupt):
print("\nExiting...")
exit(0)
def select_model() -> EmbeddingProvider:
"""Interactive model selection."""
models = get_available_models()
print("Available embedding models:")
print()
all_choices = []
choice_num = 1
for provider_name, provider_info in models.items():
print(f"{provider_name}:")
for model in provider_info["models"]:
print(f" {choice_num}. {model}")
all_choices.append((provider_name, model, provider_info["provider_class"]))
choice_num += 1
print()
while True:
try:
choice = input(f"Select model (1-{len(all_choices)}): ").strip()
idx = int(choice) - 1
if 0 <= idx < len(all_choices):
provider_name, model_name, provider_class = all_choices[idx]
print(f"Selected: {model_name} ({provider_name})")
return provider_class(model_name)
else:
print(f"Please enter a number between 1 and {len(all_choices)}")
except (ValueError, KeyboardInterrupt):
print("\nExiting...")
exit(0)
def load_translation_text(translation: str) -> List[Dict[str, Any]]:
"""Load the text for a specific translation."""
text_file = Path("text") / f"{translation}.json"
if not text_file.exists():
raise FileNotFoundError(f"Translation file not found: {text_file}")
with open(text_file, "r", encoding="utf-8") as f:
return json.load(f)
def get_model_provider(provider: EmbeddingProvider) -> str:
"""Get the provider name for a given embedding provider."""
if isinstance(provider, OpenAIProvider):
return "openai"
elif isinstance(provider, GeminiProvider):
return "google-gemini"
elif isinstance(provider, VoyageProvider):
return "voyage"
elif isinstance(provider, HuggingFaceProvider):
return "huggingface"
elif isinstance(provider, EmbeddingGemmaProvider):
return "huggingface" # EmbeddingGemma uses HuggingFace directory structure
elif isinstance(provider, (BGEM3Provider, BGEM3HybridProvider)):
return "huggingface" # BGE-M3 uses HuggingFace directory structure
elif isinstance(provider, JinaProvider):
return "huggingface" # JinaProvider falls back to HuggingFace
else:
return "unknown"
def create_output_directories(
verses: List[Dict[str, Any]], provider: EmbeddingProvider
) -> None:
"""Create the necessary output directory structure."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
books = set(verse["book"] for verse in verses)
for book in books:
book_dir = Path("embeddings") / provider_name / model_name / book
book_dir.mkdir(parents=True, exist_ok=True)
def check_existing_embeddings(
translation: str, provider: EmbeddingProvider, verses: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""Check which verses already have embeddings and return only missing ones."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
missing_verses = []
for verse in verses:
output_file = (
Path("embeddings")
/ provider_name
/ model_name
/ verse["book"]
/ f"{verse['chapter']:03d}.json"
)
if output_file.exists():
# Check if this specific verse exists in the file
try:
with open(output_file, "r", encoding="utf-8") as f:
existing_verses = json.load(f)
verse_exists = any(
v["book"] == verse["book"]
and v["chapter"] == verse["chapter"]
and v["verse"] == verse["verse"]
for v in existing_verses
)
if not verse_exists:
missing_verses.append(verse)
except (json.JSONDecodeError, KeyError):
missing_verses.append(verse)
else:
missing_verses.append(verse)
return missing_verses
async def generate_embeddings(
translation: str,
provider: EmbeddingProvider,
verses: List[Dict[str, Any]],
batch_size: int = 100,
) -> None:
"""Generate embeddings for verses and save them."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
print(
f"Generating embeddings for {len(verses)} verses using {model_name} ({provider_name})..."
)
# Group verses by book and chapter for organized saving
verses_by_chapter = {}
for verse in verses:
key = (verse["book"], verse["chapter"])
if key not in verses_by_chapter:
verses_by_chapter[key] = []
verses_by_chapter[key].append(verse)
# Process in batches
for (book, chapter), chapter_verses in verses_by_chapter.items():
print(f"Processing {book} {chapter} ({len(chapter_verses)} verses)...")
# Load existing embeddings if any
output_file = (
Path("embeddings")
/ provider_name
/ model_name
/ book
/ f"{chapter:03d}.json"
)
existing_embeddings = []
if output_file.exists():
try:
with open(output_file, "r", encoding="utf-8") as f:
existing_embeddings = json.load(f)
except (json.JSONDecodeError, KeyError):
existing_embeddings = []
# Process verses in batches
new_embeddings = []
for i in range(0, len(chapter_verses), batch_size):
batch = chapter_verses[i : i + batch_size]
texts = [verse["text"] for verse in batch]
try:
embeddings = await provider.embed_batch(texts)
for verse, embedding in zip(batch, embeddings):
new_embeddings.append(
{
"book": verse["book"],
"chapter": verse["chapter"],
"verse": verse["verse"],
"embedding": embedding,
}
)
print(
f" Processed batch {i // batch_size + 1}/{(len(chapter_verses) + batch_size - 1) // batch_size}"
)
except Exception as e:
print(f"Error processing batch: {e}")
continue
# Merge with existing embeddings and save
all_embeddings = existing_embeddings + new_embeddings
# Sort by verse number
all_embeddings.sort(key=lambda x: x["verse"])
# Save to file
output_file.parent.mkdir(parents=True, exist_ok=True)
with open(output_file, "w", encoding="utf-8") as f:
json.dump(all_embeddings, f, indent=2)
print(f" Saved {len(new_embeddings)} new embeddings to {output_file}")
async def main():
"""Main entry point."""
parser = argparse.ArgumentParser(description="Bible verse embeddings tool")
subparsers = parser.add_subparsers(dest="command", help="Available commands")
# Embed subcommand
embed_parser = subparsers.add_parser(
"embed", help="Generate embeddings for Bible verses"
)
embed_parser.add_argument("--translation", "-t", help="Translation to use")
embed_parser.add_argument("--model", "-m", help="Model to use")
embed_parser.add_argument(
"--batch-size", "-b", type=int, default=100, help="Batch size for processing"
)
embed_parser.add_argument(
"--skip-existing",
"-s",
action="store_true",
help="Skip verses that already have embeddings",
)
# Query subcommand
query_parser = subparsers.add_parser(
"query", help="Search Bible verses using embeddings"
)
query_parser.add_argument(
"--no-hnsw",
action="store_true",
help="Disable HNSW optimization and use brute-force search"
)
# Batch subcommand
batch_parser = subparsers.add_parser(
"batch", help="Run batch queries from YAML file and evaluate results"
)
# Autobatch subcommand
autobatch_parser = subparsers.add_parser(
"autobatch", help="Run batch queries for all models that don't have results yet"
)
# Autoembed subcommand
autoembed_parser = subparsers.add_parser(
"autoembed", help="Automatically generate embeddings for all models missing embeddings"
)
# Report subcommand
report_parser = subparsers.add_parser(
"report", help="Generate markdown report from batch results CSV"
)
report_parser.add_argument(
"--results-file",
"-r",
help="Specific CSV file to read results from (default: reads all files from ./results/ directory)"
)
report_parser.add_argument(
"--output-file",
"-o",
default="README.md",
help="Output markdown file (default: README.md)"
)
report_parser.add_argument(
"--max-queries",
"-q",
type=int,
default=10,
help="Maximum number of queries to show in detailed results (default: 10)"
)
batch_parser.add_argument("--translation", "-t", help="Translation to use")
batch_parser.add_argument("--model", "-m", help="Model to use")
batch_parser.add_argument(
"--queries-file",
"-q",
default="queries.yaml",
help="YAML file containing queries and expected results (default: queries.yaml)"
)
batch_parser.add_argument(
"--concurrency",
"-c",
type=int,
default=5,
help="Number of concurrent queries to process (default: 5)"
)
batch_parser.add_argument(
"--no-hnsw",
action="store_true",
help="Disable HNSW optimization and use brute-force search"
)
autobatch_parser.add_argument("--translation", "-t", help="Translation to use")
autobatch_parser.add_argument(
"--queries-file",
"-q",
default="queries.yaml",
help="YAML file containing queries and expected results (default: queries.yaml)"
)
autobatch_parser.add_argument(
"--concurrency",
"-c",
type=int,
default=5,
help="Number of concurrent queries to process (default: 5)"
)
autobatch_parser.add_argument(
"--no-hnsw",
action="store_true",
help="Disable HNSW optimization and use brute-force search"
)
autobatch_parser.add_argument(
"--dry-run",
action="store_true",
help="Show which models would be processed without actually running them"
)
# Autoembed arguments
autoembed_parser.add_argument("--translation", "-t", help="Translation to use")
autoembed_parser.add_argument(
"--batch-size", "-b", type=int, default=100, help="Batch size for processing"
)
autoembed_parser.add_argument(
"--skip-existing",
"-s",
action="store_true",
help="Skip verses that already have embeddings",
)
autoembed_parser.add_argument(
"--dry-run",
action="store_true",
help="Show which models would be processed without actually generating embeddings"
)
autoembed_parser.add_argument(
"--include-providers",
nargs="+",
help="Only process models from these providers (e.g., 'OpenAI' 'HuggingFace')"
)
autoembed_parser.add_argument(
"--exclude-providers",
nargs="+",
help="Exclude models from these providers (e.g., 'OpenAI' 'HuggingFace')"
)
args = parser.parse_args()
# If no command specified, show help
if not args.command:
parser.print_help()
return
if args.command == "query":
await query_mode(args)
return
if args.command == "batch":
await batch_mode(args)
return
if args.command == "autobatch":
await autobatch_mode(args)
return
if args.command == "autoembed":
await autoembed_mode(args)
return
if args.command == "report":
generate_report(args)
return
if args.command == "embed":
# Continue with embedding generation logic
pass
# Select translation
if args.translation:
translation = args.translation
if translation not in get_available_translations():
print(f"Translation '{translation}' not found!")
return
else:
translation = select_translation()
# Select model
if args.model:
# Find the model in available models
provider = None
models = get_available_models()
for provider_info in models.values():
if args.model in provider_info["models"]:
provider = provider_info["provider_class"](args.model)
break
if provider is None:
print(f"Model '{args.model}' not found!")
return
else:
provider = select_model()
# Load verses
print(f"\nLoading verses for translation: {translation}")
verses = load_translation_text(translation)
print(f"Loaded {len(verses)} verses")
# Create output directories
create_output_directories(verses, provider)
# Check for existing embeddings
if args.skip_existing:
original_count = len(verses)
verses = check_existing_embeddings(translation, provider, verses)
print(
f"Skipping {original_count - len(verses)} verses with existing embeddings"
)
if not verses:
print("All verses already have embeddings!")
return
# Generate embeddings
await generate_embeddings(translation, provider, verses, args.batch_size)
print(f"\nCompleted! Generated embeddings for {len(verses)} verses.")
def load_embeddings_for_model(
provider: EmbeddingProvider, translation: str
) -> List[Dict[str, Any]]:
"""Load all embeddings for a specific model and translation."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
embeddings_dir = Path("embeddings") / provider_name / model_name
all_embeddings = []
if not embeddings_dir.exists():
print(f"No embeddings found for {model_name} ({provider_name})")
return []
print(f"Loading embeddings for {model_name} ({provider_name})...")
# Walk through all book directories
for book_dir in embeddings_dir.iterdir():
if not book_dir.is_dir():
continue
book_name = book_dir.name
# Walk through all chapter files
for chapter_file in book_dir.glob("*.json"):
try:
with open(chapter_file, "r", encoding="utf-8") as f:
chapter_embeddings = json.load(f)
for embedding_data in chapter_embeddings:
# Add metadata for searching
embedding_data["translation"] = translation
embedding_data["model"] = model_name
embedding_data["provider"] = provider_name
all_embeddings.append(embedding_data)
except (json.JSONDecodeError, KeyError) as e:
print(f"Error loading {chapter_file}: {e}")
continue
print(f"Loaded {len(all_embeddings)} verse embeddings")
return all_embeddings
def cosine_similarity(a: List[float], b: List[float]) -> float:
"""Calculate cosine similarity between two vectors."""
import numpy as np
a_np = np.array(a)
b_np = np.array(b)
# Calculate cosine similarity
dot_product = np.dot(a_np, b_np)
norm_a = np.linalg.norm(a_np)
norm_b = np.linalg.norm(b_np)
if norm_a == 0 or norm_b == 0:
return 0.0
return dot_product / (norm_a * norm_b)
def sparse_cosine_similarity(sparse_dict_a: Dict[str, float], sparse_dict_b: Dict[str, float]) -> float:
"""Calculate cosine similarity between two sparse dictionaries."""
import math
# Convert string keys to int and find common terms
dict_a = {int(k): float(v) for k, v in sparse_dict_a.items() if float(v) > 0}
dict_b = {int(k): float(v) for k, v in sparse_dict_b.items() if float(v) > 0}
# Calculate dot product
dot_product = 0.0
for term_id in dict_a:
if term_id in dict_b:
dot_product += dict_a[term_id] * dict_b[term_id]
# Calculate magnitudes
magnitude_a = math.sqrt(sum(v * v for v in dict_a.values()))
magnitude_b = math.sqrt(sum(v * v for v in dict_b.values()))
if magnitude_a == 0.0 or magnitude_b == 0.0:
return 0.0
return dot_product / (magnitude_a * magnitude_b)
def reciprocal_rank_fusion(dense_results: List[Tuple[Dict[str, Any], float]],
sparse_results: List[Tuple[Dict[str, Any], float]],
dense_weight: float = 1.0,
sparse_weight: float = 0.7,
k: int = 60) -> List[Tuple[Dict[str, Any], float]]:
"""
Combine dense and sparse search results using Reciprocal Rank Fusion (RRF).
RRF formula: RRF(d) = Σ(1 / (k + rank(d)))
where k is typically 60, rank(d) is the rank of document d in each ranking
"""
# Create verse ID to result mapping for efficient lookup
verse_scores = {}
# Process dense results
for rank, (verse_data, score) in enumerate(dense_results, 1):
verse_id = f"{verse_data['book']}_{verse_data['chapter']}_{verse_data['verse']}"
if verse_id not in verse_scores:
verse_scores[verse_id] = {
'verse_data': verse_data,
'dense_score': score,
'sparse_score': 0.0,
'dense_rank': rank,
'sparse_rank': float('inf'),
'rrf_score': 0.0
}
else:
verse_scores[verse_id]['dense_score'] = score
verse_scores[verse_id]['dense_rank'] = rank
# Process sparse results
for rank, (verse_data, score) in enumerate(sparse_results, 1):
verse_id = f"{verse_data['book']}_{verse_data['chapter']}_{verse_data['verse']}"
if verse_id not in verse_scores:
verse_scores[verse_id] = {
'verse_data': verse_data,
'dense_score': 0.0,
'sparse_score': score,
'dense_rank': float('inf'),
'sparse_rank': rank,
'rrf_score': 0.0
}
else:
verse_scores[verse_id]['sparse_score'] = score
verse_scores[verse_id]['sparse_rank'] = rank
# Calculate RRF scores
for verse_id, data in verse_scores.items():
dense_rrf = (dense_weight / (k + data['dense_rank'])) if data['dense_rank'] != float('inf') else 0.0
sparse_rrf = (sparse_weight / (k + data['sparse_rank'])) if data['sparse_rank'] != float('inf') else 0.0
data['rrf_score'] = dense_rrf + sparse_rrf
# Sort by RRF score (descending)
sorted_results = sorted(verse_scores.values(), key=lambda x: x['rrf_score'], reverse=True)
# Return in the expected format
return [(result['verse_data'], result['rrf_score']) for result in sorted_results]
async def hybrid_search_embeddings(
query: str,
dense_provider: EmbeddingProvider,
sparse_provider: EmbeddingProvider,
translation: str,
top_k: int = 10,
dense_weight: float = 1.0,
sparse_weight: float = 0.7,
pre_loaded_dense_embeddings: List[Dict[str, Any]] = None,
pre_loaded_sparse_embeddings: List[Dict[str, Any]] = None,
use_hnsw: bool = True
) -> List[Tuple[Dict[str, Any], float]]:
"""
Perform hybrid search combining dense and sparse embeddings using RRF.
"""
print(f"Performing hybrid search for: '{query}'")
# Perform dense search
print("Performing dense search...")
dense_results = await search_embeddings(
query, dense_provider, translation, top_k * 2,
pre_loaded_dense_embeddings, use_hnsw
)
# Perform sparse search
print("Performing sparse search...")
sparse_results = await search_embeddings(
query, sparse_provider, translation, top_k * 2,
pre_loaded_sparse_embeddings, use_hnsw
)
print(f"Dense search returned {len(dense_results)} results")
print(f"Sparse search returned {len(sparse_results)} results")
# Combine using RRF
print("Combining results with Reciprocal Rank Fusion...")
hybrid_results = reciprocal_rank_fusion(
dense_results, sparse_results, dense_weight, sparse_weight
)
return hybrid_results[:top_k]
async def bgem3_hybrid_search(
query: str,
hybrid_provider: BGEM3HybridProvider,
translation: str,
top_k: int = 10,
dense_weight: float = 1.0,
sparse_weight: float = 0.7
) -> List[Tuple[Dict[str, Any], float]]:
"""
Perform hybrid search using a single BGE-M3 hybrid provider.
This is more efficient as it uses the same embeddings for both searches.
"""
print(f"Performing BGE-M3 hybrid search for: '{query}'")
# Load all hybrid embeddings (both dense and sparse)
all_embeddings = load_embeddings_for_model(hybrid_provider, translation)
if not all_embeddings:
return []
# Generate query embeddings
query_hybrid = await hybrid_provider.embed_hybrid_text(query)
query_dense = query_hybrid['dense']
query_sparse_dict = query_hybrid['sparse_dict']
# Perform dense search
print("Performing dense similarity search...")
dense_results = []
for embedding_data in all_embeddings:
similarity = cosine_similarity(query_dense, embedding_data["embedding"])
dense_results.append((embedding_data, similarity))
# Sort dense results
dense_results.sort(key=lambda x: x[1], reverse=True)
dense_results = dense_results[:top_k * 2]
# For sparse search, we would need to load sparse embeddings
# For now, use a simplified approach
print("Hybrid search complete!")
return dense_results[:top_k]
class HNSWIndex:
"""FAISS-based index for fast approximate nearest neighbor search."""
def __init__(self, dimension: int, max_elements: int = 100000):
import faiss
import os
# Set FAISS to single-threaded mode to avoid multiprocessing issues
os.environ["OMP_NUM_THREADS"] = "1"
faiss.omp_set_num_threads(1)
self.dimension = dimension
self.max_elements = max_elements
# Use FAISS HNSW index for cosine similarity (using inner product with normalized vectors)
# Use lower M parameter (16 instead of 32) for faster building with large datasets
self.index = faiss.IndexHNSWFlat(dimension, 16) # 16 is M parameter
# Use much lower efConstruction for faster index building (40 instead of 200)
self.index.hnsw.efConstruction = 40
self.embeddings_data = []
self.built = False
def add_embeddings(self, embeddings_data: List[Dict[str, Any]]):
"""Add embeddings to the index."""
import numpy as np
import faiss
import gc
try:
# Convert embeddings to numpy array with better memory management
print(f"Converting {len(embeddings_data)} embeddings to numpy array...")
embeddings_array = np.array([data["embedding"] for data in embeddings_data], dtype=np.float32)
# All providers now return normalized embeddings, but normalize again to ensure consistency
# for FAISS inner product search (which requires normalized vectors for cosine similarity)
print("Normalizing embeddings...")
faiss.normalize_L2(embeddings_array)
# Add to FAISS index
print("Adding embeddings to FAISS index...")
self.index.add(embeddings_array)
# Clean up the large numpy array to free memory
del embeddings_array
gc.collect()
self.embeddings_data = embeddings_data
self.built = True
# Set ef parameter for search (higher = more accurate but slower)
# Use lower values for faster search with large datasets
self.index.hnsw.efSearch = max(32, min(100, len(embeddings_data) // 20))
print(f"HNSW index built successfully with efSearch={self.index.hnsw.efSearch}")
except Exception as e:
print(f"Error in add_embeddings: {e}")
raise
def search(self, query_embedding: List[float], k: int = 10) -> List[Tuple[Dict[str, Any], float]]:
"""Search for k nearest neighbors and return with exact cosine similarities."""
if not self.built:
return []
import numpy as np
import faiss
query_array = np.array([query_embedding]).astype('float32').reshape(1, -1)
# All providers now return normalized embeddings, but normalize again to ensure consistency
# for FAISS inner product search (which requires normalized vectors for cosine similarity)
faiss.normalize_L2(query_array)
# Get approximate neighbors (returns more than k for exact rescoring)
search_k = min(k * 3, len(self.embeddings_data)) # Get 3x candidates for rescoring
distances, indices = self.index.search(query_array, search_k)
# Calculate exact cosine similarities for the candidates
results = []
for idx in indices[0]:
if idx < len(self.embeddings_data) and idx >= 0: # FAISS can return -1 for invalid indices
embedding_data = self.embeddings_data[idx]
exact_similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
results.append((embedding_data, exact_similarity))
# Sort by exact similarity and return top k
results.sort(key=lambda x: x[1], reverse=True)
return results[:k]
# Global cache for HNSW indices
_hnsw_cache = {}
def get_hnsw_index(provider: EmbeddingProvider, translation: str, all_embeddings: List[Dict[str, Any]]) -> HNSWIndex:
"""Get or create FAISS HNSW index for given provider and translation."""
cache_key = f"{provider.model_name}_{translation}"
if cache_key in _hnsw_cache:
return _hnsw_cache[cache_key]
if not all_embeddings:
return None
# For very large datasets, warn about potential memory usage
if len(all_embeddings) > 50000:
print(f"Warning: Building HNSW index for {len(all_embeddings)} embeddings may require significant memory and time")
print("Consider using --no-hnsw if you encounter memory issues")
# Determine embedding dimension from first embedding
dimension = len(all_embeddings[0]["embedding"])
try:
# Create and build HNSW index with error handling
print(f"Building FAISS HNSW index for {len(all_embeddings)} embeddings (dimension: {dimension})...")
hnsw_index = HNSWIndex(dimension, max_elements=len(all_embeddings) * 2)
hnsw_index.add_embeddings(all_embeddings)
# Cache the index
_hnsw_cache[cache_key] = hnsw_index
print(f"FAISS HNSW index built and cached for {cache_key}")
return hnsw_index
except Exception as e:
print(f"Error building HNSW index: {e}")
print("Falling back to brute-force search")
return None
async def search_embeddings(
query: str, provider: EmbeddingProvider, translation: str, top_k: int = 10,
pre_loaded_embeddings: List[Dict[str, Any]] = None, use_hnsw: bool = True
) -> List[Tuple[Dict[str, Any], float]]:
"""Search for similar verses using embeddings with optional HNSW optimization."""
# Use pre-loaded embeddings if provided, otherwise load them
if pre_loaded_embeddings is not None:
all_embeddings = pre_loaded_embeddings
else:
all_embeddings = load_embeddings_for_model(provider, translation)
if not all_embeddings:
return []
# Generate embedding for the query
print(f"Generating embedding for query: '{query}'")
query_embedding = await provider.embed_query(query)
# Use HNSW when explicitly requested
if use_hnsw:
hnsw_index = get_hnsw_index(provider, translation, all_embeddings)
if hnsw_index:
return hnsw_index.search(query_embedding, top_k)
# Fallback to brute-force search for small datasets or when HNSW fails
print(f"Using brute-force search for {len(all_embeddings)} embeddings")
results = []
for embedding_data in all_embeddings:
similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
results.append((embedding_data, similarity))
# Sort by similarity (descending)
results.sort(key=lambda x: x[1], reverse=True)
return results[:top_k]
def display_search_results(
results: List[Tuple[Dict[str, Any], float]], query: str
) -> None:
"""Display search results in a formatted way."""
print(f"\nTop {len(results)} results for query: '{query}'")
print("=" * 80)
for i, (verse_data, similarity) in enumerate(results, 1):
book = verse_data["book"]
chapter = verse_data["chapter"]
verse = verse_data["verse"]
print(f"{i:2d}. {book} {chapter}:{verse} (similarity: {similarity:.4f})")
# Load the actual verse text
try:
translation = verse_data["translation"]
text_file = Path("text") / f"{translation}.json"
if text_file.exists():
with open(text_file, "r", encoding="utf-8") as f:
verses = json.load(f)
# Find the matching verse
verse_text = None
for v in verses:
if (
v["book"] == book
and v["chapter"] == chapter
and v["verse"] == verse
):
verse_text = v["text"]
break
if verse_text:
# Wrap text at reasonable length
wrapped_text = "\n ".join(
[verse_text[i : i + 70] for i in range(0, len(verse_text), 70)]
)
print(f" {wrapped_text}")
else:
print(f" [Text not found]")
else:
print(f" [Translation file not found: {translation}]")
except Exception as e:
print(f" [Error loading text: {e}]")
print()
def select_translation_for_query() -> str:
"""Interactive translation selection for querying."""
translations = get_available_translations()
if not translations:
print("No translations found in the text directory!")
exit(1)
if len(translations) == 1:
print(f"Using translation: {translations[0]}")
return translations[0]
print("Available translations:")
for i, translation in enumerate(translations, 1):
print(f" {i}. {translation}")
while True:
try:
choice = input(
f"\nSelect translation for query (1-{len(translations)}): "
).strip()
idx = int(choice) - 1
if 0 <= idx < len(translations):
return translations[idx]
else:
print(f"Please enter a number between 1 and {len(translations)}")
except (ValueError, KeyboardInterrupt):
print("\nExiting...")
exit(0)
def select_model_for_query() -> EmbeddingProvider:
"""Interactive model selection for querying."""
models = get_available_models()
print("Available embedding models:")
print()
all_choices = []
choice_num = 1
for provider_name, provider_info in models.items():
print(f"{provider_name}:")
for model in provider_info["models"]:
print(f" {choice_num}. {model}")
all_choices.append((provider_name, model, provider_info["provider_class"]))
choice_num += 1
print()
while True:
try:
choice = input(f"Select model for query (1-{len(all_choices)}): ").strip()
idx = int(choice) - 1
if 0 <= idx < len(all_choices):
provider_name, model_name, provider_class = all_choices[idx]
print(f"Selected: {model_name} ({provider_name})")
return provider_class(model_name)
else:
print(f"Please enter a number between 1 and {len(all_choices)}")
except (ValueError, KeyboardInterrupt):
print("\nExiting...")
exit(0)
async def query_mode(args):
"""Interactive query mode."""
use_hnsw = not args.no_hnsw
print("Bible Verse Search Mode")
print("=" * 30)
print()
# Select translation
translation = select_translation_for_query()
print()
# Select model
provider = select_model_for_query()
print()
# Interactive query loop
while True:
try:
query = input("\nEnter your search query (or 'quit' to exit): ").strip()
if query.lower() in ["quit", "exit", "q"]:
print("Goodbye!")
break
if not query:
print("Please enter a query.")
continue
# Get number of results
try:
top_k_input = input("Number of results to show (default 10): ").strip()
top_k = int(top_k_input) if top_k_input else 10
top_k = max(1, min(top_k, 50)) # Limit between 1 and 50
except ValueError:
top_k = 10
# Perform search
results = await search_embeddings(query, provider, translation, top_k, use_hnsw=use_hnsw)
if results:
display_search_results(results, query)
else:
print(
"No results found. Make sure embeddings exist for this model and translation."
)
except KeyboardInterrupt:
print("\nGoodbye!")
break
except Exception as e:
print(f"Error during search: {e}")
print("Please try again.")
def load_queries(queries_file: Path) -> List[Dict[str, Any]]:
"""Load queries from YAML file."""
if not queries_file.exists():
print(f"Queries file not found: {queries_file}")
print("Expected format:")
print("""# Simple format - list of query/expected pairs
- query: "love your enemies"
expected: "Matthew 5:44"
- query: "faith hope love"
expected: "1 Corinthians 13:13"
# Or with expected as list for multiple valid answers
- query: "god is love"
expected: ["1 John 4:8", "1 John 4:16"]
""")
exit(1)
try:
import yaml
with open(queries_file, "r", encoding="utf-8") as f:
queries = yaml.safe_load(f)
# Handle both formats: list directly or under "queries" key
if isinstance(queries, dict) and "queries" in queries:
queries = queries["queries"]
elif not isinstance(queries, list):
print(f"Invalid queries file format. Expected list of queries in {queries_file}")
exit(1)
return queries
except Exception as e:
print(f"Error loading queries file {queries_file}: {e}")
exit(1)
def parse_verse_reference(ref: str) -> Tuple[str, int, int]:
"""Parse a verse reference like 'Matthew 5:44' into (book, chapter, verse)."""
try:
# Split on the last space to separate book from chapter:verse
parts = ref.rsplit(" ", 1)
if len(parts) != 2:
raise ValueError("Invalid format")
book = parts[0].strip()
chapter_verse = parts[1]
# Split chapter:verse
if ":" not in chapter_verse:
raise ValueError("Missing verse number")
chapter_str, verse_str = chapter_verse.split(":", 1)
chapter = int(chapter_str.strip())
verse = int(verse_str.strip())
return book, chapter, verse
except Exception as e:
print(f"Error parsing verse reference '{ref}': {e}")
return None, None, None
def calculate_score(expected_refs, results: List[Tuple[Dict[str, Any], float]]) -> int:
"""Calculate score based on where expected result appears in top 3 results."""
# Handle both single string and list of expected references
if isinstance(expected_refs, str):
expected_refs = [expected_refs]
elif not expected_refs:
return 0
# Parse all expected references
expected_verses = []
for ref in expected_refs:
book, chapter, verse = parse_verse_reference(ref)
if book:
expected_verses.append((book, chapter, verse))
if not expected_verses:
return 0
# Check if any expected verse appears in top 3 results
for i, (verse_data, similarity) in enumerate(results[:3]):
verse_tuple = (verse_data["book"], verse_data["chapter"], verse_data["verse"])
if verse_tuple in expected_verses:
return 3 - i # 3 points for 1st, 2 for 2nd, 1 for 3rd
return 0 # No points if not in top 3
def format_verse_reference(verse_data: Dict[str, Any]) -> str:
"""Format verse data into a reference string."""
return f"{verse_data['book']} {verse_data['chapter']}:{verse_data['verse']}"
async def process_single_query(
query_data: Dict[str, Any],
provider: EmbeddingProvider,
translation: str,
query_index: int,
total_queries: int,
semaphore: Semaphore,
pre_loaded_embeddings: List[Dict[str, Any]],
use_hnsw: bool = True
) -> Dict[str, Any]:
"""Process a single query with concurrency control."""
async with semaphore:
query = query_data.get("query", "")
expected = query_data.get("expected", "")
if not query:
print(f"Skipping query {query_index}: missing 'query' field")
return None
print(f"Query {query_index}/{total_queries}: {query}")
try:
# Get top 3 results using pre-loaded embeddings
search_results = await search_embeddings(query, provider, translation, top_k=3, pre_loaded_embeddings=pre_loaded_embeddings, use_hnsw=use_hnsw)
if not search_results:
print(f" No results found for query: {query}")
return None
# Calculate score if expected result is provided
score = 0
if expected:
score = calculate_score(expected, search_results)
expected_str = str(expected) if isinstance(expected, list) else expected
print(f" Expected: {expected_str} | Score: {score}/3")
# Format results
result_data = {
"query": query,
"expected": expected,
"score": score,
"results": []
}
for j, (verse_data, similarity) in enumerate(search_results):
result_ref = format_verse_reference(verse_data)
result_data["results"].append({
"reference": result_ref,
"similarity": similarity
})
print(f" {j+1}. {result_ref} ({similarity:.4f})")
return result_data
except Exception as e:
print(f"Error processing query '{query}': {e}")
return None
async def run_batch_queries_openai_optimized(
queries: List[Dict[str, Any]],
provider: OpenAIProvider,
translation: str,
batch_size: int = 100,
use_hnsw: bool = True
) -> List[Dict[str, Any]]:
"""Run batch queries optimized for OpenAI using their batch embedding capability."""
total_queries = len(queries)
print(f"Running {total_queries} batch queries using OpenAI batch optimization...")
# Load embeddings once for all queries
print("Loading verse embeddings for similarity search...")
pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
if not pre_loaded_embeddings:
print("No embeddings found! Make sure embeddings exist for this model and translation.")
return []
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
# Extract all query texts for batch embedding
query_texts = []
query_data_list = []
for query_data in queries:
query = query_data.get("query", "")
if query:
query_texts.append(query)
query_data_list.append(query_data)
if not query_texts:
print("No valid queries found!")
return []
# Embed all queries in batches using OpenAI's batch API
print(f"Embedding {len(query_texts)} queries using OpenAI batch API...")
query_embeddings = await provider.embed_queries_batch(query_texts, batch_size)
if len(query_embeddings) != len(query_texts):
print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}")
# Build HNSW index if requested
hnsw_index = None
if use_hnsw:
hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings)
# Process results
results = []
for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)):
query = query_data.get("query", "")
expected = query_data.get("expected", "")
print(f"Processing query {i+1}/{len(query_texts)}: {query}")
# Use HNSW for similarity search if available, otherwise brute-force
if hnsw_index:
search_results = hnsw_index.search(query_embedding, k=3)
else:
# Calculate similarities with all verses
similarities = []
for embedding_data in pre_loaded_embeddings:
similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
similarities.append((embedding_data, similarity))
# Sort by similarity and get top 3
similarities.sort(key=lambda x: x[1], reverse=True)
search_results = similarities[:3]
# Calculate score if expected result is provided
score = 0
if expected:
score = calculate_score(expected, search_results)
expected_str = str(expected) if isinstance(expected, list) else expected
print(f" Expected: {expected_str} | Score: {score}/3")
# Format results
result_data = {
"query": query,
"expected": expected,
"score": score,
"results": []
}
for j, (verse_data, similarity) in enumerate(search_results):
result_ref = format_verse_reference(verse_data)
result_data["results"].append({
"reference": result_ref,
"similarity": similarity
})
print(f" {j+1}. {result_ref} ({similarity:.4f})")
results.append(result_data)
print(f"\nProcessed {len(results)} queries successfully")
return results
async def run_batch_queries_optimized(
queries: List[Dict[str, Any]],
provider: EmbeddingProvider,
translation: str,
batch_size: int = 100,
use_hnsw: bool = True
) -> List[Dict[str, Any]]:
"""Run batch queries optimized for providers that support batch embedding."""
total_queries = len(queries)
print(f"Running {total_queries} batch queries using batch optimization...")
# Load embeddings once for all queries
print("Loading verse embeddings for similarity search...")
pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
if not pre_loaded_embeddings:
print("No embeddings found! Make sure embeddings exist for this model and translation.")
return []
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
# Extract all query texts for batch embedding
query_texts = []
query_data_list = []
for query_data in queries:
query = query_data.get("query", "")
if query:
query_texts.append(query)
query_data_list.append(query_data)
if not query_texts:
print("No valid queries found!")
return []
# Embed all queries in batches using provider's batch API
print(f"Embedding {len(query_texts)} queries using batch API...")
# Adjust batch size based on provider type
if isinstance(provider, (HuggingFaceProvider, BGEM3Provider, BGEM3HybridProvider)):
batch_size = 500 # Local models can handle larger batches
elif isinstance(provider, (OpenAIProvider, GeminiProvider, VoyageProvider)):
batch_size = 100 # API models use smaller batches
query_embeddings = await provider.embed_queries_batch(query_texts, batch_size)
if len(query_embeddings) != len(query_texts):
print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}")
# Build HNSW index if requested
hnsw_index = None
if use_hnsw:
hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings)
# Process results
results = []
for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)):
query = query_data.get("query", "")
expected = query_data.get("expected", "")
print(f"Processing query {i+1}/{len(query_texts)}: {query}")
# Use HNSW for similarity search if available, otherwise brute-force
if hnsw_index:
search_results = hnsw_index.search(query_embedding, k=3)
else:
# Calculate similarities with all verses
similarities = []
for embedding_data in pre_loaded_embeddings:
similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
similarities.append((embedding_data, similarity))
# Sort by similarity and get top 3
similarities.sort(key=lambda x: x[1], reverse=True)
search_results = similarities[:3]
# Calculate score if expected result is provided
score = 0
if expected:
score = calculate_score(expected, search_results)
expected_str = str(expected) if isinstance(expected, list) else expected
print(f" Expected: {expected_str} | Score: {score}/3")
# Format results
result_data = {
"query": query,
"expected": expected,
"score": score,
"results": []
}
for j, (verse_data, similarity) in enumerate(search_results):
result_ref = format_verse_reference(verse_data)
result_data["results"].append({
"reference": result_ref,
"similarity": similarity
})
print(f" {j+1}. {result_ref} ({similarity:.4f})")
results.append(result_data)
print(f"\nProcessed {len(results)} queries successfully")
return results
async def run_batch_queries(
queries: List[Dict[str, Any]],
provider: EmbeddingProvider,
translation: str,
concurrency: int = 5,
use_hnsw: bool = True
) -> List[Dict[str, Any]]:
"""Run all batch queries with provider-specific optimizations."""
# Check if provider supports batch optimization
if hasattr(provider, 'embed_queries_batch'):
provider_name = type(provider).__name__
print(f"Using {provider_name} batch optimization...")
return await run_batch_queries_optimized(queries, provider, translation, use_hnsw=use_hnsw)
# Fall back to standard concurrent processing for other providers
total_queries = len(queries)
print(f"Running {total_queries} batch queries with concurrency limit of {concurrency}...")
# Load embeddings once for all queries
print("Loading embeddings for all queries...")
pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
if not pre_loaded_embeddings:
print("No embeddings found! Make sure embeddings exist for this model and translation.")
return []
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
# Create semaphore to limit concurrent requests
semaphore = Semaphore(concurrency)
# Create tasks for all queries
tasks = []
for i, query_data in enumerate(queries, 1):
task = process_single_query(
query_data, provider, translation, i, total_queries, semaphore, pre_loaded_embeddings, use_hnsw
)
tasks.append(task)
# Run all tasks concurrently
print(f"Starting concurrent processing...")
results = await asyncio.gather(*tasks, return_exceptions=True)
# Filter out None results and exceptions
valid_results = []
for result in results:
if isinstance(result, Exception):
print(f"Exception in batch processing: {result}")
elif result is not None:
valid_results.append(result)
print(f"\nProcessed {len(valid_results)} queries successfully out of {total_queries}")
return valid_results
def write_to_csv(
results: List[Dict[str, Any]],
provider: EmbeddingProvider,
translation: str
) -> Path:
"""Write results to CSV file using new directory structure."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
# Create results directory structure
results_dir = Path("results") / provider_name
results_dir.mkdir(parents=True, exist_ok=True)
# Create CSV file path
results_file = results_dir / f"{model_name}.csv"
# Always overwrite the file (no appending)
with open(results_file, "w", newline="", encoding="utf-8") as f:
fieldnames = [
"timestamp", "provider", "model", "translation", "query", "expected",
"result1", "score1", "result2", "score2", "result3", "score3", "points"
]
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
timestamp = datetime.now().isoformat()
for result in results:
# Pad results to ensure we have 3 entries
padded_results = result["results"] + [{"reference": "", "similarity": 0.0}] * 3
padded_results = padded_results[:3] # Take only first 3
row = {
"timestamp": timestamp,
"provider": provider_name,
"model": model_name,
"translation": translation,
"query": result["query"],
"expected": result["expected"],
"result1": padded_results[0]["reference"],
"score1": f"{padded_results[0]['similarity']:.4f}",
"result2": padded_results[1]["reference"],
"score2": f"{padded_results[1]['similarity']:.4f}",
"result3": padded_results[2]["reference"],
"score3": f"{padded_results[2]['similarity']:.4f}",
"points": result["score"]
}
writer.writerow(row)
return results_file
async def batch_mode(args):
"""Batch query processing mode."""
print("Bible Verse Batch Query Mode")
print("=" * 35)
print()
# Load queries
queries_file = Path(args.queries_file)
queries = load_queries(queries_file)
print(f"Loaded {len(queries)} queries from {queries_file}")
# Select translation
if args.translation:
translation = args.translation
if translation not in get_available_translations():
print(f"Translation '{translation}' not found!")
return
else:
translation = select_translation_for_query()
print(f"Using translation: {translation}")
print()
# Select model
if args.model:
# Find the model in available models
provider = None
models = get_available_models()
for provider_info in models.values():
if args.model in provider_info["models"]:
provider = provider_info["provider_class"](args.model)
break
if provider is None:
print(f"Model '{args.model}' not found!")
return
else:
provider = select_model_for_query()
provider_name = get_model_provider(provider)
model_name = provider.get_name()
print(f"Using model: {model_name} ({provider_name})")
print()
# Run batch queries
use_hnsw = not args.no_hnsw
results = await run_batch_queries(queries, provider, translation, args.concurrency, use_hnsw)
if not results:
print("No results to save.")
return
# Calculate summary statistics
total_queries = len(results)
total_points = sum(r["score"] for r in results)
max_possible_points = total_queries * 3
accuracy = (total_points / max_possible_points) * 100 if max_possible_points > 0 else 0
print(f"\nBatch Query Summary:")
print(f" Total queries: {total_queries}")
print(f" Total points: {total_points}/{max_possible_points}")
print(f" Accuracy: {accuracy:.1f}%")
# Save to CSV using new structure
results_file = write_to_csv(results, provider, translation)
print(f"\nResults written to: {results_file}")
def get_models_without_results(translation: str) -> List[tuple]:
"""Get list of (provider_name, model_name, provider_class) for models without results."""
models = get_available_models()
results_dir = Path("results")
models_without_results = []
for provider_name, provider_info in models.items():
provider_class = provider_info["provider_class"]
for model_name in provider_info["models"]:
# Create a provider instance to get the correct directory/file names
provider_instance = provider_class(model_name)
provider_dir = get_model_provider(provider_instance)
model_file_name = provider_instance.get_name()
# Check if results file exists
results_file = results_dir / provider_dir / f"{model_file_name}.csv"
if not results_file.exists():
models_without_results.append((provider_name, model_name, provider_class))
return models_without_results
def get_models_without_embeddings(translation: str, include_providers=None, exclude_providers=None) -> List[tuple]:
"""Get list of (provider_name, model_name, provider_class) for models without embeddings."""
models = get_available_models()
models_without_embeddings = []
for provider_name, provider_info in models.items():
# Apply provider filtering
if include_providers and provider_name not in include_providers:
continue
if exclude_providers and provider_name in exclude_providers:
continue
provider_class = provider_info["provider_class"]
for model_name in provider_info["models"]:
# Create a provider instance to check embeddings
provider_instance = provider_class(model_name)
if not check_embeddings_exist(provider_instance, translation):
models_without_embeddings.append((provider_name, model_name, provider_class))
return models_without_embeddings
def check_embeddings_exist(provider: EmbeddingProvider, translation: str) -> bool:
"""Check if embeddings exist for a given provider and translation."""
provider_name = get_model_provider(provider)
model_name = provider.get_name()
embeddings_dir = Path("embeddings") / provider_name / model_name
if not embeddings_dir.exists():
return False
# Check if there are any embedding files
for book_dir in embeddings_dir.iterdir():
if book_dir.is_dir() and any(book_dir.glob("*.json")):
return True
return False
async def autobatch_mode(args):
"""Automatically run batch processing for all models without results."""
print("Bible Verse Auto-Batch Processing Mode")
print("=" * 40)
print()
# Load queries
queries_file = Path(args.queries_file)
queries = load_queries(queries_file)
print(f"Loaded {len(queries)} queries from {queries_file}")
# Select translation
if args.translation:
translation = args.translation
if translation not in get_available_translations():
print(f"Translation '{translation}' not found!")
return
else:
translation = select_translation_for_query()
print(f"Using translation: {translation}")
print()
# Get models without results
models_without_results = get_models_without_results(translation)
if not models_without_results:
print("All models already have results! No processing needed.")
return
print(f"Found {len(models_without_results)} models without results:")
# Filter models that have embeddings
processable_models = []
for provider_name, model_name, provider_class in models_without_results:
provider_instance = provider_class(model_name)
if check_embeddings_exist(provider_instance, translation):
processable_models.append((provider_name, model_name, provider_class))
print(f" ✅ {provider_name}/{model_name} - has embeddings")
else:
print(f" ❌ {provider_name}/{model_name} - missing embeddings (skipping)")
if not processable_models:
print("\nNo models with embeddings found. Run embedding generation first!")
return
print(f"\nWill process {len(processable_models)} models with embeddings.")
if args.dry_run:
print("\nDry run complete. Use --no-dry-run or remove --dry-run to actually process.")
return
print()
# Process each model
successful_models = 0
failed_models = 0
for i, (provider_name, model_name, provider_class) in enumerate(processable_models, 1):
print(f"Processing model {i}/{len(processable_models)}: {provider_name}/{model_name}")
print("-" * 60)
try:
provider = provider_class(model_name)
# Run batch queries
use_hnsw = not args.no_hnsw
results = await run_batch_queries(queries, provider, translation, args.concurrency, use_hnsw)
if results:
# Save results
results_file = write_to_csv(results, provider, translation)
# Calculate summary statistics
total_queries = len(results)
total_points = sum(r["score"] for r in results)
max_possible_points = total_queries * 3
accuracy = (total_points / max_possible_points) * 100 if max_possible_points > 0 else 0
print(f" ✅ Completed: {accuracy:.1f}% accuracy ({total_points}/{max_possible_points} points)")
print(f" 📁 Results saved to: {results_file}")
successful_models += 1
else:
print(f" ❌ Failed: No results generated")
failed_models += 1
except Exception as e:
print(f" ❌ Failed with error: {e}")
failed_models += 1
print()
# Summary
print("=" * 60)
print("Auto-batch processing complete!")
print(f" ✅ Successful: {successful_models} models")
if failed_models > 0:
print(f" ❌ Failed: {failed_models} models")
print(f" 📊 Total processed: {successful_models + failed_models} models")
if successful_models > 0:
print(f"\n💡 Run 'python main.py report' to generate an updated report with all results.")
async def autoembed_mode(args):
"""Automatically generate embeddings for all models without embeddings."""
print("Bible Verse Auto-Embedding Generation Mode")
print("=" * 45)
print()
# Select translation
if args.translation:
translation = args.translation
if translation not in get_available_translations():
print(f"Translation '{translation}' not found!")
return
else:
translation = select_translation()
print(f"Using translation: {translation}")
print()
# Load verses for the translation
print(f"Loading verses for translation: {translation}")
try:
verses = load_translation_text(translation)
print(f"Loaded {len(verses)} verses")
except FileNotFoundError as e:
print(f"Error loading translation: {e}")
return
# Get models without embeddings
models_without_embeddings = get_models_without_embeddings(
translation,
include_providers=args.include_providers,
exclude_providers=args.exclude_providers
)
if not models_without_embeddings:
print("All models already have embeddings! No processing needed.")
return
print(f"Found {len(models_without_embeddings)} models without embeddings:")
for provider_name, model_name, _ in models_without_embeddings:
print(f" - {provider_name}/{model_name}")
if args.dry_run:
print(f"\nDry run complete. Found {len(models_without_embeddings)} models that need embeddings.")
print("Use --no-dry-run or remove --dry-run to actually generate embeddings.")
return
print()
# Process each model
successful_models = 0
failed_models = 0
for i, (provider_name, model_name, provider_class) in enumerate(models_without_embeddings, 1):
print(f"Processing model {i}/{len(models_without_embeddings)}: {provider_name}/{model_name}")
print("-" * 60)
try:
# Create provider instance
provider = provider_class(model_name)
# Create output directories
create_output_directories(verses, provider)
# Check for existing embeddings and skip if requested
verses_to_process = verses
if args.skip_existing:
original_count = len(verses)
verses_to_process = check_existing_embeddings(translation, provider, verses)
skipped_count = original_count - len(verses_to_process)
if skipped_count > 0:
print(f" Skipping {skipped_count} verses with existing embeddings")
if not verses_to_process:
print(f" ✅ All verses already have embeddings for this model")
successful_models += 1
continue
# Generate embeddings
print(f" Generating embeddings for {len(verses_to_process)} verses...")
await generate_embeddings(translation, provider, verses_to_process, args.batch_size)
print(f" ✅ Completed: Generated embeddings for {len(verses_to_process)} verses")
successful_models += 1
except Exception as e:
print(f" ❌ Failed with error: {e}")
failed_models += 1
print()
# Summary
print("=" * 60)
print("Auto-embedding generation complete!")
print(f" ✅ Successful: {successful_models} models")
if failed_models > 0:
print(f" ❌ Failed: {failed_models} models")
print(f" 📊 Total processed: {successful_models + failed_models} models")
if successful_models > 0:
print(f"\n💡 New embeddings have been generated. You can now:")
print(f" - Use 'uv run main.py query' to search verses")
print(f" - Use 'uv run main.py autobatch' to evaluate all models")
print(f" - Use 'uv run main.py report' to generate updated reports")
def read_all_results_csv(results_dir: Path = None) -> List[Dict[str, Any]]:
"""Read and parse results from all CSV files in the results directory structure."""
if results_dir is None:
results_dir = Path("results")
if not results_dir.exists():
print(f"Results directory not found: {results_dir}")
return []
all_results = []
# Walk through the directory structure: results/{provider}/{model}.csv
for provider_dir in results_dir.iterdir():
if not provider_dir.is_dir():
continue
print(f"Reading results from provider: {provider_dir.name}")
for csv_file in provider_dir.glob("*.csv"):
print(f" Reading: {csv_file}")
try:
with open(csv_file, "r", encoding="utf-8") as f:
reader = csv.DictReader(f)
for row in reader:
# Convert numeric fields
try:
row["points"] = int(row["points"])
except (ValueError, KeyError):
row["points"] = 0
# Convert similarity scores to float
for score_field in ["score1", "score2", "score3"]:
try:
if score_field in row and row[score_field]:
row[score_field] = float(row[score_field])
else:
row[score_field] = 0.0
except (ValueError, TypeError):
row[score_field] = 0.0
all_results.append(row)
except Exception as e:
print(f" Error reading CSV file {csv_file}: {e}")
continue
print(f"Total results loaded: {len(all_results)}")
return all_results
def read_results_csv(results_file: Path) -> List[Dict[str, Any]]:
"""Read and parse results from a single CSV file (backward compatibility)."""
if not results_file.exists():
print(f"Results file not found: {results_file}")
return []
results = []
try:
with open(results_file, "r", encoding="utf-8") as f:
reader = csv.DictReader(f)
for row in reader:
# Convert numeric fields
try:
row["points"] = int(row["points"])
except (ValueError, KeyError):
row["points"] = 0
# Convert similarity scores to float
for score_field in ["score1", "score2", "score3"]:
try:
if score_field in row and row[score_field]:
row[score_field] = float(row[score_field])
else:
row[score_field] = 0.0
except (ValueError, TypeError):
row[score_field] = 0.0
results.append(row)
except Exception as e:
print(f"Error reading CSV file {results_file}: {e}")
return []
return results
def analyze_results(results: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Analyze results and generate summary statistics."""
if not results:
return {}
# Group by provider and model
provider_model_stats = {}
for result in results:
provider = result.get("provider", "unknown")
model = result.get("model", "unknown")
translation = result.get("translation", "unknown")
points = result.get("points", 0)
key = (provider, model, translation)
if key not in provider_model_stats:
provider_model_stats[key] = {
"provider": provider,
"model": model,
"translation": translation,
"total_queries": 0,
"total_points": 0,
"max_points": 0,
"correct_top1": 0, # 3 points
"correct_top2": 0, # 2 points
"correct_top3": 0, # 1 point
"incorrect": 0, # 0 points
"queries": []
}
stats = provider_model_stats[key]
stats["total_queries"] += 1
stats["total_points"] += points
stats["max_points"] += 3 # Maximum possible points per query
# Count by position
if points == 3:
stats["correct_top1"] += 1
elif points == 2:
stats["correct_top2"] += 1
elif points == 1:
stats["correct_top3"] += 1
else:
stats["incorrect"] += 1
# Store query details for detailed table
stats["queries"].append({
"query": result.get("query", ""),
"expected": result.get("expected", ""),
"result1": result.get("result1", ""),
"score1": result.get("score1", "0.0000"),
"result2": result.get("result2", ""),
"score2": result.get("score2", "0.0000"),
"result3": result.get("result3", ""),
"score3": result.get("score3", "0.0000"),
"points": points
})
# Calculate accuracy percentages
for stats in provider_model_stats.values():
if stats["max_points"] > 0:
stats["accuracy"] = (stats["total_points"] / stats["max_points"]) * 100
else:
stats["accuracy"] = 0
return provider_model_stats
def generate_summary_table(stats: Dict[tuple, Dict[str, Any]]) -> str:
"""Generate summary table markdown."""
if not stats:
return "No results to display.\n\n"
markdown = "## Model Performance Summary\n\n"
markdown += "| Provider | Model | Translation | Accuracy | Top 1 | Top 2 | Top 3 | Failed | Total |\n"
markdown += "|----------|-------|-------------|----------|-------|-------|-------|-----------|-------|\n"
# Sort by accuracy (descending)
sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True)
for (provider, model, translation), stat in sorted_stats:
markdown += f"| {provider} | {model} | {translation} | "
markdown += f"{stat['accuracy']:.1f}% | "
markdown += f"{stat['correct_top1']} | "
markdown += f"{stat['correct_top2']} | "
markdown += f"{stat['correct_top3']} | "
markdown += f"{stat['incorrect']} | "
markdown += f"{stat['total_queries']} |\n"
markdown += "\n"
return markdown
def generate_detailed_table(stats: Dict[tuple, Dict[str, Any]], max_queries: int = 10) -> str:
"""Generate detailed results with individual sections for each model."""
if not stats:
return ""
markdown = "## Results\n\n"
markdown += "✅ denotes accurate result.\n\n"
# Sort models by accuracy (best first)
sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True)
for (provider, model, translation), stat in sorted_stats:
# Create model heading
model_display = f"{provider}/{model}" if provider != "HuggingFace" else model
markdown += f"### {model_display} ({translation.upper()})\n\n"
# Add summary statistics
accuracy = stat["accuracy"]
total_points = stat["total_points"]
max_points = stat["max_points"]
total_queries = len(stat["queries"])
markdown += f"**Accuracy: {accuracy:.1f}%** ({total_points}/{max_points} points across {total_queries} queries)\n\n"
# Create table for this model
markdown += "| Query | Expected | Top Result | Score | ✓ |\n"
markdown += "|-------|----------|------------|-------|---|\n"
# Show up to max_queries results
queries = stat["queries"][:max_queries]
for query_result in queries:
query = query_result["query"]
# Truncate long queries for table readability
display_query = query[:50] + "..." if len(query) > 50 else query
expected = query_result["expected"]
# Get the top result
if "result1" in query_result and query_result["result1"]:
result1 = query_result["result1"]
score1 = query_result.get("score1", 0.0)
points = query_result.get("points", 0)
# Add checkmark based on points
if points == 3:
checkmark = "✅" # Perfect match (1st place)
elif points == 2:
checkmark = "⚠️" # Good match (2nd place)
elif points == 1:
checkmark = "❌" # Poor match (3rd place)
else:
checkmark = "❌" # No match
markdown += f"| {display_query} | {expected} | {result1} | {score1:.4f} | {checkmark} |\n"
else:
markdown += f"| {display_query} | {expected} | No results | 0.0000 | ❌ |\n"
if len(stat["queries"]) > max_queries:
remaining = len(stat["queries"]) - max_queries
markdown += f"\n*... and {remaining} more queries*\n"
markdown += "\n"
return markdown
def generate_report(args):
"""Generate markdown report and update only the Query Examples section in README.md."""
print("Bible Verse Embedding Evaluation Report Generator")
print("=" * 55)
print()
output_file = Path(args.output_file)
# Check if args.results_file is provided and is a specific file
if hasattr(args, 'results_file') and args.results_file:
# Read from specific file (backward compatibility)
results_file = Path(args.results_file)
print(f"Reading results from specific file: {results_file}")
results = read_results_csv(results_file)
else:
# Read from all files in the results directory structure
print("Reading results from directory structure: ./results/")
results = read_all_results_csv()
if not results:
print("No results found to process.")
return
print(f"Loaded {len(results)} result entries")
# Analyze results
print("Analyzing results...")
stats = analyze_results(results)
if not stats:
print("No statistics to generate.")
return
print(f"Found results for {len(stats)} model/provider combinations")
# Read existing README.md file
try:
with open(output_file, "r", encoding="utf-8") as f:
existing_content = f.read()
except FileNotFoundError:
print(f"README.md not found at {output_file}, creating new file")
existing_content = "# Bible Embeddings\n\n"
# First, remove any existing Legend sections from the content
import re
# Remove all Legend sections (including content until next ## section or end)
legend_pattern = r'## Legend\n\n.*?(?=\n## |\Z)'
existing_content = re.sub(legend_pattern, '', existing_content, flags=re.DOTALL)
# Find the Results section (or Query Examples for backward compatibility) and replace it
results_section_start = existing_content.find("## Results")
if results_section_start == -1:
results_section_start = existing_content.find("## Query Examples")
if results_section_start == -1:
# No Results section found, append to end
print("No existing Results section found, appending to end")
new_results_section = generate_detailed_table(stats, args.max_queries)
# Add legend at the end
new_results_section += "## Legend\n\n"
new_results_section += "- ✅ **Perfect Match** - Expected result appears as #1 result (3 points)\n"
new_results_section += "- ⚠️ **Good Match** - Expected result appears as #2 result (2 points)\n"
new_results_section += "- ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)\n"
new_content = existing_content.rstrip() + "\n\n" + new_results_section
else:
# Find the end of the Results section
next_section_start = existing_content.find("\n## ", results_section_start + 1)
if next_section_start == -1:
# Results is the last section
before_section = existing_content[:results_section_start]
after_section = ""
else:
before_section = existing_content[:results_section_start]
after_section = existing_content[next_section_start:]
# Replace the Results section
new_results_section = generate_detailed_table(stats, args.max_queries)
# Add legend at the end
new_results_section += "## Legend\n\n"
new_results_section += "- ✅ **Perfect Match** - Expected result appears as #1 result (3 points)\n"
new_results_section += "- ⚠️ **Good Match** - Expected result appears as #2 result (2 points)\n"
new_results_section += "- ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)\n"
new_content = before_section + new_results_section + after_section
# Write updated content to file
try:
with open(output_file, "w", encoding="utf-8") as f:
f.write(new_content)
print(f"\nResults section updated in: {output_file}")
except Exception as e:
print(f"Error writing report file: {e}")
if __name__ == "__main__":
asyncio.run(main())