|
|
|
|
|
""" |
|
|
Interactive Bible verse embedding generator. |
|
|
|
|
|
This script generates embeddings for Bible verses using various embedding models. |
|
|
It supports both commercial APIs and open-source models. |
|
|
""" |
|
|
|
|
|
import os |
|
|
|
|
|
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json |
|
|
import asyncio |
|
|
from asyncio import Semaphore |
|
|
from pathlib import Path |
|
|
from typing import List, Dict, Any, Optional, Tuple |
|
|
from abc import ABC, abstractmethod |
|
|
import argparse |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv |
|
|
from datetime import datetime |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class EmbeddingProvider(ABC): |
|
|
"""Abstract base class for embedding providers.""" |
|
|
|
|
|
@abstractmethod |
|
|
def get_name(self) -> str: |
|
|
"""Return the model name for file naming.""" |
|
|
pass |
|
|
|
|
|
@abstractmethod |
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
"""Generate embedding for a single text.""" |
|
|
pass |
|
|
|
|
|
@abstractmethod |
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
"""Generate embeddings for a batch of texts.""" |
|
|
pass |
|
|
|
|
|
async def embed_query(self, query: str) -> List[float]: |
|
|
"""Generate embedding for a query. Base implementation falls back to embed_text.""" |
|
|
return await self.embed_text(query) |
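    # Usage sketch (illustrative; any concrete provider below works the same way):
    #
    #     provider = OpenAIProvider("text-embedding-3-small")
    #     vectors = await provider.embed_batch(["In the beginning...", "..."])
    #     query_vec = await provider.embed_query("love your enemies")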
|
|
|
|
|
|
|
|
class OpenAIProvider(EmbeddingProvider): |
|
|
"""OpenAI embedding provider.""" |
|
|
|
|
|
def __init__(self, model_name: str = "text-embedding-3-small"): |
|
|
self.model_name = model_name |
|
|
self.client = None |
|
|
|
|
|
def get_name(self) -> str: |
|
|
return self.model_name |
|
|
|
|
|
async def _get_client(self): |
|
|
if self.client is None: |
|
|
from openai import AsyncOpenAI |
|
|
|
|
|
self.client = AsyncOpenAI() |
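            # The API key is read from the OPENAI_API_KEY environment variable.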
|
|
return self.client |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
embeddings = await self.embed_batch([text]) |
|
|
return embeddings[0] |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
client = await self._get_client() |
|
|
response = await client.embeddings.create(input=texts, model=self.model_name) |
|
|
|
|
|
return [data.embedding for data in response.data] |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using OpenAI's batch API capability.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
batch_embeddings = await self.embed_batch(batch) |
|
|
all_embeddings.extend(batch_embeddings) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
embedding = await self.embed_text(query) |
|
|
all_embeddings.append(embedding) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
all_embeddings.append([0.0] * 1536) |
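                        # NOTE: 1536 matches text-embedding-3-small; text-embedding-3-large
                        # returns 3072-dimensional vectors, so this placeholder assumes the default model.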
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class GeminiProvider(EmbeddingProvider): |
|
|
"""Google Gemini embedding provider.""" |
|
|
|
|
|
def __init__(self, model_name: str = "text-embedding-004"): |
|
|
self.model_name = model_name |
|
|
self.client = None |
|
|
|
|
|
def get_name(self) -> str: |
|
|
return self.model_name |
|
|
|
|
|
async def _get_client(self): |
|
|
if self.client is None: |
|
|
import google.generativeai as genai |
|
|
|
|
|
self.client = genai |
|
|
genai.configure() |
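            # Assumes credentials come from the environment (e.g. GOOGLE_API_KEY); no key is passed explicitly.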
|
|
return self.client |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
embeddings = await self.embed_batch([text]) |
|
|
return embeddings[0] |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
client = await self._get_client() |
|
|
result = client.embed_content(model=f"models/{self.model_name}", content=texts) |
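        # google-generativeai returns {'embedding': ...} where the value is either a single
        # vector (one input) or a list of vectors (batched input) depending on version;
        # the branches below normalize both shapes to List[List[float]].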
|
|
|
|
|
|
|
|
|
|
|
if isinstance(result, dict) and 'embedding' in result: |
|
|
embeddings = result['embedding'] |
|
|
|
|
|
|
|
|
if isinstance(embeddings, list): |
|
|
|
|
|
if len(embeddings) > 0: |
|
|
|
|
|
if isinstance(embeddings[0], (list, tuple)) and all(isinstance(x, (int, float)) for x in embeddings[0][:5]): |
|
|
return embeddings |
|
|
|
|
|
elif isinstance(embeddings[0], dict) and 'embedding' in embeddings[0]: |
|
|
return [emb['embedding'] for emb in embeddings] |
|
|
|
|
|
elif hasattr(embeddings[0], 'embedding'): |
|
|
return [emb.embedding for emb in embeddings] |
|
|
else: |
|
|
|
|
|
return [embeddings] * len(texts) |
|
|
else: |
|
|
return [] |
|
|
|
|
|
elif isinstance(embeddings, (list, tuple)): |
|
|
return [embeddings] |
|
|
else: |
|
|
return [] |
|
|
else: |
|
|
raise ValueError(f"Unexpected Gemini API response format: {type(result)}") |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using Gemini's batch capability.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
batch_embeddings = await self.embed_batch(batch) |
|
|
all_embeddings.extend(batch_embeddings) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
embedding = await self.embed_text(query) |
|
|
all_embeddings.append(embedding) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
all_embeddings.append([0.0] * 768) |
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class VoyageProvider(EmbeddingProvider): |
|
|
"""Voyage AI embedding provider.""" |
|
|
|
|
|
def __init__(self, model_name: str = "voyage-3"): |
|
|
self.model_name = model_name |
|
|
self.client = None |
|
|
|
|
|
def get_name(self) -> str: |
|
|
return self.model_name |
|
|
|
|
|
async def _get_client(self): |
|
|
if self.client is None: |
|
|
import voyageai |
|
|
|
|
|
self.client = voyageai.AsyncClient() |
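            # The API key is read from the VOYAGE_API_KEY environment variable by default.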
|
|
return self.client |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
embeddings = await self.embed_batch([text]) |
|
|
return embeddings[0] |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
client = await self._get_client() |
|
|
response = await client.embed(texts, model=self.model_name) |
|
|
|
|
|
return response.embeddings |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using Voyage's batch capability.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
batch_embeddings = await self.embed_batch(batch) |
|
|
all_embeddings.extend(batch_embeddings) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
embedding = await self.embed_text(query) |
|
|
all_embeddings.append(embedding) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
all_embeddings.append([0.0] * 1024) |
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class HuggingFaceProvider(EmbeddingProvider): |
|
|
"""HuggingFace sentence-transformers provider.""" |
|
|
|
|
|
def __init__(self, model_name: str): |
|
|
self.model_name = model_name |
|
|
self.model = None |
|
|
self.query_prompt_config = self._get_query_prompt_config() |
|
|
|
|
|
def get_name(self) -> str: |
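        # Filesystem-friendly name; it becomes part of the embeddings/<provider>/<model>/ path.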
|
|
|
|
|
return self.model_name.replace("/", "-").replace("_", "-") |
|
|
|
|
|
def _get_query_prompt_config(self) -> dict: |
|
|
"""Get query prompt configuration for specific models.""" |
|
|
|
|
|
if "Qwen3-Embedding" in self.model_name: |
|
|
return { |
|
|
"enabled": True, |
|
|
"prompt_name": "query", |
|
|
"instruction_template": "Instruct: Given a biblical passage, retrieve verses that are semantically related or answer the query\nQuery: {query}" |
|
|
} |
|
|
elif "inf-retriever-v1" in self.model_name: |
|
|
return { |
|
|
"enabled": True, |
|
|
"prompt_name": "query" |
|
|
} |
|
|
elif "jina-embeddings-v4" in self.model_name: |
|
|
return { |
|
|
"enabled": True, |
|
|
"prompt_name": "query", |
|
|
"task": "retrieval" |
|
|
} |
|
|
elif "e5-" in self.model_name.lower(): |
|
|
|
|
|
return { |
|
|
"enabled": True, |
|
|
"query_prefix": "query: ", |
|
|
"document_prefix": "passage: " |
|
|
} |
|
|
|
|
|
return {"enabled": False} |
|
|
|
|
|
def _get_model(self): |
|
|
if self.model is None: |
|
|
import os |
|
|
from sentence_transformers import SentenceTransformer |
|
|
|
|
|
|
|
|
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") |
|
|
|
|
|
|
|
|
|
|
|
if "jinaai" in self.model_name or "nomic-ai" in self.model_name: |
|
|
self.model = SentenceTransformer( |
|
|
self.model_name, |
|
|
trust_remote_code=True, |
|
|
token=hf_token |
|
|
|
|
|
) |
|
|
else: |
|
|
|
|
|
try: |
|
|
self.model = SentenceTransformer( |
|
|
self.model_name, |
|
|
local_files_only=True |
|
|
) |
|
|
except Exception: |
|
|
|
|
|
print(f"Model {self.model_name} not found offline, downloading...") |
|
|
self.model = SentenceTransformer( |
|
|
self.model_name, |
|
|
token=hf_token |
|
|
) |
|
|
return self.model |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
model = self._get_model() |
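        # Documents: e5-style models get the "passage: " prefix, jina-embeddings-v4 uses its
        # "passage" prompt (plus the retrieval task), and everything else is encoded as-is.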
|
|
|
|
|
|
|
|
if self.query_prompt_config.get("enabled") and "document_prefix" in self.query_prompt_config: |
|
|
prefixed_text = self.query_prompt_config["document_prefix"] + text |
|
|
embedding = model.encode([prefixed_text], normalize_embeddings=True)[0] |
|
|
elif self.query_prompt_config.get("enabled") and "prompt_name" in self.query_prompt_config and "jina-embeddings-v4" in self.model_name: |
|
|
|
|
|
encode_kwargs = { |
|
|
"normalize_embeddings": True, |
|
|
"prompt_name": "passage" |
|
|
} |
|
|
if "task" in self.query_prompt_config: |
|
|
encode_kwargs["task"] = self.query_prompt_config["task"] |
|
|
|
|
|
embedding = model.encode([text], **encode_kwargs)[0] |
|
|
else: |
|
|
embedding = model.encode([text], normalize_embeddings=True)[0] |
|
|
|
|
|
return embedding.tolist() |
|
|
|
|
|
async def embed_query(self, query: str) -> List[float]: |
|
|
"""Embed a query with model-specific prompting if needed.""" |
|
|
model = self._get_model() |
|
|
|
|
|
if self.query_prompt_config["enabled"]: |
|
|
|
|
|
if "prompt_name" in self.query_prompt_config: |
|
|
|
|
|
encode_kwargs = { |
|
|
"normalize_embeddings": True, |
|
|
"prompt_name": self.query_prompt_config["prompt_name"] |
|
|
} |
|
|
if "task" in self.query_prompt_config: |
|
|
encode_kwargs["task"] = self.query_prompt_config["task"] |
|
|
|
|
|
embedding = model.encode([query], **encode_kwargs)[0] |
|
|
elif "instruction_template" in self.query_prompt_config: |
|
|
|
|
|
formatted_query = self.query_prompt_config["instruction_template"].format(query=query) |
|
|
embedding = model.encode([formatted_query], normalize_embeddings=True)[0] |
|
|
elif "query_prefix" in self.query_prompt_config: |
|
|
|
|
|
prefixed_query = self.query_prompt_config["query_prefix"] + query |
|
|
embedding = model.encode([prefixed_query], normalize_embeddings=True)[0] |
|
|
else: |
|
|
|
|
|
embedding = model.encode([query], normalize_embeddings=True)[0] |
|
|
else: |
|
|
|
|
|
embedding = model.encode([query], normalize_embeddings=True)[0] |
|
|
|
|
|
return embedding.tolist() |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
model = self._get_model() |
|
|
|
|
|
|
|
|
if self.query_prompt_config.get("enabled") and "document_prefix" in self.query_prompt_config: |
|
|
prefixed_texts = [ |
|
|
self.query_prompt_config["document_prefix"] + text |
|
|
for text in texts |
|
|
] |
|
|
embeddings = model.encode(prefixed_texts, normalize_embeddings=True) |
|
|
elif self.query_prompt_config.get("enabled") and "prompt_name" in self.query_prompt_config and "jina-embeddings-v4" in self.model_name: |
|
|
|
|
|
encode_kwargs = { |
|
|
"normalize_embeddings": True, |
|
|
"prompt_name": "passage" |
|
|
} |
|
|
if "task" in self.query_prompt_config: |
|
|
encode_kwargs["task"] = self.query_prompt_config["task"] |
|
|
|
|
|
embeddings = model.encode(texts, **encode_kwargs) |
|
|
else: |
|
|
embeddings = model.encode(texts, normalize_embeddings=True) |
|
|
|
|
|
return [emb.tolist() for emb in embeddings] |
|
|
|
|
|
async def embed_queries_batch_internal(self, queries: List[str]) -> List[List[float]]: |
|
|
"""Embed a batch of queries with model-specific prompting if needed.""" |
|
|
model = self._get_model() |
|
|
|
|
|
if self.query_prompt_config["enabled"]: |
|
|
|
|
|
if "prompt_name" in self.query_prompt_config: |
|
|
|
|
|
encode_kwargs = { |
|
|
"normalize_embeddings": True, |
|
|
"prompt_name": self.query_prompt_config["prompt_name"] |
|
|
} |
|
|
if "task" in self.query_prompt_config: |
|
|
encode_kwargs["task"] = self.query_prompt_config["task"] |
|
|
|
|
|
embeddings = model.encode(queries, **encode_kwargs) |
|
|
elif "instruction_template" in self.query_prompt_config: |
|
|
|
|
|
formatted_queries = [ |
|
|
self.query_prompt_config["instruction_template"].format(query=query) |
|
|
for query in queries |
|
|
] |
|
|
embeddings = model.encode(formatted_queries, normalize_embeddings=True) |
|
|
elif "query_prefix" in self.query_prompt_config: |
|
|
|
|
|
prefixed_queries = [ |
|
|
self.query_prompt_config["query_prefix"] + query |
|
|
for query in queries |
|
|
] |
|
|
embeddings = model.encode(prefixed_queries, normalize_embeddings=True) |
|
|
else: |
|
|
|
|
|
embeddings = model.encode(queries, normalize_embeddings=True) |
|
|
else: |
|
|
|
|
|
embeddings = model.encode(queries, normalize_embeddings=True) |
|
|
|
|
|
return [emb.tolist() for emb in embeddings] |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using HuggingFace's batch capability with model-specific prompting.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
batch_embeddings = await self.embed_queries_batch_internal(batch) |
|
|
all_embeddings.extend(batch_embeddings) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
embedding = await self.embed_query(query) |
|
|
all_embeddings.append(embedding) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
model = self._get_model() |
|
|
dim = getattr(model, 'get_sentence_embedding_dimension', lambda: 384)() |
|
|
all_embeddings.append([0.0] * dim) |
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class EmbeddingGemmaProvider(EmbeddingProvider): |
|
|
"""Google EmbeddingGemma provider with specialized encode_document method.""" |
|
|
|
|
|
def __init__(self, model_name: str): |
|
|
self.model_name = model_name |
|
|
self.model = None |
|
|
|
|
|
def get_name(self) -> str: |
|
|
|
|
|
return self.model_name.replace("/", "-").replace("_", "-") |
|
|
|
|
|
def _get_model(self): |
|
|
if self.model is None: |
|
|
import os |
|
|
from sentence_transformers import SentenceTransformer |
|
|
|
|
|
|
|
|
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") |
|
|
|
|
|
|
|
|
self.model = SentenceTransformer( |
|
|
self.model_name, |
|
|
trust_remote_code=True, |
|
|
token=hf_token |
|
|
) |
|
|
return self.model |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
model = self._get_model() |
|
|
|
|
|
embedding = model.encode_document([text])[0] |
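        # encode_document() applies EmbeddingGemma's document prompt; encode_query()
        # (used in embed_queries_batch below) applies the matching query prompt.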
|
|
return embedding.tolist() |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
model = self._get_model() |
|
|
|
|
|
embeddings = model.encode_document(texts) |
|
|
return [emb.tolist() for emb in embeddings] |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using EmbeddingGemma's encode_query method.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
model = self._get_model() |
|
|
|
|
|
batch_embeddings = model.encode_query(batch) |
|
|
all_embeddings.extend([emb.tolist() for emb in batch_embeddings]) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
model = self._get_model() |
|
|
embedding = model.encode_query([query])[0] |
|
|
all_embeddings.append(embedding.tolist()) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
all_embeddings.append([0.0] * 768) |
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class JinaProvider(EmbeddingProvider): |
|
|
"""Jina AI embedding provider.""" |
|
|
|
|
|
def __init__(self, model_name: str = "jina-embeddings-v3"): |
|
|
self.model_name = model_name |
|
|
        self.client = None
        self.hf_provider = None  # lazily created local HuggingFace fallback (see embed_batch)
|
|
|
|
|
def get_name(self) -> str: |
|
|
return self.model_name |
|
|
|
|
|
async def _get_client(self): |
|
|
if self.client is None: |
|
|
from jina import Client |
|
|
|
|
|
self.client = Client() |
|
|
return self.client |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
embeddings = await self.embed_batch([text]) |
|
|
return embeddings[0] |
|
|
|
|
|
    async def embed_batch(self, texts: List[str]) -> List[List[float]]:
        # Run the Jina model locally via sentence-transformers; cache the wrapped provider
        # so the underlying model is only loaded once per process.
        if getattr(self, "hf_provider", None) is None:
            self.hf_provider = HuggingFaceProvider(f"jinaai/{self.model_name}")
        return await self.hf_provider.embed_batch(texts)
|
|
|
|
|
|
|
|
class BGEM3Provider(EmbeddingProvider): |
|
|
"""BGE-M3 provider supporting dense mode.""" |
|
|
|
|
|
def __init__(self, model_name: str, mode: str = "dense"): |
|
|
self.model_name = model_name |
|
|
self.mode = mode |
|
|
self.model = None |
|
|
|
|
|
if mode != "dense": |
|
|
raise ValueError(f"Unsupported mode: {mode}. Only 'dense' mode is supported.") |
|
|
|
|
|
def get_name(self) -> str: |
|
|
|
|
|
return f"{self.model_name.replace('/', '-')}-{self.mode}" |
|
|
|
|
|
def _get_model(self): |
|
|
if self.model is None: |
|
|
try: |
|
|
from FlagEmbedding import BGEM3FlagModel |
|
|
except ImportError: |
|
|
raise ImportError( |
|
|
"FlagEmbedding library is required for BGE-M3. " |
|
|
"Install with: pip install FlagEmbedding" |
|
|
) |
|
|
|
|
|
import os |
|
|
|
|
|
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") |
|
|
|
|
|
self.model = BGEM3FlagModel( |
|
|
self.model_name, |
|
|
use_fp16=True, |
|
|
|
|
|
**({'token': hf_token} if hf_token else {}) |
|
|
) |
|
|
return self.model |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
model = self._get_model() |
|
|
result = model.encode([text]) |
|
|
return result['dense_vecs'][0].tolist() |
|
|
|
|
|
async def embed_query(self, query: str) -> List[float]: |
|
|
|
|
|
return await self.embed_text(query) |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
model = self._get_model() |
|
|
result = model.encode(texts) |
|
|
return [vec.tolist() for vec in result['dense_vecs']] |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]: |
|
|
"""Efficiently embed multiple queries using BGE-M3's batch capability.""" |
|
|
all_embeddings = [] |
|
|
|
|
|
|
|
|
for i in range(0, len(queries), batch_size): |
|
|
batch = queries[i:i + batch_size] |
|
|
print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)") |
|
|
|
|
|
try: |
|
|
batch_embeddings = await self.embed_batch(batch) |
|
|
all_embeddings.extend(batch_embeddings) |
|
|
except Exception as e: |
|
|
print(f"Error in batch embedding: {e}") |
|
|
|
|
|
for query in batch: |
|
|
try: |
|
|
embedding = await self.embed_query(query) |
|
|
all_embeddings.append(embedding) |
|
|
except Exception as e2: |
|
|
print(f"Error embedding query '{query}': {e2}") |
|
|
|
|
|
dim = 1024 |
|
|
all_embeddings.append([0.0] * dim) |
|
|
|
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
class BGEM3HybridProvider(EmbeddingProvider): |
|
|
"""BGE-M3 hybrid provider that generates both dense and sparse embeddings for hybrid search.""" |
|
|
|
|
|
def __init__(self, model_name: str): |
|
|
self.model_name = model_name |
|
|
self.model = None |
|
|
|
|
|
def get_name(self) -> str: |
|
|
return f"{self.model_name.replace('/', '-')}-hybrid" |
|
|
|
|
|
def _get_model(self): |
|
|
if self.model is None: |
|
|
try: |
|
|
from FlagEmbedding import BGEM3FlagModel |
|
|
except ImportError: |
|
|
raise ImportError( |
|
|
"FlagEmbedding library is required for BGE-M3. " |
|
|
"Install with: pip install FlagEmbedding" |
|
|
) |
|
|
|
|
|
import os |
|
|
|
|
|
hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") |
|
|
|
|
|
self.model = BGEM3FlagModel( |
|
|
self.model_name, |
|
|
use_fp16=True, |
|
|
|
|
|
**({'token': hf_token} if hf_token else {}) |
|
|
) |
|
|
return self.model |
|
|
|
|
|
async def embed_text(self, text: str) -> List[float]: |
|
|
"""For hybrid provider, return dense embedding by default for compatibility.""" |
|
|
model = self._get_model() |
|
|
result = model.encode([text], return_dense=True, return_sparse=True, return_colbert_vecs=False) |
|
|
return result['dense_vecs'][0].tolist() |
|
|
|
|
|
async def embed_query(self, query: str) -> List[float]: |
|
|
return await self.embed_text(query) |
|
|
|
|
|
async def embed_batch(self, texts: List[str]) -> List[List[float]]: |
|
|
"""For hybrid provider, return dense embeddings by default for compatibility.""" |
|
|
model = self._get_model() |
|
|
result = model.encode(texts, return_dense=True, return_sparse=True, return_colbert_vecs=False) |
|
|
return [vec.tolist() for vec in result['dense_vecs']] |
|
|
|
|
|
async def embed_hybrid_text(self, text: str) -> Dict[str, Any]: |
|
|
"""Generate both dense and sparse embeddings for hybrid search.""" |
|
|
model = self._get_model() |
|
|
result = model.encode([text], return_dense=True, return_sparse=True, return_colbert_vecs=False) |
|
|
|
|
|
|
|
|
sparse_dict = result['lexical_weights'][0] |
|
|
max_vocab_size = 30522 |
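        # NOTE: 30522 is the BERT vocabulary size; BGE-M3's tokenizer has a much larger
        # vocabulary, so token ids >= 30522 are silently dropped from the densified sparse
        # vector below (the original sparse_dict is still returned unmodified).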
|
|
dense_sparse = [0.0] * max_vocab_size |
|
|
for token_id, weight in sparse_dict.items(): |
|
|
try: |
|
|
token_id_int = int(token_id) |
|
|
if token_id_int < max_vocab_size: |
|
|
dense_sparse[token_id_int] = float(weight) |
|
|
except (ValueError, TypeError): |
|
|
continue |
|
|
|
|
|
return { |
|
|
'dense': result['dense_vecs'][0].tolist(), |
|
|
'sparse': dense_sparse, |
|
|
'sparse_dict': sparse_dict |
|
|
} |
|
|
|
|
|
async def embed_hybrid_batch(self, texts: List[str]) -> List[Dict[str, Any]]: |
|
|
"""Generate both dense and sparse embeddings for multiple texts.""" |
|
|
model = self._get_model() |
|
|
result = model.encode(texts, return_dense=True, return_sparse=True, return_colbert_vecs=False) |
|
|
|
|
|
embeddings = [] |
|
|
max_vocab_size = 30522 |
|
|
|
|
|
for i, sparse_dict in enumerate(result['lexical_weights']): |
|
|
dense_sparse = [0.0] * max_vocab_size |
|
|
for token_id, weight in sparse_dict.items(): |
|
|
try: |
|
|
token_id_int = int(token_id) |
|
|
if token_id_int < max_vocab_size: |
|
|
dense_sparse[token_id_int] = float(weight) |
|
|
except (ValueError, TypeError): |
|
|
continue |
|
|
|
|
|
embeddings.append({ |
|
|
'dense': result['dense_vecs'][i].tolist(), |
|
|
'sparse': dense_sparse, |
|
|
'sparse_dict': sparse_dict |
|
|
}) |
|
|
|
|
|
return embeddings |
|
|
|
|
|
async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]: |
|
|
"""For compatibility, return dense embeddings.""" |
|
|
return await self.embed_batch(queries) |
|
|
|
|
|
|
|
|
def _create_bgem3_provider(model_name: str) -> BGEM3Provider: |
|
|
"""Create a BGE-M3 provider with the appropriate mode based on model name suffix.""" |
|
|
if model_name.endswith(":dense"): |
|
|
base_model = model_name.replace(":dense", "") |
|
|
return BGEM3Provider(base_model, "dense") |
|
|
elif model_name.endswith(":hybrid"): |
|
|
base_model = model_name.replace(":hybrid", "") |
|
|
return BGEM3HybridProvider(base_model) |
|
|
else: |
|
|
|
|
|
return BGEM3Provider(model_name, "dense") |
|
|
|
|
|
|
|
|
def get_available_models() -> Dict[str, Dict[str, Any]]: |
|
|
"""Return a dictionary of available models organized by provider.""" |
|
|
return { |
|
|
"OpenAI": { |
|
|
"models": [ |
|
|
"text-embedding-3-small", |
|
|
"text-embedding-3-large", |
|
|
"text-embedding-ada-002", |
|
|
], |
|
|
"provider_class": OpenAIProvider, |
|
|
}, |
|
|
"Google Gemini": { |
|
|
"models": ["text-embedding-004"], |
|
|
"provider_class": GeminiProvider, |
|
|
}, |
|
|
"Voyage AI": {"models": ["voyage-3"], "provider_class": VoyageProvider}, |
|
|
"HuggingFace": { |
|
|
"models": [ |
|
|
"answerdotai/ModernBERT-base", |
|
|
"answerdotai/ModernBERT-large", |
|
|
"BAAI/bge-large-en", |
|
|
"BAAI/bge-base-en", |
|
|
"BAAI/bge-small-en", |
|
|
"ibm-granite/granite-embedding-30m-english", |
|
|
"ibm-granite/granite-embedding-125m-english", |
|
|
"ibm-granite/granite-embedding-107m-multilingual", |
|
|
"ibm-granite/granite-embedding-278m-multilingual", |
|
|
"infly/inf-retriever-v1", |
|
|
"intfloat/e5-large-v2", |
|
|
"intfloat/e5-base-v2", |
|
|
"intfloat/e5-small-v2", |
|
|
"jinaai/jina-embeddings-v4", |
|
|
"nomic-ai/nomic-embed-text-v1.5", |
|
|
"nvidia/NV-Embed-V2", |
|
|
"Qwen/Qwen3-Embedding-0.6B", |
|
|
"Qwen/Qwen3-Embedding-4B", |
|
|
"Qwen/Qwen3-Embedding-8B", |
|
|
"sentence-transformers/all-MiniLM-L6-v2", |
|
|
"Salesforce/SFR-Embedding-Mistral", |
|
|
"Snowflake/snowflake-arctic-embed-l-v2.0", |
|
|
"thenlper/gte-large", |
|
|
"thenlper/gte-base", |
|
|
"thenlper/gte-small", |
|
|
], |
|
|
"provider_class": HuggingFaceProvider, |
|
|
}, |
|
|
"Google EmbeddingGemma": { |
|
|
"models": ["google/embeddinggemma-300m"], |
|
|
"provider_class": EmbeddingGemmaProvider, |
|
|
}, |
|
|
"BGE-M3": { |
|
|
"models": ["BAAI/bge-m3", "BAAI/bge-m3:dense", "BAAI/bge-m3:hybrid"], |
|
|
"provider_class": lambda model_name: _create_bgem3_provider(model_name), |
|
|
}, |
|
|
"Jina AI": {"models": ["jina-embeddings-v3"], "provider_class": JinaProvider}, |
|
|
} |
|
|
|
|
|
|
|
|
def get_available_translations() -> List[str]: |
|
|
"""Get available translations by listing JSON files in the text directory.""" |
|
|
text_dir = Path("text") |
|
|
if not text_dir.exists(): |
|
|
return [] |
|
|
|
|
|
translations = [] |
|
|
for file in text_dir.glob("*.json"): |
|
|
translation_name = file.stem |
|
|
translations.append(translation_name) |
|
|
|
|
|
return sorted(translations) |
|
|
|
|
|
|
|
|
def select_translation() -> str: |
|
|
"""Interactive translation selection.""" |
|
|
translations = get_available_translations() |
|
|
|
|
|
if not translations: |
|
|
print("No translations found in the text directory!") |
|
|
exit(1) |
|
|
|
|
|
if len(translations) == 1: |
|
|
print(f"Using translation: {translations[0]}") |
|
|
return translations[0] |
|
|
|
|
|
print("Available translations:") |
|
|
for i, translation in enumerate(translations, 1): |
|
|
print(f" {i}. {translation}") |
|
|
|
|
|
while True: |
|
|
try: |
|
|
choice = input(f"\nSelect translation (1-{len(translations)}): ").strip() |
|
|
idx = int(choice) - 1 |
|
|
if 0 <= idx < len(translations): |
|
|
return translations[idx] |
|
|
else: |
|
|
print(f"Please enter a number between 1 and {len(translations)}") |
|
|
except (ValueError, KeyboardInterrupt): |
|
|
print("\nExiting...") |
|
|
exit(0) |
|
|
|
|
|
|
|
|
def select_model() -> EmbeddingProvider: |
|
|
"""Interactive model selection.""" |
|
|
models = get_available_models() |
|
|
|
|
|
print("Available embedding models:") |
|
|
print() |
|
|
|
|
|
all_choices = [] |
|
|
choice_num = 1 |
|
|
|
|
|
for provider_name, provider_info in models.items(): |
|
|
print(f"{provider_name}:") |
|
|
for model in provider_info["models"]: |
|
|
print(f" {choice_num}. {model}") |
|
|
all_choices.append((provider_name, model, provider_info["provider_class"])) |
|
|
choice_num += 1 |
|
|
print() |
|
|
|
|
|
while True: |
|
|
try: |
|
|
choice = input(f"Select model (1-{len(all_choices)}): ").strip() |
|
|
idx = int(choice) - 1 |
|
|
if 0 <= idx < len(all_choices): |
|
|
provider_name, model_name, provider_class = all_choices[idx] |
|
|
print(f"Selected: {model_name} ({provider_name})") |
|
|
return provider_class(model_name) |
|
|
else: |
|
|
print(f"Please enter a number between 1 and {len(all_choices)}") |
|
|
except (ValueError, KeyboardInterrupt): |
|
|
print("\nExiting...") |
|
|
exit(0) |
|
|
|
|
|
|
|
|
def load_translation_text(translation: str) -> List[Dict[str, Any]]: |
|
|
"""Load the text for a specific translation.""" |
|
|
text_file = Path("text") / f"{translation}.json" |
|
|
|
|
|
if not text_file.exists(): |
|
|
raise FileNotFoundError(f"Translation file not found: {text_file}") |
|
|
|
|
|
with open(text_file, "r", encoding="utf-8") as f: |
|
|
return json.load(f) |
|
|
|
|
|
|
|
|
def get_model_provider(provider: EmbeddingProvider) -> str: |
|
|
"""Get the provider name for a given embedding provider.""" |
|
|
if isinstance(provider, OpenAIProvider): |
|
|
return "openai" |
|
|
elif isinstance(provider, GeminiProvider): |
|
|
return "google-gemini" |
|
|
elif isinstance(provider, VoyageProvider): |
|
|
return "voyage" |
|
|
elif isinstance(provider, HuggingFaceProvider): |
|
|
return "huggingface" |
|
|
elif isinstance(provider, EmbeddingGemmaProvider): |
|
|
return "huggingface" |
|
|
elif isinstance(provider, (BGEM3Provider, BGEM3HybridProvider)): |
|
|
return "huggingface" |
|
|
elif isinstance(provider, JinaProvider): |
|
|
return "huggingface" |
|
|
else: |
|
|
return "unknown" |
|
|
|
|
|
|
|
|
def create_output_directories( |
|
|
verses: List[Dict[str, Any]], provider: EmbeddingProvider |
|
|
) -> None: |
|
|
"""Create the necessary output directory structure.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
books = set(verse["book"] for verse in verses) |
|
|
|
|
|
for book in books: |
|
|
book_dir = Path("embeddings") / provider_name / model_name / book |
|
|
book_dir.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
|
|
|
def check_existing_embeddings( |
|
|
translation: str, provider: EmbeddingProvider, verses: List[Dict[str, Any]] |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Check which verses already have embeddings and return only missing ones.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
missing_verses = [] |
|
|
|
|
|
for verse in verses: |
|
|
output_file = ( |
|
|
Path("embeddings") |
|
|
/ provider_name |
|
|
/ model_name |
|
|
/ verse["book"] |
|
|
/ f"{verse['chapter']:03d}.json" |
|
|
) |
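        # Layout: embeddings/<provider>/<model>/<book>/<chapter as three digits>.json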
|
|
|
|
|
if output_file.exists(): |
|
|
|
|
|
try: |
|
|
with open(output_file, "r", encoding="utf-8") as f: |
|
|
existing_verses = json.load(f) |
|
|
|
|
|
verse_exists = any( |
|
|
v["book"] == verse["book"] |
|
|
and v["chapter"] == verse["chapter"] |
|
|
and v["verse"] == verse["verse"] |
|
|
for v in existing_verses |
|
|
) |
|
|
|
|
|
if not verse_exists: |
|
|
missing_verses.append(verse) |
|
|
except (json.JSONDecodeError, KeyError): |
|
|
missing_verses.append(verse) |
|
|
else: |
|
|
missing_verses.append(verse) |
|
|
|
|
|
return missing_verses |
|
|
|
|
|
|
|
|
async def generate_embeddings( |
|
|
translation: str, |
|
|
provider: EmbeddingProvider, |
|
|
verses: List[Dict[str, Any]], |
|
|
batch_size: int = 100, |
|
|
) -> None: |
|
|
"""Generate embeddings for verses and save them.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
|
|
|
print( |
|
|
f"Generating embeddings for {len(verses)} verses using {model_name} ({provider_name})..." |
|
|
) |
|
|
|
|
|
|
|
|
verses_by_chapter = {} |
|
|
for verse in verses: |
|
|
key = (verse["book"], verse["chapter"]) |
|
|
if key not in verses_by_chapter: |
|
|
verses_by_chapter[key] = [] |
|
|
verses_by_chapter[key].append(verse) |
|
|
|
|
|
|
|
|
for (book, chapter), chapter_verses in verses_by_chapter.items(): |
|
|
print(f"Processing {book} {chapter} ({len(chapter_verses)} verses)...") |
|
|
|
|
|
|
|
|
output_file = ( |
|
|
Path("embeddings") |
|
|
/ provider_name |
|
|
/ model_name |
|
|
/ book |
|
|
/ f"{chapter:03d}.json" |
|
|
) |
|
|
existing_embeddings = [] |
|
|
if output_file.exists(): |
|
|
try: |
|
|
with open(output_file, "r", encoding="utf-8") as f: |
|
|
existing_embeddings = json.load(f) |
|
|
except (json.JSONDecodeError, KeyError): |
|
|
existing_embeddings = [] |
|
|
|
|
|
|
|
|
new_embeddings = [] |
|
|
for i in range(0, len(chapter_verses), batch_size): |
|
|
batch = chapter_verses[i : i + batch_size] |
|
|
texts = [verse["text"] for verse in batch] |
|
|
|
|
|
try: |
|
|
embeddings = await provider.embed_batch(texts) |
|
|
|
|
|
for verse, embedding in zip(batch, embeddings): |
|
|
new_embeddings.append( |
|
|
{ |
|
|
"book": verse["book"], |
|
|
"chapter": verse["chapter"], |
|
|
"verse": verse["verse"], |
|
|
"embedding": embedding, |
|
|
} |
|
|
) |
|
|
|
|
|
print( |
|
|
f" Processed batch {i // batch_size + 1}/{(len(chapter_verses) + batch_size - 1) // batch_size}" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Error processing batch: {e}") |
|
|
continue |
|
|
|
|
|
|
|
|
all_embeddings = existing_embeddings + new_embeddings |
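        # NOTE: entries are concatenated as-is; re-embedding verses that already exist in the
        # file (i.e. running without --skip-existing) can produce duplicate records.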
|
|
|
|
|
|
|
|
all_embeddings.sort(key=lambda x: x["verse"]) |
|
|
|
|
|
|
|
|
output_file.parent.mkdir(parents=True, exist_ok=True) |
|
|
with open(output_file, "w", encoding="utf-8") as f: |
|
|
json.dump(all_embeddings, f, indent=2) |
|
|
|
|
|
print(f" Saved {len(new_embeddings)} new embeddings to {output_file}") |
|
|
|
|
|
|
|
|
async def main(): |
|
|
"""Main entry point.""" |
|
|
parser = argparse.ArgumentParser(description="Bible verse embeddings tool") |
|
|
subparsers = parser.add_subparsers(dest="command", help="Available commands") |
|
|
|
|
|
|
|
|
embed_parser = subparsers.add_parser( |
|
|
"embed", help="Generate embeddings for Bible verses" |
|
|
) |
|
|
embed_parser.add_argument("--translation", "-t", help="Translation to use") |
|
|
embed_parser.add_argument("--model", "-m", help="Model to use") |
|
|
embed_parser.add_argument( |
|
|
"--batch-size", "-b", type=int, default=100, help="Batch size for processing" |
|
|
) |
|
|
embed_parser.add_argument( |
|
|
"--skip-existing", |
|
|
"-s", |
|
|
action="store_true", |
|
|
help="Skip verses that already have embeddings", |
|
|
) |
|
|
|
|
|
|
|
|
query_parser = subparsers.add_parser( |
|
|
"query", help="Search Bible verses using embeddings" |
|
|
) |
|
|
query_parser.add_argument( |
|
|
"--no-hnsw", |
|
|
action="store_true", |
|
|
help="Disable HNSW optimization and use brute-force search" |
|
|
) |
|
|
|
|
|
|
|
|
batch_parser = subparsers.add_parser( |
|
|
"batch", help="Run batch queries from YAML file and evaluate results" |
|
|
) |
|
|
|
|
|
|
|
|
autobatch_parser = subparsers.add_parser( |
|
|
"autobatch", help="Run batch queries for all models that don't have results yet" |
|
|
) |
|
|
|
|
|
|
|
|
autoembed_parser = subparsers.add_parser( |
|
|
"autoembed", help="Automatically generate embeddings for all models missing embeddings" |
|
|
) |
|
|
|
|
|
|
|
|
report_parser = subparsers.add_parser( |
|
|
"report", help="Generate markdown report from batch results CSV" |
|
|
) |
|
|
report_parser.add_argument( |
|
|
"--results-file", |
|
|
"-r", |
|
|
help="Specific CSV file to read results from (default: reads all files from ./results/ directory)" |
|
|
) |
|
|
report_parser.add_argument( |
|
|
"--output-file", |
|
|
"-o", |
|
|
default="README.md", |
|
|
help="Output markdown file (default: README.md)" |
|
|
) |
|
|
report_parser.add_argument( |
|
|
"--max-queries", |
|
|
"-q", |
|
|
type=int, |
|
|
default=10, |
|
|
help="Maximum number of queries to show in detailed results (default: 10)" |
|
|
) |
|
|
|
|
|
batch_parser.add_argument("--translation", "-t", help="Translation to use") |
|
|
batch_parser.add_argument("--model", "-m", help="Model to use") |
|
|
batch_parser.add_argument( |
|
|
"--queries-file", |
|
|
"-q", |
|
|
default="queries.yaml", |
|
|
help="YAML file containing queries and expected results (default: queries.yaml)" |
|
|
) |
|
|
batch_parser.add_argument( |
|
|
"--concurrency", |
|
|
"-c", |
|
|
type=int, |
|
|
default=5, |
|
|
help="Number of concurrent queries to process (default: 5)" |
|
|
) |
|
|
batch_parser.add_argument( |
|
|
"--no-hnsw", |
|
|
action="store_true", |
|
|
help="Disable HNSW optimization and use brute-force search" |
|
|
) |
|
|
|
|
|
autobatch_parser.add_argument("--translation", "-t", help="Translation to use") |
|
|
autobatch_parser.add_argument( |
|
|
"--queries-file", |
|
|
"-q", |
|
|
default="queries.yaml", |
|
|
help="YAML file containing queries and expected results (default: queries.yaml)" |
|
|
) |
|
|
autobatch_parser.add_argument( |
|
|
"--concurrency", |
|
|
"-c", |
|
|
type=int, |
|
|
default=5, |
|
|
help="Number of concurrent queries to process (default: 5)" |
|
|
) |
|
|
autobatch_parser.add_argument( |
|
|
"--no-hnsw", |
|
|
action="store_true", |
|
|
help="Disable HNSW optimization and use brute-force search" |
|
|
) |
|
|
autobatch_parser.add_argument( |
|
|
"--dry-run", |
|
|
action="store_true", |
|
|
help="Show which models would be processed without actually running them" |
|
|
) |
|
|
|
|
|
|
|
|
autoembed_parser.add_argument("--translation", "-t", help="Translation to use") |
|
|
autoembed_parser.add_argument( |
|
|
"--batch-size", "-b", type=int, default=100, help="Batch size for processing" |
|
|
) |
|
|
autoembed_parser.add_argument( |
|
|
"--skip-existing", |
|
|
"-s", |
|
|
action="store_true", |
|
|
help="Skip verses that already have embeddings", |
|
|
) |
|
|
autoembed_parser.add_argument( |
|
|
"--dry-run", |
|
|
action="store_true", |
|
|
help="Show which models would be processed without actually generating embeddings" |
|
|
) |
|
|
autoembed_parser.add_argument( |
|
|
"--include-providers", |
|
|
nargs="+", |
|
|
help="Only process models from these providers (e.g., 'OpenAI' 'HuggingFace')" |
|
|
) |
|
|
autoembed_parser.add_argument( |
|
|
"--exclude-providers", |
|
|
nargs="+", |
|
|
help="Exclude models from these providers (e.g., 'OpenAI' 'HuggingFace')" |
|
|
) |
|
|
|
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
if not args.command: |
|
|
parser.print_help() |
|
|
return |
|
|
|
|
|
if args.command == "query": |
|
|
await query_mode(args) |
|
|
return |
|
|
|
|
|
if args.command == "batch": |
|
|
await batch_mode(args) |
|
|
return |
|
|
|
|
|
if args.command == "autobatch": |
|
|
await autobatch_mode(args) |
|
|
return |
|
|
|
|
|
if args.command == "autoembed": |
|
|
await autoembed_mode(args) |
|
|
return |
|
|
|
|
|
if args.command == "report": |
|
|
generate_report(args) |
|
|
return |
|
|
|
|
|
if args.command == "embed": |
|
|
|
|
|
pass |
|
|
|
|
|
|
|
|
if args.translation: |
|
|
translation = args.translation |
|
|
if translation not in get_available_translations(): |
|
|
print(f"Translation '{translation}' not found!") |
|
|
return |
|
|
else: |
|
|
translation = select_translation() |
|
|
|
|
|
|
|
|
if args.model: |
|
|
|
|
|
provider = None |
|
|
models = get_available_models() |
|
|
for provider_info in models.values(): |
|
|
if args.model in provider_info["models"]: |
|
|
provider = provider_info["provider_class"](args.model) |
|
|
break |
|
|
|
|
|
if provider is None: |
|
|
print(f"Model '{args.model}' not found!") |
|
|
return |
|
|
else: |
|
|
provider = select_model() |
|
|
|
|
|
|
|
|
print(f"\nLoading verses for translation: {translation}") |
|
|
verses = load_translation_text(translation) |
|
|
print(f"Loaded {len(verses)} verses") |
|
|
|
|
|
|
|
|
create_output_directories(verses, provider) |
|
|
|
|
|
|
|
|
if args.skip_existing: |
|
|
original_count = len(verses) |
|
|
verses = check_existing_embeddings(translation, provider, verses) |
|
|
print( |
|
|
f"Skipping {original_count - len(verses)} verses with existing embeddings" |
|
|
) |
|
|
|
|
|
if not verses: |
|
|
print("All verses already have embeddings!") |
|
|
return |
|
|
|
|
|
|
|
|
await generate_embeddings(translation, provider, verses, args.batch_size) |
|
|
|
|
|
print(f"\nCompleted! Generated embeddings for {len(verses)} verses.") |
|
|
|
|
|
|
|
|
def load_embeddings_for_model( |
|
|
provider: EmbeddingProvider, translation: str |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Load all embeddings for a specific model and translation.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
|
|
|
embeddings_dir = Path("embeddings") / provider_name / model_name |
|
|
all_embeddings = [] |
|
|
|
|
|
if not embeddings_dir.exists(): |
|
|
print(f"No embeddings found for {model_name} ({provider_name})") |
|
|
return [] |
|
|
|
|
|
print(f"Loading embeddings for {model_name} ({provider_name})...") |
|
|
|
|
|
|
|
|
for book_dir in embeddings_dir.iterdir(): |
|
|
if not book_dir.is_dir(): |
|
|
continue |
|
|
|
|
|
book_name = book_dir.name |
|
|
|
|
|
|
|
|
for chapter_file in book_dir.glob("*.json"): |
|
|
try: |
|
|
with open(chapter_file, "r", encoding="utf-8") as f: |
|
|
chapter_embeddings = json.load(f) |
|
|
|
|
|
for embedding_data in chapter_embeddings: |
|
|
|
|
|
embedding_data["translation"] = translation |
|
|
embedding_data["model"] = model_name |
|
|
embedding_data["provider"] = provider_name |
|
|
all_embeddings.append(embedding_data) |
|
|
|
|
|
except (json.JSONDecodeError, KeyError) as e: |
|
|
print(f"Error loading {chapter_file}: {e}") |
|
|
continue |
|
|
|
|
|
print(f"Loaded {len(all_embeddings)} verse embeddings") |
|
|
return all_embeddings |
|
|
|
|
|
|
|
|
def cosine_similarity(a: List[float], b: List[float]) -> float: |
|
|
"""Calculate cosine similarity between two vectors.""" |
|
|
import numpy as np |
|
|
a_np = np.array(a) |
|
|
b_np = np.array(b) |
|
|
|
|
|
|
|
|
dot_product = np.dot(a_np, b_np) |
|
|
norm_a = np.linalg.norm(a_np) |
|
|
norm_b = np.linalg.norm(b_np) |
|
|
|
|
|
if norm_a == 0 or norm_b == 0: |
|
|
return 0.0 |
|
|
|
|
|
return dot_product / (norm_a * norm_b) |
|
|
|
|
|
|
|
|
def sparse_cosine_similarity(sparse_dict_a: Dict[str, float], sparse_dict_b: Dict[str, float]) -> float: |
|
|
"""Calculate cosine similarity between two sparse dictionaries.""" |
|
|
import math |
|
|
|
|
|
|
|
|
dict_a = {int(k): float(v) for k, v in sparse_dict_a.items() if float(v) > 0} |
|
|
dict_b = {int(k): float(v) for k, v in sparse_dict_b.items() if float(v) > 0} |
|
|
|
|
|
|
|
|
dot_product = 0.0 |
|
|
for term_id in dict_a: |
|
|
if term_id in dict_b: |
|
|
dot_product += dict_a[term_id] * dict_b[term_id] |
|
|
|
|
|
|
|
|
magnitude_a = math.sqrt(sum(v * v for v in dict_a.values())) |
|
|
magnitude_b = math.sqrt(sum(v * v for v in dict_b.values())) |
|
|
|
|
|
if magnitude_a == 0.0 or magnitude_b == 0.0: |
|
|
return 0.0 |
|
|
|
|
|
return dot_product / (magnitude_a * magnitude_b) |
|
|
|
|
|
|
|
|
def reciprocal_rank_fusion(dense_results: List[Tuple[Dict[str, Any], float]], |
|
|
sparse_results: List[Tuple[Dict[str, Any], float]], |
|
|
dense_weight: float = 1.0, |
|
|
sparse_weight: float = 0.7, |
|
|
k: int = 60) -> List[Tuple[Dict[str, Any], float]]: |
|
|
""" |
|
|
Combine dense and sparse search results using Reciprocal Rank Fusion (RRF). |
|
|
|
|
|
RRF formula: RRF(d) = Σ(1 / (k + rank(d))) |
|
|
where k is typically 60, rank(d) is the rank of document d in each ranking |
|
|
""" |
|
|
|
|
|
verse_scores = {} |
|
|
|
|
|
|
|
|
for rank, (verse_data, score) in enumerate(dense_results, 1): |
|
|
verse_id = f"{verse_data['book']}_{verse_data['chapter']}_{verse_data['verse']}" |
|
|
if verse_id not in verse_scores: |
|
|
verse_scores[verse_id] = { |
|
|
'verse_data': verse_data, |
|
|
'dense_score': score, |
|
|
'sparse_score': 0.0, |
|
|
'dense_rank': rank, |
|
|
'sparse_rank': float('inf'), |
|
|
'rrf_score': 0.0 |
|
|
} |
|
|
else: |
|
|
verse_scores[verse_id]['dense_score'] = score |
|
|
verse_scores[verse_id]['dense_rank'] = rank |
|
|
|
|
|
|
|
|
for rank, (verse_data, score) in enumerate(sparse_results, 1): |
|
|
verse_id = f"{verse_data['book']}_{verse_data['chapter']}_{verse_data['verse']}" |
|
|
if verse_id not in verse_scores: |
|
|
verse_scores[verse_id] = { |
|
|
'verse_data': verse_data, |
|
|
'dense_score': 0.0, |
|
|
'sparse_score': score, |
|
|
'dense_rank': float('inf'), |
|
|
'sparse_rank': rank, |
|
|
'rrf_score': 0.0 |
|
|
} |
|
|
else: |
|
|
verse_scores[verse_id]['sparse_score'] = score |
|
|
verse_scores[verse_id]['sparse_rank'] = rank |
|
|
|
|
|
|
|
|
for verse_id, data in verse_scores.items(): |
|
|
dense_rrf = (dense_weight / (k + data['dense_rank'])) if data['dense_rank'] != float('inf') else 0.0 |
|
|
sparse_rrf = (sparse_weight / (k + data['sparse_rank'])) if data['sparse_rank'] != float('inf') else 0.0 |
|
|
data['rrf_score'] = dense_rrf + sparse_rrf |
|
|
|
|
|
|
|
|
sorted_results = sorted(verse_scores.values(), key=lambda x: x['rrf_score'], reverse=True) |
|
|
|
|
|
|
|
|
return [(result['verse_data'], result['rrf_score']) for result in sorted_results] |
|
|
|
|
|
|
|
|
async def hybrid_search_embeddings( |
|
|
query: str, |
|
|
dense_provider: EmbeddingProvider, |
|
|
sparse_provider: EmbeddingProvider, |
|
|
translation: str, |
|
|
top_k: int = 10, |
|
|
dense_weight: float = 1.0, |
|
|
sparse_weight: float = 0.7, |
|
|
    pre_loaded_dense_embeddings: Optional[List[Dict[str, Any]]] = None,
    pre_loaded_sparse_embeddings: Optional[List[Dict[str, Any]]] = None,
|
|
use_hnsw: bool = True |
|
|
) -> List[Tuple[Dict[str, Any], float]]: |
|
|
""" |
|
|
Perform hybrid search combining dense and sparse embeddings using RRF. |
|
|
""" |
|
|
print(f"Performing hybrid search for: '{query}'") |
|
|
|
|
|
|
|
|
print("Performing dense search...") |
|
|
dense_results = await search_embeddings( |
|
|
query, dense_provider, translation, top_k * 2, |
|
|
pre_loaded_dense_embeddings, use_hnsw |
|
|
) |
|
|
|
|
|
|
|
|
print("Performing sparse search...") |
|
|
sparse_results = await search_embeddings( |
|
|
query, sparse_provider, translation, top_k * 2, |
|
|
pre_loaded_sparse_embeddings, use_hnsw |
|
|
) |
|
|
|
|
|
print(f"Dense search returned {len(dense_results)} results") |
|
|
print(f"Sparse search returned {len(sparse_results)} results") |
|
|
|
|
|
|
|
|
print("Combining results with Reciprocal Rank Fusion...") |
|
|
hybrid_results = reciprocal_rank_fusion( |
|
|
dense_results, sparse_results, dense_weight, sparse_weight |
|
|
) |
|
|
|
|
|
return hybrid_results[:top_k] |
|
|
|
|
|
|
|
|
async def bgem3_hybrid_search( |
|
|
query: str, |
|
|
hybrid_provider: BGEM3HybridProvider, |
|
|
translation: str, |
|
|
top_k: int = 10, |
|
|
dense_weight: float = 1.0, |
|
|
sparse_weight: float = 0.7 |
|
|
) -> List[Tuple[Dict[str, Any], float]]: |
|
|
""" |
|
|
Perform hybrid search using a single BGE-M3 hybrid provider. |
|
|
This is more efficient as it uses the same embeddings for both searches. |
|
|
""" |
|
|
print(f"Performing BGE-M3 hybrid search for: '{query}'") |
|
|
|
|
|
|
|
|
all_embeddings = load_embeddings_for_model(hybrid_provider, translation) |
|
|
if not all_embeddings: |
|
|
return [] |
|
|
|
|
|
|
|
|
query_hybrid = await hybrid_provider.embed_hybrid_text(query) |
|
|
query_dense = query_hybrid['dense'] |
|
|
    query_sparse_dict = query_hybrid['sparse_dict']  # currently unused: stored verse embeddings are dense-only
|
|
|
|
|
|
|
|
print("Performing dense similarity search...") |
|
|
dense_results = [] |
|
|
for embedding_data in all_embeddings: |
|
|
similarity = cosine_similarity(query_dense, embedding_data["embedding"]) |
|
|
dense_results.append((embedding_data, similarity)) |
|
|
|
|
|
|
|
|
dense_results.sort(key=lambda x: x[1], reverse=True) |
|
|
dense_results = dense_results[:top_k * 2] |
|
|
|
|
|
|
|
|
|
|
|
print("Hybrid search complete!") |
|
|
|
|
|
return dense_results[:top_k] |
|
|
|
|
|
|
|
|
class HNSWIndex: |
|
|
"""FAISS-based index for fast approximate nearest neighbor search.""" |
|
|
|
|
|
def __init__(self, dimension: int, max_elements: int = 100000): |
|
|
import faiss |
|
|
import os |
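        # Restrict FAISS to a single OpenMP thread; this avoids crashes/contention when
        # multiple OpenMP runtimes (e.g. PyTorch's and FAISS's) are loaded in one process.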
|
|
|
|
|
|
|
|
os.environ["OMP_NUM_THREADS"] = "1" |
|
|
faiss.omp_set_num_threads(1) |
|
|
|
|
|
self.dimension = dimension |
|
|
self.max_elements = max_elements |
|
|
|
|
|
|
|
|
self.index = faiss.IndexHNSWFlat(dimension, 16) |
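        # 16 = M, the number of graph neighbours per node; together with efConstruction
        # below it controls the index's build-time accuracy/speed trade-off.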
|
|
|
|
|
self.index.hnsw.efConstruction = 40 |
|
|
self.embeddings_data = [] |
|
|
self.built = False |
|
|
|
|
|
def add_embeddings(self, embeddings_data: List[Dict[str, Any]]): |
|
|
"""Add embeddings to the index.""" |
|
|
import numpy as np |
|
|
import faiss |
|
|
import gc |
|
|
|
|
|
try: |
|
|
|
|
|
print(f"Converting {len(embeddings_data)} embeddings to numpy array...") |
|
|
embeddings_array = np.array([data["embedding"] for data in embeddings_data], dtype=np.float32) |
|
|
|
|
|
|
|
|
|
|
|
print("Normalizing embeddings...") |
|
|
faiss.normalize_L2(embeddings_array) |
|
|
|
|
|
|
|
|
print("Adding embeddings to FAISS index...") |
|
|
self.index.add(embeddings_array) |
|
|
|
|
|
|
|
|
del embeddings_array |
|
|
gc.collect() |
|
|
|
|
|
self.embeddings_data = embeddings_data |
|
|
self.built = True |
|
|
|
|
|
|
|
|
|
|
|
self.index.hnsw.efSearch = max(32, min(100, len(embeddings_data) // 20)) |
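            # efSearch scales with corpus size, clamped to [32, 100]; higher values improve
            # recall at the cost of query latency.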
|
|
|
|
|
print(f"HNSW index built successfully with efSearch={self.index.hnsw.efSearch}") |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Error in add_embeddings: {e}") |
|
|
raise |
|
|
|
|
|
def search(self, query_embedding: List[float], k: int = 10) -> List[Tuple[Dict[str, Any], float]]: |
|
|
"""Search for k nearest neighbors and return with exact cosine similarities.""" |
|
|
if not self.built: |
|
|
return [] |
|
|
|
|
|
import numpy as np |
|
|
import faiss |
|
|
query_array = np.array([query_embedding]).astype('float32').reshape(1, -1) |
|
|
|
|
|
|
|
|
faiss.normalize_L2(query_array) |
|
|
|
|
|
|
|
|
search_k = min(k * 3, len(self.embeddings_data)) |
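        # (3x oversampling; candidates are re-ranked below with exact cosine similarity
        # to offset the HNSW approximation error)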
|
|
distances, indices = self.index.search(query_array, search_k) |
|
|
|
|
|
|
|
|
results = [] |
|
|
for idx in indices[0]: |
|
|
if idx < len(self.embeddings_data) and idx >= 0: |
|
|
embedding_data = self.embeddings_data[idx] |
|
|
exact_similarity = cosine_similarity(query_embedding, embedding_data["embedding"]) |
|
|
results.append((embedding_data, exact_similarity)) |
|
|
|
|
|
|
|
|
results.sort(key=lambda x: x[1], reverse=True) |
|
|
return results[:k] |
|
|
|
|
|
|
|
|
|
|
|
_hnsw_cache = {} |
|
|
|
|
|
def get_hnsw_index(provider: EmbeddingProvider, translation: str, all_embeddings: List[Dict[str, Any]]) -> HNSWIndex: |
|
|
"""Get or create FAISS HNSW index for given provider and translation.""" |
|
|
cache_key = f"{provider.model_name}_{translation}" |
|
|
|
|
|
if cache_key in _hnsw_cache: |
|
|
return _hnsw_cache[cache_key] |
|
|
|
|
|
if not all_embeddings: |
|
|
return None |
|
|
|
|
|
|
|
|
if len(all_embeddings) > 50000: |
|
|
print(f"Warning: Building HNSW index for {len(all_embeddings)} embeddings may require significant memory and time") |
|
|
print("Consider using --no-hnsw if you encounter memory issues") |
|
|
|
|
|
|
|
|
dimension = len(all_embeddings[0]["embedding"]) |
|
|
|
|
|
try: |
|
|
|
|
|
print(f"Building FAISS HNSW index for {len(all_embeddings)} embeddings (dimension: {dimension})...") |
|
|
hnsw_index = HNSWIndex(dimension, max_elements=len(all_embeddings) * 2) |
|
|
hnsw_index.add_embeddings(all_embeddings) |
|
|
|
|
|
|
|
|
_hnsw_cache[cache_key] = hnsw_index |
|
|
print(f"FAISS HNSW index built and cached for {cache_key}") |
|
|
|
|
|
return hnsw_index |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Error building HNSW index: {e}") |
|
|
print("Falling back to brute-force search") |
|
|
return None |
|
|
|
|
|
|
|
|
async def search_embeddings( |
|
|
query: str, provider: EmbeddingProvider, translation: str, top_k: int = 10, |
|
|
    pre_loaded_embeddings: Optional[List[Dict[str, Any]]] = None, use_hnsw: bool = True
|
|
) -> List[Tuple[Dict[str, Any], float]]: |
|
|
"""Search for similar verses using embeddings with optional HNSW optimization.""" |
|
|
|
|
|
if pre_loaded_embeddings is not None: |
|
|
all_embeddings = pre_loaded_embeddings |
|
|
else: |
|
|
all_embeddings = load_embeddings_for_model(provider, translation) |
|
|
|
|
|
if not all_embeddings: |
|
|
return [] |
|
|
|
|
|
|
|
|
print(f"Generating embedding for query: '{query}'") |
|
|
query_embedding = await provider.embed_query(query) |
|
|
|
|
|
|
|
|
if use_hnsw: |
|
|
hnsw_index = get_hnsw_index(provider, translation, all_embeddings) |
|
|
if hnsw_index: |
|
|
return hnsw_index.search(query_embedding, top_k) |
|
|
|
|
|
|
|
|
print(f"Using brute-force search for {len(all_embeddings)} embeddings") |
|
|
results = [] |
|
|
for embedding_data in all_embeddings: |
|
|
similarity = cosine_similarity(query_embedding, embedding_data["embedding"]) |
|
|
results.append((embedding_data, similarity)) |
|
|
|
|
|
|
|
|
results.sort(key=lambda x: x[1], reverse=True) |
|
|
|
|
|
return results[:top_k] |
|
|
|
|
|
|
|
|
def display_search_results( |
|
|
results: List[Tuple[Dict[str, Any], float]], query: str |
|
|
) -> None: |
|
|
"""Display search results in a formatted way.""" |
|
|
print(f"\nTop {len(results)} results for query: '{query}'") |
|
|
print("=" * 80) |
|
|
|
|
|
for i, (verse_data, similarity) in enumerate(results, 1): |
|
|
book = verse_data["book"] |
|
|
chapter = verse_data["chapter"] |
|
|
verse = verse_data["verse"] |
|
|
|
|
|
print(f"{i:2d}. {book} {chapter}:{verse} (similarity: {similarity:.4f})") |
|
|
|
|
|
|
|
|
try: |
|
|
translation = verse_data["translation"] |
|
|
text_file = Path("text") / f"{translation}.json" |
|
|
|
|
|
if text_file.exists(): |
|
|
with open(text_file, "r", encoding="utf-8") as f: |
|
|
verses = json.load(f) |
|
|
|
|
|
|
|
|
verse_text = None |
|
|
for v in verses: |
|
|
if ( |
|
|
v["book"] == book |
|
|
and v["chapter"] == chapter |
|
|
and v["verse"] == verse |
|
|
): |
|
|
verse_text = v["text"] |
|
|
break |
|
|
|
|
|
if verse_text: |
|
|
|
|
|
wrapped_text = "\n ".join( |
|
|
[verse_text[i : i + 70] for i in range(0, len(verse_text), 70)] |
|
|
) |
|
|
print(f" {wrapped_text}") |
|
|
else: |
|
|
print(f" [Text not found]") |
|
|
else: |
|
|
print(f" [Translation file not found: {translation}]") |
|
|
|
|
|
except Exception as e: |
|
|
print(f" [Error loading text: {e}]") |
|
|
|
|
|
print() |
|
|
|
|
|
|
|
|
def select_translation_for_query() -> str: |
|
|
"""Interactive translation selection for querying.""" |
|
|
translations = get_available_translations() |
|
|
|
|
|
if not translations: |
|
|
print("No translations found in the text directory!") |
|
|
exit(1) |
|
|
|
|
|
if len(translations) == 1: |
|
|
print(f"Using translation: {translations[0]}") |
|
|
return translations[0] |
|
|
|
|
|
print("Available translations:") |
|
|
for i, translation in enumerate(translations, 1): |
|
|
print(f" {i}. {translation}") |
|
|
|
|
|
while True: |
|
|
try: |
|
|
choice = input( |
|
|
f"\nSelect translation for query (1-{len(translations)}): " |
|
|
).strip() |
|
|
idx = int(choice) - 1 |
|
|
if 0 <= idx < len(translations): |
|
|
return translations[idx] |
|
|
else: |
|
|
print(f"Please enter a number between 1 and {len(translations)}") |
|
|
except (ValueError, KeyboardInterrupt): |
|
|
print("\nExiting...") |
|
|
exit(0) |
|
|
|
|
|
|
|
|
def select_model_for_query() -> EmbeddingProvider: |
|
|
"""Interactive model selection for querying.""" |
|
|
models = get_available_models() |
|
|
|
|
|
print("Available embedding models:") |
|
|
print() |
|
|
|
|
|
all_choices = [] |
|
|
choice_num = 1 |
|
|
|
|
|
for provider_name, provider_info in models.items(): |
|
|
print(f"{provider_name}:") |
|
|
for model in provider_info["models"]: |
|
|
print(f" {choice_num}. {model}") |
|
|
all_choices.append((provider_name, model, provider_info["provider_class"])) |
|
|
choice_num += 1 |
|
|
print() |
|
|
|
|
|
while True: |
|
|
try: |
|
|
choice = input(f"Select model for query (1-{len(all_choices)}): ").strip() |
|
|
idx = int(choice) - 1 |
|
|
if 0 <= idx < len(all_choices): |
|
|
provider_name, model_name, provider_class = all_choices[idx] |
|
|
print(f"Selected: {model_name} ({provider_name})") |
|
|
return provider_class(model_name) |
|
|
else: |
|
|
print(f"Please enter a number between 1 and {len(all_choices)}") |
|
|
except (ValueError, KeyboardInterrupt): |
|
|
print("\nExiting...") |
|
|
exit(0) |
|
|
|
|
|
|
|
|
async def query_mode(args): |
|
|
"""Interactive query mode.""" |
|
|
use_hnsw = not args.no_hnsw |
|
|
print("Bible Verse Search Mode") |
|
|
print("=" * 30) |
|
|
print() |
|
|
|
|
|
|
|
|
translation = select_translation_for_query() |
|
|
print() |
|
|
|
|
|
|
|
|
provider = select_model_for_query() |
|
|
print() |
|
|
|
|
|
|
|
|
while True: |
|
|
try: |
|
|
query = input("\nEnter your search query (or 'quit' to exit): ").strip() |
|
|
|
|
|
if query.lower() in ["quit", "exit", "q"]: |
|
|
print("Goodbye!") |
|
|
break |
|
|
|
|
|
if not query: |
|
|
print("Please enter a query.") |
|
|
continue |
|
|
|
|
|
|
|
|
try: |
|
|
top_k_input = input("Number of results to show (default 10): ").strip() |
|
|
top_k = int(top_k_input) if top_k_input else 10 |
|
|
top_k = max(1, min(top_k, 50)) |
|
|
except ValueError: |
|
|
top_k = 10 |
|
|
|
|
|
|
|
|
results = await search_embeddings(query, provider, translation, top_k, use_hnsw=use_hnsw) |
|
|
|
|
|
if results: |
|
|
display_search_results(results, query) |
|
|
else: |
|
|
print( |
|
|
"No results found. Make sure embeddings exist for this model and translation." |
|
|
) |
|
|
|
|
|
except KeyboardInterrupt: |
|
|
print("\nGoodbye!") |
|
|
break |
|
|
except Exception as e: |
|
|
print(f"Error during search: {e}") |
|
|
print("Please try again.") |
|
|
|
|
|
|
|
|
def load_queries(queries_file: Path) -> List[Dict[str, Any]]: |
|
|
"""Load queries from YAML file.""" |
|
|
if not queries_file.exists(): |
|
|
print(f"Queries file not found: {queries_file}") |
|
|
print("Expected format:") |
|
|
print("""# Simple format - list of query/expected pairs |
|
|
- query: "love your enemies" |
|
|
expected: "Matthew 5:44" |
|
|
- query: "faith hope love" |
|
|
expected: "1 Corinthians 13:13" |
|
|
|
|
|
# Or with expected as list for multiple valid answers |
|
|
- query: "god is love" |
|
|
expected: ["1 John 4:8", "1 John 4:16"] |
|
|
""") |
|
|
exit(1) |
|
|
|
|
|
try: |
|
|
import yaml |
|
|
with open(queries_file, "r", encoding="utf-8") as f: |
|
|
queries = yaml.safe_load(f) |
|
|
|
|
|
|
|
|
if isinstance(queries, dict) and "queries" in queries: |
|
|
queries = queries["queries"] |
|
|
elif not isinstance(queries, list): |
|
|
print(f"Invalid queries file format. Expected list of queries in {queries_file}") |
|
|
exit(1) |
|
|
|
|
|
return queries |
|
|
except Exception as e: |
|
|
print(f"Error loading queries file {queries_file}: {e}") |
|
|
exit(1) |
|
|
|
|
|
|
|
|
def parse_verse_reference(ref: str) -> Tuple[Optional[str], Optional[int], Optional[int]]:
|
|
"""Parse a verse reference like 'Matthew 5:44' into (book, chapter, verse).""" |
|
|
try: |
|
|
|
|
|
parts = ref.rsplit(" ", 1) |
|
|
if len(parts) != 2: |
|
|
raise ValueError("Invalid format") |
|
|
|
|
|
book = parts[0].strip() |
|
|
chapter_verse = parts[1] |
|
|
|
|
|
|
|
|
if ":" not in chapter_verse: |
|
|
raise ValueError("Missing verse number") |
|
|
|
|
|
chapter_str, verse_str = chapter_verse.split(":", 1) |
|
|
chapter = int(chapter_str.strip()) |
|
|
verse = int(verse_str.strip()) |
|
|
|
|
|
return book, chapter, verse |
|
|
except Exception as e: |
|
|
print(f"Error parsing verse reference '{ref}': {e}") |
|
|
return None, None, None |
|
|
|
|
|
|
|
|
def calculate_score(expected_refs, results: List[Tuple[Dict[str, Any], float]]) -> int: |
|
|
"""Calculate score based on where expected result appears in top 3 results.""" |
|
|
|
|
|
if isinstance(expected_refs, str): |
|
|
expected_refs = [expected_refs] |
|
|
elif not expected_refs: |
|
|
return 0 |
|
|
|
|
|
|
|
|
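    # Parse each expected reference into a (book, chapter, verse) tuple, skipping unparsable ones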
expected_verses = [] |
|
|
for ref in expected_refs: |
|
|
book, chapter, verse = parse_verse_reference(ref) |
|
|
if book: |
|
|
expected_verses.append((book, chapter, verse)) |
|
|
|
|
|
if not expected_verses: |
|
|
return 0 |
|
|
|
|
|
|
|
|
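    # Award 3, 2, or 1 points when the expected verse is the 1st, 2nd, or 3rd result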
for i, (verse_data, similarity) in enumerate(results[:3]): |
|
|
verse_tuple = (verse_data["book"], verse_data["chapter"], verse_data["verse"]) |
|
|
if verse_tuple in expected_verses: |
|
|
return 3 - i |
|
|
|
|
|
return 0 |
|
|
|
|
|
|
|
|
def format_verse_reference(verse_data: Dict[str, Any]) -> str: |
|
|
"""Format verse data into a reference string.""" |
|
|
return f"{verse_data['book']} {verse_data['chapter']}:{verse_data['verse']}" |
|
|
|
|
|
|
|
|
async def process_single_query( |
|
|
query_data: Dict[str, Any], |
|
|
provider: EmbeddingProvider, |
|
|
translation: str, |
|
|
query_index: int, |
|
|
total_queries: int, |
|
|
semaphore: Semaphore, |
|
|
pre_loaded_embeddings: List[Dict[str, Any]], |
|
|
use_hnsw: bool = True |
|
|
) -> Dict[str, Any]: |
|
|
"""Process a single query with concurrency control.""" |
|
|
async with semaphore: |
|
|
query = query_data.get("query", "") |
|
|
expected = query_data.get("expected", "") |
|
|
|
|
|
if not query: |
|
|
print(f"Skipping query {query_index}: missing 'query' field") |
|
|
return None |
|
|
|
|
|
print(f"Query {query_index}/{total_queries}: {query}") |
|
|
|
|
|
try: |
|
|
|
|
|
search_results = await search_embeddings(query, provider, translation, top_k=3, pre_loaded_embeddings=pre_loaded_embeddings, use_hnsw=use_hnsw) |
|
|
|
|
|
if not search_results: |
|
|
print(f" No results found for query: {query}") |
|
|
return None |
|
|
|
|
|
|
|
|
score = 0 |
|
|
if expected: |
|
|
score = calculate_score(expected, search_results) |
|
|
expected_str = str(expected) if isinstance(expected, list) else expected |
|
|
print(f" Expected: {expected_str} | Score: {score}/3") |
|
|
|
|
|
|
|
|
result_data = { |
|
|
"query": query, |
|
|
"expected": expected, |
|
|
"score": score, |
|
|
"results": [] |
|
|
} |
|
|
|
|
|
for j, (verse_data, similarity) in enumerate(search_results): |
|
|
result_ref = format_verse_reference(verse_data) |
|
|
result_data["results"].append({ |
|
|
"reference": result_ref, |
|
|
"similarity": similarity |
|
|
}) |
|
|
print(f" {j+1}. {result_ref} ({similarity:.4f})") |
|
|
|
|
|
return result_data |
|
|
|
|
|
except Exception as e: |
|
|
print(f"Error processing query '{query}': {e}") |
|
|
return None |
|
|
|
|
|
|
|
|
async def run_batch_queries_openai_optimized( |
|
|
queries: List[Dict[str, Any]], |
|
|
provider: OpenAIProvider, |
|
|
translation: str, |
|
|
batch_size: int = 100, |
|
|
use_hnsw: bool = True |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Run batch queries optimized for OpenAI using their batch embedding capability.""" |
|
|
total_queries = len(queries) |
|
|
|
|
|
print(f"Running {total_queries} batch queries using OpenAI batch optimization...") |
|
|
|
|
|
|
|
|
print("Loading verse embeddings for similarity search...") |
|
|
pre_loaded_embeddings = load_embeddings_for_model(provider, translation) |
|
|
|
|
|
if not pre_loaded_embeddings: |
|
|
print("No embeddings found! Make sure embeddings exist for this model and translation.") |
|
|
return [] |
|
|
|
|
|
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings") |
|
|
|
|
|
|
|
|
query_texts = [] |
|
|
query_data_list = [] |
|
|
for query_data in queries: |
|
|
query = query_data.get("query", "") |
|
|
if query: |
|
|
query_texts.append(query) |
|
|
query_data_list.append(query_data) |
|
|
|
|
|
if not query_texts: |
|
|
print("No valid queries found!") |
|
|
return [] |
|
|
|
|
|
|
|
|
print(f"Embedding {len(query_texts)} queries using OpenAI batch API...") |
|
|
query_embeddings = await provider.embed_queries_batch(query_texts, batch_size) |
|
|
|
|
|
if len(query_embeddings) != len(query_texts): |
|
|
print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}") |
|
|
|
|
|
|
|
|
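    # Reuse a single HNSW index (when enabled) for all queries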
hnsw_index = None |
|
|
if use_hnsw: |
|
|
hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings) |
|
|
|
|
|
|
|
|
results = [] |
|
|
for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)): |
|
|
query = query_data.get("query", "") |
|
|
expected = query_data.get("expected", "") |
|
|
|
|
|
print(f"Processing query {i+1}/{len(query_texts)}: {query}") |
|
|
|
|
|
|
|
|
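        # Search with the HNSW index when available, else brute-force cosine similarity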
if hnsw_index: |
|
|
search_results = hnsw_index.search(query_embedding, k=3) |
|
|
else: |
|
|
|
|
|
similarities = [] |
|
|
for embedding_data in pre_loaded_embeddings: |
|
|
similarity = cosine_similarity(query_embedding, embedding_data["embedding"]) |
|
|
similarities.append((embedding_data, similarity)) |
|
|
|
|
|
|
|
|
similarities.sort(key=lambda x: x[1], reverse=True) |
|
|
search_results = similarities[:3] |
|
|
|
|
|
|
|
|
score = 0 |
|
|
if expected: |
|
|
score = calculate_score(expected, search_results) |
|
|
expected_str = str(expected) if isinstance(expected, list) else expected |
|
|
print(f" Expected: {expected_str} | Score: {score}/3") |
|
|
|
|
|
|
|
|
result_data = { |
|
|
"query": query, |
|
|
"expected": expected, |
|
|
"score": score, |
|
|
"results": [] |
|
|
} |
|
|
|
|
|
for j, (verse_data, similarity) in enumerate(search_results): |
|
|
result_ref = format_verse_reference(verse_data) |
|
|
result_data["results"].append({ |
|
|
"reference": result_ref, |
|
|
"similarity": similarity |
|
|
}) |
|
|
print(f" {j+1}. {result_ref} ({similarity:.4f})") |
|
|
|
|
|
results.append(result_data) |
|
|
|
|
|
print(f"\nProcessed {len(results)} queries successfully") |
|
|
return results |
|
|
|
|
|
|
|
|
async def run_batch_queries_optimized( |
|
|
queries: List[Dict[str, Any]], |
|
|
provider: EmbeddingProvider, |
|
|
translation: str, |
|
|
batch_size: int = 100, |
|
|
use_hnsw: bool = True |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Run batch queries optimized for providers that support batch embedding.""" |
|
|
total_queries = len(queries) |
|
|
|
|
|
print(f"Running {total_queries} batch queries using batch optimization...") |
|
|
|
|
|
|
|
|
print("Loading verse embeddings for similarity search...") |
|
|
pre_loaded_embeddings = load_embeddings_for_model(provider, translation) |
|
|
|
|
|
if not pre_loaded_embeddings: |
|
|
print("No embeddings found! Make sure embeddings exist for this model and translation.") |
|
|
return [] |
|
|
|
|
|
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings") |
|
|
|
|
|
|
|
|
query_texts = [] |
|
|
query_data_list = [] |
|
|
for query_data in queries: |
|
|
query = query_data.get("query", "") |
|
|
if query: |
|
|
query_texts.append(query) |
|
|
query_data_list.append(query_data) |
|
|
|
|
|
if not query_texts: |
|
|
print("No valid queries found!") |
|
|
return [] |
|
|
|
|
|
|
|
|
print(f"Embedding {len(query_texts)} queries using batch API...") |
|
|
|
|
|
|
|
|
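    # Override the batch size by provider type: 500 for local models, 100 for hosted APIs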
if isinstance(provider, (HuggingFaceProvider, BGEM3Provider, BGEM3HybridProvider)): |
|
|
batch_size = 500 |
|
|
elif isinstance(provider, (OpenAIProvider, GeminiProvider, VoyageProvider)): |
|
|
batch_size = 100 |
|
|
|
|
|
query_embeddings = await provider.embed_queries_batch(query_texts, batch_size) |
|
|
|
|
|
if len(query_embeddings) != len(query_texts): |
|
|
print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}") |
|
|
|
|
|
|
|
|
hnsw_index = None |
|
|
if use_hnsw: |
|
|
hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings) |
|
|
|
|
|
|
|
|
results = [] |
|
|
for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)): |
|
|
query = query_data.get("query", "") |
|
|
expected = query_data.get("expected", "") |
|
|
|
|
|
print(f"Processing query {i+1}/{len(query_texts)}: {query}") |
|
|
|
|
|
|
|
|
if hnsw_index: |
|
|
search_results = hnsw_index.search(query_embedding, k=3) |
|
|
else: |
|
|
|
|
|
similarities = [] |
|
|
for embedding_data in pre_loaded_embeddings: |
|
|
similarity = cosine_similarity(query_embedding, embedding_data["embedding"]) |
|
|
similarities.append((embedding_data, similarity)) |
|
|
|
|
|
|
|
|
similarities.sort(key=lambda x: x[1], reverse=True) |
|
|
search_results = similarities[:3] |
|
|
|
|
|
|
|
|
score = 0 |
|
|
if expected: |
|
|
score = calculate_score(expected, search_results) |
|
|
expected_str = str(expected) if isinstance(expected, list) else expected |
|
|
print(f" Expected: {expected_str} | Score: {score}/3") |
|
|
|
|
|
|
|
|
result_data = { |
|
|
"query": query, |
|
|
"expected": expected, |
|
|
"score": score, |
|
|
"results": [] |
|
|
} |
|
|
|
|
|
for j, (verse_data, similarity) in enumerate(search_results): |
|
|
result_ref = format_verse_reference(verse_data) |
|
|
result_data["results"].append({ |
|
|
"reference": result_ref, |
|
|
"similarity": similarity |
|
|
}) |
|
|
print(f" {j+1}. {result_ref} ({similarity:.4f})") |
|
|
|
|
|
results.append(result_data) |
|
|
|
|
|
print(f"\nProcessed {len(results)} queries successfully") |
|
|
return results |
|
|
|
|
|
|
|
|
async def run_batch_queries( |
|
|
queries: List[Dict[str, Any]], |
|
|
provider: EmbeddingProvider, |
|
|
translation: str, |
|
|
concurrency: int = 5, |
|
|
use_hnsw: bool = True |
|
|
) -> List[Dict[str, Any]]: |
|
|
"""Run all batch queries with provider-specific optimizations.""" |
|
|
|
|
|
if hasattr(provider, 'embed_queries_batch'): |
|
|
provider_name = type(provider).__name__ |
|
|
print(f"Using {provider_name} batch optimization...") |
|
|
return await run_batch_queries_optimized(queries, provider, translation, use_hnsw=use_hnsw) |
|
|
|
|
|
|
|
|
total_queries = len(queries) |
|
|
|
|
|
print(f"Running {total_queries} batch queries with concurrency limit of {concurrency}...") |
|
|
|
|
|
|
|
|
print("Loading embeddings for all queries...") |
|
|
pre_loaded_embeddings = load_embeddings_for_model(provider, translation) |
|
|
|
|
|
if not pre_loaded_embeddings: |
|
|
print("No embeddings found! Make sure embeddings exist for this model and translation.") |
|
|
return [] |
|
|
|
|
|
print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings") |
|
|
|
|
|
|
|
|
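    # The semaphore caps how many queries are processed concurrently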
semaphore = Semaphore(concurrency) |
|
|
|
|
|
|
|
|
tasks = [] |
|
|
for i, query_data in enumerate(queries, 1): |
|
|
task = process_single_query( |
|
|
query_data, provider, translation, i, total_queries, semaphore, pre_loaded_embeddings, use_hnsw |
|
|
) |
|
|
tasks.append(task) |
|
|
|
|
|
|
|
|
print(f"Starting concurrent processing...") |
|
|
results = await asyncio.gather(*tasks, return_exceptions=True) |
|
|
|
|
|
|
|
|
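    # Keep successful results; report exceptions returned by asyncio.gather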
valid_results = [] |
|
|
for result in results: |
|
|
if isinstance(result, Exception): |
|
|
print(f"Exception in batch processing: {result}") |
|
|
elif result is not None: |
|
|
valid_results.append(result) |
|
|
|
|
|
print(f"\nProcessed {len(valid_results)} queries successfully out of {total_queries}") |
|
|
return valid_results |
|
|
|
|
|
|
|
|
def write_to_csv( |
|
|
results: List[Dict[str, Any]], |
|
|
provider: EmbeddingProvider, |
|
|
translation: str |
|
|
) -> Path: |
|
|
"""Write results to CSV file using new directory structure.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
|
|
|
|
|
|
results_dir = Path("results") / provider_name |
|
|
results_dir.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
|
|
|
results_file = results_dir / f"{model_name}.csv" |
|
|
|
|
|
|
|
|
with open(results_file, "w", newline="", encoding="utf-8") as f: |
|
|
fieldnames = [ |
|
|
"timestamp", "provider", "model", "translation", "query", "expected", |
|
|
"result1", "score1", "result2", "score2", "result3", "score3", "points" |
|
|
] |
|
|
writer = csv.DictWriter(f, fieldnames=fieldnames) |
|
|
writer.writeheader() |
|
|
|
|
|
timestamp = datetime.now().isoformat() |
|
|
|
|
|
for result in results: |
|
|
|
|
|
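            # Pad to three entries so the result1..result3 columns are always filled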
padded_results = result["results"] + [{"reference": "", "similarity": 0.0}] * 3 |
|
|
padded_results = padded_results[:3] |
|
|
|
|
|
row = { |
|
|
"timestamp": timestamp, |
|
|
"provider": provider_name, |
|
|
"model": model_name, |
|
|
"translation": translation, |
|
|
"query": result["query"], |
|
|
"expected": result["expected"], |
|
|
"result1": padded_results[0]["reference"], |
|
|
"score1": f"{padded_results[0]['similarity']:.4f}", |
|
|
"result2": padded_results[1]["reference"], |
|
|
"score2": f"{padded_results[1]['similarity']:.4f}", |
|
|
"result3": padded_results[2]["reference"], |
|
|
"score3": f"{padded_results[2]['similarity']:.4f}", |
|
|
"points": result["score"] |
|
|
} |
|
|
writer.writerow(row) |
|
|
|
|
|
return results_file |
|
|
|
|
|
|
|
|
async def batch_mode(args): |
|
|
"""Batch query processing mode.""" |
|
|
print("Bible Verse Batch Query Mode") |
|
|
print("=" * 35) |
|
|
print() |
|
|
|
|
|
|
|
|
queries_file = Path(args.queries_file) |
|
|
queries = load_queries(queries_file) |
|
|
|
|
|
print(f"Loaded {len(queries)} queries from {queries_file}") |
|
|
|
|
|
|
|
|
if args.translation: |
|
|
translation = args.translation |
|
|
if translation not in get_available_translations(): |
|
|
print(f"Translation '{translation}' not found!") |
|
|
return |
|
|
else: |
|
|
translation = select_translation_for_query() |
|
|
|
|
|
print(f"Using translation: {translation}") |
|
|
print() |
|
|
|
|
|
|
|
|
if args.model: |
|
|
|
|
|
provider = None |
|
|
models = get_available_models() |
|
|
for provider_info in models.values(): |
|
|
if args.model in provider_info["models"]: |
|
|
provider = provider_info["provider_class"](args.model) |
|
|
break |
|
|
|
|
|
if provider is None: |
|
|
print(f"Model '{args.model}' not found!") |
|
|
return |
|
|
else: |
|
|
provider = select_model_for_query() |
|
|
|
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
print(f"Using model: {model_name} ({provider_name})") |
|
|
print() |
|
|
|
|
|
|
|
|
use_hnsw = not args.no_hnsw |
|
|
results = await run_batch_queries(queries, provider, translation, args.concurrency, use_hnsw) |
|
|
|
|
|
if not results: |
|
|
print("No results to save.") |
|
|
return |
|
|
|
|
|
|
|
|
total_queries = len(results) |
|
|
total_points = sum(r["score"] for r in results) |
|
|
max_possible_points = total_queries * 3 |
|
|
accuracy = (total_points / max_possible_points) * 100 if max_possible_points > 0 else 0 |
|
|
|
|
|
print(f"\nBatch Query Summary:") |
|
|
print(f" Total queries: {total_queries}") |
|
|
print(f" Total points: {total_points}/{max_possible_points}") |
|
|
print(f" Accuracy: {accuracy:.1f}%") |
|
|
|
|
|
|
|
|
results_file = write_to_csv(results, provider, translation) |
|
|
print(f"\nResults written to: {results_file}") |
|
|
|
|
|
|
|
|
def get_models_without_results(translation: str) -> List[tuple]:
    """Get list of (provider_name, model_name, provider_class) for models without results.

    Note: result files are stored per provider/model (not per translation), so the
    translation argument is currently unused when locating them.
    """
|
|
models = get_available_models() |
|
|
results_dir = Path("results") |
|
|
|
|
|
models_without_results = [] |
|
|
|
|
|
for provider_name, provider_info in models.items(): |
|
|
provider_class = provider_info["provider_class"] |
|
|
|
|
|
for model_name in provider_info["models"]: |
|
|
|
|
|
provider_instance = provider_class(model_name) |
|
|
provider_dir = get_model_provider(provider_instance) |
|
|
model_file_name = provider_instance.get_name() |
|
|
|
|
|
|
|
|
results_file = results_dir / provider_dir / f"{model_file_name}.csv" |
|
|
|
|
|
if not results_file.exists(): |
|
|
models_without_results.append((provider_name, model_name, provider_class)) |
|
|
|
|
|
return models_without_results |
|
|
|
|
|
|
|
|
def get_models_without_embeddings(translation: str, include_providers=None, exclude_providers=None) -> List[tuple]: |
|
|
"""Get list of (provider_name, model_name, provider_class) for models without embeddings.""" |
|
|
models = get_available_models() |
|
|
models_without_embeddings = [] |
|
|
|
|
|
for provider_name, provider_info in models.items(): |
|
|
|
|
|
if include_providers and provider_name not in include_providers: |
|
|
continue |
|
|
if exclude_providers and provider_name in exclude_providers: |
|
|
continue |
|
|
|
|
|
provider_class = provider_info["provider_class"] |
|
|
|
|
|
for model_name in provider_info["models"]: |
|
|
|
|
|
provider_instance = provider_class(model_name) |
|
|
|
|
|
if not check_embeddings_exist(provider_instance, translation): |
|
|
models_without_embeddings.append((provider_name, model_name, provider_class)) |
|
|
|
|
|
return models_without_embeddings |
|
|
|
|
|
|
|
|
def check_embeddings_exist(provider: EmbeddingProvider, translation: str) -> bool: |
|
|
"""Check if embeddings exist for a given provider and translation.""" |
|
|
provider_name = get_model_provider(provider) |
|
|
model_name = provider.get_name() |
|
|
|
|
|
embeddings_dir = Path("embeddings") / provider_name / model_name |
|
|
|
|
|
if not embeddings_dir.exists(): |
|
|
return False |
|
|
|
|
|
|
|
|
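    # Embeddings count as present if any book directory contains at least one JSON file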
for book_dir in embeddings_dir.iterdir(): |
|
|
if book_dir.is_dir() and any(book_dir.glob("*.json")): |
|
|
return True |
|
|
|
|
|
return False |
|
|
|
|
|
|
|
|
async def autobatch_mode(args): |
|
|
"""Automatically run batch processing for all models without results.""" |
|
|
print("Bible Verse Auto-Batch Processing Mode") |
|
|
print("=" * 40) |
|
|
print() |
|
|
|
|
|
|
|
|
queries_file = Path(args.queries_file) |
|
|
queries = load_queries(queries_file) |
|
|
|
|
|
print(f"Loaded {len(queries)} queries from {queries_file}") |
|
|
|
|
|
|
|
|
if args.translation: |
|
|
translation = args.translation |
|
|
if translation not in get_available_translations(): |
|
|
print(f"Translation '{translation}' not found!") |
|
|
return |
|
|
else: |
|
|
translation = select_translation_for_query() |
|
|
|
|
|
print(f"Using translation: {translation}") |
|
|
print() |
|
|
|
|
|
|
|
|
models_without_results = get_models_without_results(translation) |
|
|
|
|
|
if not models_without_results: |
|
|
print("All models already have results! No processing needed.") |
|
|
return |
|
|
|
|
|
print(f"Found {len(models_without_results)} models without results:") |
|
|
|
|
|
|
|
|
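    # Only models that already have embeddings for this translation can be evaluated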
processable_models = [] |
|
|
for provider_name, model_name, provider_class in models_without_results: |
|
|
provider_instance = provider_class(model_name) |
|
|
if check_embeddings_exist(provider_instance, translation): |
|
|
processable_models.append((provider_name, model_name, provider_class)) |
|
|
print(f" ✅ {provider_name}/{model_name} - has embeddings") |
|
|
else: |
|
|
print(f" ❌ {provider_name}/{model_name} - missing embeddings (skipping)") |
|
|
|
|
|
if not processable_models: |
|
|
print("\nNo models with embeddings found. Run embedding generation first!") |
|
|
return |
|
|
|
|
|
print(f"\nWill process {len(processable_models)} models with embeddings.") |
|
|
|
|
|
if args.dry_run: |
|
|
print("\nDry run complete. Use --no-dry-run or remove --dry-run to actually process.") |
|
|
return |
|
|
|
|
|
print() |
|
|
|
|
|
|
|
|
successful_models = 0 |
|
|
failed_models = 0 |
|
|
|
|
|
for i, (provider_name, model_name, provider_class) in enumerate(processable_models, 1): |
|
|
print(f"Processing model {i}/{len(processable_models)}: {provider_name}/{model_name}") |
|
|
print("-" * 60) |
|
|
|
|
|
try: |
|
|
provider = provider_class(model_name) |
|
|
|
|
|
|
|
|
use_hnsw = not args.no_hnsw |
|
|
results = await run_batch_queries(queries, provider, translation, args.concurrency, use_hnsw) |
|
|
|
|
|
if results: |
|
|
|
|
|
results_file = write_to_csv(results, provider, translation) |
|
|
|
|
|
|
|
|
total_queries = len(results) |
|
|
total_points = sum(r["score"] for r in results) |
|
|
max_possible_points = total_queries * 3 |
|
|
accuracy = (total_points / max_possible_points) * 100 if max_possible_points > 0 else 0 |
|
|
|
|
|
print(f" ✅ Completed: {accuracy:.1f}% accuracy ({total_points}/{max_possible_points} points)") |
|
|
print(f" 📁 Results saved to: {results_file}") |
|
|
successful_models += 1 |
|
|
else: |
|
|
print(f" ❌ Failed: No results generated") |
|
|
failed_models += 1 |
|
|
|
|
|
except Exception as e: |
|
|
print(f" ❌ Failed with error: {e}") |
|
|
failed_models += 1 |
|
|
|
|
|
print() |
|
|
|
|
|
|
|
|
print("=" * 60) |
|
|
print("Auto-batch processing complete!") |
|
|
print(f" ✅ Successful: {successful_models} models") |
|
|
if failed_models > 0: |
|
|
print(f" ❌ Failed: {failed_models} models") |
|
|
print(f" 📊 Total processed: {successful_models + failed_models} models") |
|
|
|
|
|
if successful_models > 0: |
|
|
print(f"\n💡 Run 'python main.py report' to generate an updated report with all results.") |
|
|
|
|
|
|
|
|
async def autoembed_mode(args): |
|
|
"""Automatically generate embeddings for all models without embeddings.""" |
|
|
print("Bible Verse Auto-Embedding Generation Mode") |
|
|
print("=" * 45) |
|
|
print() |
|
|
|
|
|
|
|
|
if args.translation: |
|
|
translation = args.translation |
|
|
if translation not in get_available_translations(): |
|
|
print(f"Translation '{translation}' not found!") |
|
|
return |
|
|
else: |
|
|
translation = select_translation() |
|
|
|
|
|
print(f"Using translation: {translation}") |
|
|
print() |
|
|
|
|
|
|
|
|
print(f"Loading verses for translation: {translation}") |
|
|
try: |
|
|
verses = load_translation_text(translation) |
|
|
print(f"Loaded {len(verses)} verses") |
|
|
except FileNotFoundError as e: |
|
|
print(f"Error loading translation: {e}") |
|
|
return |
|
|
|
|
|
|
|
|
models_without_embeddings = get_models_without_embeddings( |
|
|
translation, |
|
|
include_providers=args.include_providers, |
|
|
exclude_providers=args.exclude_providers |
|
|
) |
|
|
|
|
|
if not models_without_embeddings: |
|
|
print("All models already have embeddings! No processing needed.") |
|
|
return |
|
|
|
|
|
print(f"Found {len(models_without_embeddings)} models without embeddings:") |
|
|
for provider_name, model_name, _ in models_without_embeddings: |
|
|
print(f" - {provider_name}/{model_name}") |
|
|
|
|
|
if args.dry_run: |
|
|
print(f"\nDry run complete. Found {len(models_without_embeddings)} models that need embeddings.") |
|
|
print("Use --no-dry-run or remove --dry-run to actually generate embeddings.") |
|
|
return |
|
|
|
|
|
print() |
|
|
|
|
|
|
|
|
successful_models = 0 |
|
|
failed_models = 0 |
|
|
|
|
|
for i, (provider_name, model_name, provider_class) in enumerate(models_without_embeddings, 1): |
|
|
print(f"Processing model {i}/{len(models_without_embeddings)}: {provider_name}/{model_name}") |
|
|
print("-" * 60) |
|
|
|
|
|
try: |
|
|
|
|
|
provider = provider_class(model_name) |
|
|
|
|
|
|
|
|
create_output_directories(verses, provider) |
|
|
|
|
|
|
|
|
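            # When skip_existing is set, filter out verses that already have embeddings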
verses_to_process = verses |
|
|
if args.skip_existing: |
|
|
original_count = len(verses) |
|
|
verses_to_process = check_existing_embeddings(translation, provider, verses) |
|
|
skipped_count = original_count - len(verses_to_process) |
|
|
if skipped_count > 0: |
|
|
print(f" Skipping {skipped_count} verses with existing embeddings") |
|
|
|
|
|
if not verses_to_process: |
|
|
print(f" ✅ All verses already have embeddings for this model") |
|
|
successful_models += 1 |
|
|
continue |
|
|
|
|
|
|
|
|
print(f" Generating embeddings for {len(verses_to_process)} verses...") |
|
|
await generate_embeddings(translation, provider, verses_to_process, args.batch_size) |
|
|
|
|
|
print(f" ✅ Completed: Generated embeddings for {len(verses_to_process)} verses") |
|
|
successful_models += 1 |
|
|
|
|
|
except Exception as e: |
|
|
print(f" ❌ Failed with error: {e}") |
|
|
failed_models += 1 |
|
|
|
|
|
print() |
|
|
|
|
|
|
|
|
print("=" * 60) |
|
|
print("Auto-embedding generation complete!") |
|
|
print(f" ✅ Successful: {successful_models} models") |
|
|
if failed_models > 0: |
|
|
print(f" ❌ Failed: {failed_models} models") |
|
|
print(f" 📊 Total processed: {successful_models + failed_models} models") |
|
|
|
|
|
if successful_models > 0: |
|
|
print(f"\n💡 New embeddings have been generated. You can now:") |
|
|
print(f" - Use 'uv run main.py query' to search verses") |
|
|
print(f" - Use 'uv run main.py autobatch' to evaluate all models") |
|
|
print(f" - Use 'uv run main.py report' to generate updated reports") |
|
|
|
|
|
|
|
|
def read_all_results_csv(results_dir: Optional[Path] = None) -> List[Dict[str, Any]]:
|
|
"""Read and parse results from all CSV files in the results directory structure.""" |
|
|
if results_dir is None: |
|
|
results_dir = Path("results") |
|
|
|
|
|
if not results_dir.exists(): |
|
|
print(f"Results directory not found: {results_dir}") |
|
|
return [] |
|
|
|
|
|
all_results = [] |
|
|
|
|
|
|
|
|
for provider_dir in results_dir.iterdir(): |
|
|
if not provider_dir.is_dir(): |
|
|
continue |
|
|
|
|
|
print(f"Reading results from provider: {provider_dir.name}") |
|
|
|
|
|
for csv_file in provider_dir.glob("*.csv"): |
|
|
print(f" Reading: {csv_file}") |
|
|
try: |
|
|
with open(csv_file, "r", encoding="utf-8") as f: |
|
|
reader = csv.DictReader(f) |
|
|
for row in reader: |
|
|
|
|
|
try: |
|
|
row["points"] = int(row["points"]) |
|
|
except (ValueError, KeyError): |
|
|
row["points"] = 0 |
|
|
|
|
|
|
|
|
for score_field in ["score1", "score2", "score3"]: |
|
|
try: |
|
|
if score_field in row and row[score_field]: |
|
|
row[score_field] = float(row[score_field]) |
|
|
else: |
|
|
row[score_field] = 0.0 |
|
|
except (ValueError, TypeError): |
|
|
row[score_field] = 0.0 |
|
|
|
|
|
all_results.append(row) |
|
|
except Exception as e: |
|
|
print(f" Error reading CSV file {csv_file}: {e}") |
|
|
continue |
|
|
|
|
|
print(f"Total results loaded: {len(all_results)}") |
|
|
return all_results |
|
|
|
|
|
|
|
|
def read_results_csv(results_file: Path) -> List[Dict[str, Any]]: |
|
|
"""Read and parse results from a single CSV file (backward compatibility).""" |
|
|
if not results_file.exists(): |
|
|
print(f"Results file not found: {results_file}") |
|
|
return [] |
|
|
|
|
|
results = [] |
|
|
try: |
|
|
with open(results_file, "r", encoding="utf-8") as f: |
|
|
reader = csv.DictReader(f) |
|
|
for row in reader: |
|
|
|
|
|
try: |
|
|
row["points"] = int(row["points"]) |
|
|
except (ValueError, KeyError): |
|
|
row["points"] = 0 |
|
|
|
|
|
|
|
|
for score_field in ["score1", "score2", "score3"]: |
|
|
try: |
|
|
if score_field in row and row[score_field]: |
|
|
row[score_field] = float(row[score_field]) |
|
|
else: |
|
|
row[score_field] = 0.0 |
|
|
except (ValueError, TypeError): |
|
|
row[score_field] = 0.0 |
|
|
|
|
|
results.append(row) |
|
|
except Exception as e: |
|
|
print(f"Error reading CSV file {results_file}: {e}") |
|
|
return [] |
|
|
|
|
|
return results |
|
|
|
|
|
|
|
|
def analyze_results(results: List[Dict[str, Any]]) -> Dict[str, Any]: |
|
|
"""Analyze results and generate summary statistics.""" |
|
|
if not results: |
|
|
return {} |
|
|
|
|
|
|
|
|
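    # Aggregate statistics per (provider, model, translation) combination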
provider_model_stats = {} |
|
|
|
|
|
for result in results: |
|
|
provider = result.get("provider", "unknown") |
|
|
model = result.get("model", "unknown") |
|
|
translation = result.get("translation", "unknown") |
|
|
points = result.get("points", 0) |
|
|
|
|
|
key = (provider, model, translation) |
|
|
|
|
|
if key not in provider_model_stats: |
|
|
provider_model_stats[key] = { |
|
|
"provider": provider, |
|
|
"model": model, |
|
|
"translation": translation, |
|
|
"total_queries": 0, |
|
|
"total_points": 0, |
|
|
"max_points": 0, |
|
|
"correct_top1": 0, |
|
|
"correct_top2": 0, |
|
|
"correct_top3": 0, |
|
|
"incorrect": 0, |
|
|
"queries": [] |
|
|
} |
|
|
|
|
|
stats = provider_model_stats[key] |
|
|
stats["total_queries"] += 1 |
|
|
stats["total_points"] += points |
|
|
stats["max_points"] += 3 |
|
|
|
|
|
|
|
|
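        # 3 points = expected verse ranked 1st, 2 = ranked 2nd, 1 = ranked 3rd, 0 = not in top 3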
if points == 3: |
|
|
stats["correct_top1"] += 1 |
|
|
elif points == 2: |
|
|
stats["correct_top2"] += 1 |
|
|
elif points == 1: |
|
|
stats["correct_top3"] += 1 |
|
|
else: |
|
|
stats["incorrect"] += 1 |
|
|
|
|
|
|
|
|
stats["queries"].append({ |
|
|
"query": result.get("query", ""), |
|
|
"expected": result.get("expected", ""), |
|
|
"result1": result.get("result1", ""), |
|
|
"score1": result.get("score1", "0.0000"), |
|
|
"result2": result.get("result2", ""), |
|
|
"score2": result.get("score2", "0.0000"), |
|
|
"result3": result.get("result3", ""), |
|
|
"score3": result.get("score3", "0.0000"), |
|
|
"points": points |
|
|
}) |
|
|
|
|
|
|
|
|
for stats in provider_model_stats.values(): |
|
|
if stats["max_points"] > 0: |
|
|
stats["accuracy"] = (stats["total_points"] / stats["max_points"]) * 100 |
|
|
else: |
|
|
stats["accuracy"] = 0 |
|
|
|
|
|
return provider_model_stats |
|
|
|
|
|
|
|
|
def generate_summary_table(stats: Dict[tuple, Dict[str, Any]]) -> str: |
|
|
"""Generate summary table markdown.""" |
|
|
if not stats: |
|
|
return "No results to display.\n\n" |
|
|
|
|
|
markdown = "## Model Performance Summary\n\n" |
|
|
markdown += "| Provider | Model | Translation | Accuracy | Top 1 | Top 2 | Top 3 | Failed | Total |\n" |
|
|
markdown += "|----------|-------|-------------|----------|-------|-------|-------|-----------|-------|\n" |
|
|
|
|
|
|
|
|
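    # Rank model/translation combinations by accuracy, best first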
sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True) |
|
|
|
|
|
for (provider, model, translation), stat in sorted_stats: |
|
|
markdown += f"| {provider} | {model} | {translation} | " |
|
|
markdown += f"{stat['accuracy']:.1f}% | " |
|
|
markdown += f"{stat['correct_top1']} | " |
|
|
markdown += f"{stat['correct_top2']} | " |
|
|
markdown += f"{stat['correct_top3']} | " |
|
|
markdown += f"{stat['incorrect']} | " |
|
|
markdown += f"{stat['total_queries']} |\n" |
|
|
|
|
|
markdown += "\n" |
|
|
return markdown |
|
|
|
|
|
|
|
|
def generate_detailed_table(stats: Dict[tuple, Dict[str, Any]], max_queries: int = 10) -> str: |
|
|
"""Generate detailed results with individual sections for each model.""" |
|
|
if not stats: |
|
|
return "" |
|
|
|
|
|
markdown = "## Results\n\n" |
|
|
markdown += "✅ denotes accurate result.\n\n" |
|
|
|
|
|
|
|
|
sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True) |
|
|
|
|
|
for (provider, model, translation), stat in sorted_stats: |
|
|
|
|
|
model_display = f"{provider}/{model}" if provider != "HuggingFace" else model |
|
|
markdown += f"### {model_display} ({translation.upper()})\n\n" |
|
|
|
|
|
|
|
|
accuracy = stat["accuracy"] |
|
|
total_points = stat["total_points"] |
|
|
max_points = stat["max_points"] |
|
|
total_queries = len(stat["queries"]) |
|
|
|
|
|
markdown += f"**Accuracy: {accuracy:.1f}%** ({total_points}/{max_points} points across {total_queries} queries)\n\n" |
|
|
|
|
|
|
|
|
markdown += "| Query | Expected | Top Result | Score | ✓ |\n" |
|
|
markdown += "|-------|----------|------------|-------|---|\n" |
|
|
|
|
|
|
|
|
queries = stat["queries"][:max_queries] |
|
|
|
|
|
for query_result in queries: |
|
|
query = query_result["query"] |
|
|
|
|
|
display_query = query[:50] + "..." if len(query) > 50 else query |
|
|
expected = query_result["expected"] |
|
|
|
|
|
|
|
|
if "result1" in query_result and query_result["result1"]: |
|
|
result1 = query_result["result1"] |
|
|
score1 = query_result.get("score1", 0.0) |
|
|
points = query_result.get("points", 0) |
|
|
|
|
|
|
|
|
                # 3 points (rank 1) renders as a hit, 2 points (rank 2) as a partial hit;
                # 1 point (rank 3) and 0 points both render as a miss.
                if points == 3:
                    checkmark = "✅"
                elif points == 2:
                    checkmark = "⚠️"
                else:
                    checkmark = "❌"
|
|
|
|
|
markdown += f"| {display_query} | {expected} | {result1} | {score1:.4f} | {checkmark} |\n" |
|
|
else: |
|
|
markdown += f"| {display_query} | {expected} | No results | 0.0000 | ❌ |\n" |
|
|
|
|
|
if len(stat["queries"]) > max_queries: |
|
|
remaining = len(stat["queries"]) - max_queries |
|
|
markdown += f"\n*... and {remaining} more queries*\n" |
|
|
|
|
|
markdown += "\n" |
|
|
|
|
|
return markdown |
|
|
|
|
|
|
|
|
|
|
|
def generate_report(args): |
|
|
"""Generate markdown report and update only the Query Examples section in README.md.""" |
|
|
print("Bible Verse Embedding Evaluation Report Generator") |
|
|
print("=" * 55) |
|
|
print() |
|
|
|
|
|
output_file = Path(args.output_file) |
|
|
|
|
|
|
|
|
if hasattr(args, 'results_file') and args.results_file: |
|
|
|
|
|
results_file = Path(args.results_file) |
|
|
print(f"Reading results from specific file: {results_file}") |
|
|
results = read_results_csv(results_file) |
|
|
else: |
|
|
|
|
|
print("Reading results from directory structure: ./results/") |
|
|
results = read_all_results_csv() |
|
|
|
|
|
if not results: |
|
|
print("No results found to process.") |
|
|
return |
|
|
|
|
|
print(f"Loaded {len(results)} result entries") |
|
|
|
|
|
|
|
|
print("Analyzing results...") |
|
|
stats = analyze_results(results) |
|
|
|
|
|
if not stats: |
|
|
print("No statistics to generate.") |
|
|
return |
|
|
|
|
|
print(f"Found results for {len(stats)} model/provider combinations") |
|
|
|
|
|
|
|
|
try: |
|
|
with open(output_file, "r", encoding="utf-8") as f: |
|
|
existing_content = f.read() |
|
|
except FileNotFoundError: |
|
|
print(f"README.md not found at {output_file}, creating new file") |
|
|
existing_content = "# Bible Embeddings\n\n" |
|
|
|
|
|
|
|
|
import re |
|
|
|
|
|
legend_pattern = r'## Legend\n\n.*?(?=\n## |\Z)' |
|
|
existing_content = re.sub(legend_pattern, '', existing_content, flags=re.DOTALL) |
|
|
|
|
|
|
|
|
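    # Find the existing Results section (or legacy Query Examples heading) so it can be replaced in place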
results_section_start = existing_content.find("## Results") |
|
|
if results_section_start == -1: |
|
|
results_section_start = existing_content.find("## Query Examples") |
|
|
|
|
|
if results_section_start == -1: |
|
|
|
|
|
print("No existing Results section found, appending to end") |
|
|
new_results_section = generate_detailed_table(stats, args.max_queries) |
|
|
|
|
|
new_results_section += "## Legend\n\n" |
|
|
new_results_section += "- ✅ **Perfect Match** - Expected result appears as #1 result (3 points)\n" |
|
|
new_results_section += "- ⚠️ **Good Match** - Expected result appears as #2 result (2 points)\n" |
|
|
new_results_section += "- ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)\n" |
|
|
new_content = existing_content.rstrip() + "\n\n" + new_results_section |
|
|
else: |
|
|
|
|
|
next_section_start = existing_content.find("\n## ", results_section_start + 1) |
|
|
|
|
|
if next_section_start == -1: |
|
|
|
|
|
before_section = existing_content[:results_section_start] |
|
|
after_section = "" |
|
|
else: |
|
|
before_section = existing_content[:results_section_start] |
|
|
after_section = existing_content[next_section_start:] |
|
|
|
|
|
|
|
|
new_results_section = generate_detailed_table(stats, args.max_queries) |
|
|
|
|
|
new_results_section += "## Legend\n\n" |
|
|
new_results_section += "- ✅ **Perfect Match** - Expected result appears as #1 result (3 points)\n" |
|
|
new_results_section += "- ⚠️ **Good Match** - Expected result appears as #2 result (2 points)\n" |
|
|
new_results_section += "- ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)\n" |
|
|
new_content = before_section + new_results_section + after_section |
|
|
|
|
|
|
|
|
try: |
|
|
with open(output_file, "w", encoding="utf-8") as f: |
|
|
f.write(new_content) |
|
|
print(f"\nResults section updated in: {output_file}") |
|
|
except Exception as e: |
|
|
print(f"Error writing report file: {e}") |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
asyncio.run(main()) |
|
|
|