Ken Powers committed on
Commit 52e1811 · unverified · 1 Parent(s): 52434f6

Batch querying and reporting
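
Adds a `batch` subcommand that runs queries from a YAML file against stored verse embeddings (batched query embedding, optional FAISS HNSW acceleration), scores whether the expected verse lands in the top 3, and appends each outcome to a CSV; and a `report` subcommand that rewrites the Results section of a markdown file from that CSV. Example invocations (flags per the argparse definitions below; the translation and model values are placeholders): `python main.py batch -t kjv -m text-embedding-3-small -q queries.yaml -r results.csv`, then `python main.py report -r results.csv -o README.md`.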

Files changed (3)
  1. main.py +1081 -9
  2. pyproject.toml +2 -0
  3. uv.lock +40 -0
main.py CHANGED
@@ -8,11 +8,18 @@ It supports both commercial APIs and open-source models.
8
 
9
  import json
10
  import asyncio
11
  from pathlib import Path
12
  from typing import List, Dict, Any, Optional, Tuple
13
  from abc import ABC, abstractmethod
14
  import argparse
15
  import numpy as np
16
 
17
 
18
  class EmbeddingProvider(ABC):
@@ -58,7 +65,34 @@ class OpenAIProvider(EmbeddingProvider):
58
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
59
  client = await self._get_client()
60
  response = await client.embeddings.create(input=texts, model=self.model_name)
61
  return [data.embedding for data in response.data]
62
 
63
 
64
  class GeminiProvider(EmbeddingProvider):
@@ -86,7 +120,33 @@ class GeminiProvider(EmbeddingProvider):
86
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
87
  client = await self._get_client()
88
  result = client.embed_content(model=f"models/{self.model_name}", content=texts)
89
  return [embedding["embedding"] for embedding in result["embedding"]]
90
 
91
 
92
  class VoyageProvider(EmbeddingProvider):
@@ -113,7 +173,33 @@ class VoyageProvider(EmbeddingProvider):
113
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
114
  client = await self._get_client()
115
  response = await client.embed(texts, model=self.model_name)
116
  return response.embeddings
117
 
118
 
119
  class HuggingFaceProvider(EmbeddingProvider):
@@ -147,8 +233,37 @@ class HuggingFaceProvider(EmbeddingProvider):
147
 
148
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
149
  model = await self._get_model()
150
- embeddings = model.encode(texts)
151
  return [emb.tolist() for emb in embeddings]
152
 
153
 
154
  class JinaProvider(EmbeddingProvider):
@@ -487,6 +602,67 @@ async def main():
487
  query_parser = subparsers.add_parser(
488
  "query", help="Search Bible verses using embeddings"
489
  )
490
 
491
  args = parser.parse_args()
492
 
@@ -496,7 +672,15 @@ async def main():
496
  return
497
 
498
  if args.command == "query":
499
- await query_mode()
500
  return
501
 
502
  if args.command == "embed":
@@ -614,12 +798,98 @@ def cosine_similarity(a: List[float], b: List[float]) -> float:
614
  return dot_product / (norm_a * norm_b)
615
 
616
 
617
  async def search_embeddings(
618
- query: str, provider: EmbeddingProvider, translation: str, top_k: int = 10
619
  ) -> List[Tuple[Dict[str, Any], float]]:
620
- """Search for similar verses using embeddings."""
621
- # Load all embeddings for the model
622
- all_embeddings = load_embeddings_for_model(provider, translation)
623
 
624
  if not all_embeddings:
625
  return []
@@ -628,7 +898,14 @@ async def search_embeddings(
628
  print(f"Generating embedding for query: '{query}'")
629
  query_embedding = await provider.embed_text(query)
630
 
631
- # Calculate similarities
632
  results = []
633
  for embedding_data in all_embeddings:
634
  similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
@@ -755,8 +1032,9 @@ def select_model_for_query() -> EmbeddingProvider:
755
  exit(0)
756
 
757
 
758
- async def query_mode():
759
  """Interactive query mode."""
760
  print("Bible Verse Search Mode")
761
  print("=" * 30)
762
  print()
@@ -791,7 +1069,7 @@ async def query_mode():
791
  top_k = 10
792
 
793
  # Perform search
794
- results = await search_embeddings(query, provider, translation, top_k)
795
 
796
  if results:
797
  display_search_results(results, query)
@@ -808,5 +1086,799 @@ async def query_mode():
808
  print("Please try again.")
809
 
810
 
811
  if __name__ == "__main__":
812
  asyncio.run(main())
 
8
 
9
  import json
10
  import asyncio
11
+ from asyncio import Semaphore
12
  from pathlib import Path
13
  from typing import List, Dict, Any, Optional, Tuple
14
  from abc import ABC, abstractmethod
15
  import argparse
16
  import numpy as np
17
+ import yaml
18
+ import faiss
19
+ import csv
20
+ from datetime import datetime
21
+
22
+
23
 
24
 
25
  class EmbeddingProvider(ABC):
 
65
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
66
  client = await self._get_client()
67
  response = await client.embeddings.create(input=texts, model=self.model_name)
68
+ # OpenAI embeddings are automatically normalized to unit length
69
  return [data.embedding for data in response.data]
70
+
71
+ async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
72
+ """Efficiently embed multiple queries using OpenAI's batch API capability."""
73
+ all_embeddings = []
74
+
75
+ # Process in batches to respect API limits
76
+ for i in range(0, len(queries), batch_size):
77
+ batch = queries[i:i + batch_size]
78
+ print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
79
+
80
+ try:
81
+ batch_embeddings = await self.embed_batch(batch)
82
+ all_embeddings.extend(batch_embeddings)
83
+ except Exception as e:
84
+ print(f"Error in batch embedding: {e}")
85
+ # Fall back to individual embedding for this batch
86
+ for query in batch:
87
+ try:
88
+ embedding = await self.embed_text(query)
89
+ all_embeddings.append(embedding)
90
+ except Exception as e2:
91
+ print(f"Error embedding query '{query}': {e2}")
92
+ # Add zero embedding as placeholder
93
+ all_embeddings.append([0.0] * 1536) # Default dimension for text-embedding models
94
+
95
+ return all_embeddings
96
 
97
 
98
  class GeminiProvider(EmbeddingProvider):
 
120
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
121
  client = await self._get_client()
122
  result = client.embed_content(model=f"models/{self.model_name}", content=texts)
123
+ # Gemini embeddings are automatically normalized to unit length
124
  return [embedding["embedding"] for embedding in result["embedding"]]
125
+
126
+ async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
127
+ """Efficiently embed multiple queries using Gemini's batch capability."""
128
+ all_embeddings = []
129
+
130
+ for i in range(0, len(queries), batch_size):
131
+ batch = queries[i:i + batch_size]
132
+ print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
133
+
134
+ try:
135
+ batch_embeddings = await self.embed_batch(batch)
136
+ all_embeddings.extend(batch_embeddings)
137
+ except Exception as e:
138
+ print(f"Error in batch embedding: {e}")
139
+ # Fall back to individual embedding for this batch
140
+ for query in batch:
141
+ try:
142
+ embedding = await self.embed_text(query)
143
+ all_embeddings.append(embedding)
144
+ except Exception as e2:
145
+ print(f"Error embedding query '{query}': {e2}")
146
+ # Add zero embedding as placeholder
147
+ all_embeddings.append([0.0] * 768) # Default dimension for Gemini models
148
+
149
+ return all_embeddings
150
 
151
 
152
  class VoyageProvider(EmbeddingProvider):
 
173
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
174
  client = await self._get_client()
175
  response = await client.embed(texts, model=self.model_name)
176
+ # Voyage AI embeddings are automatically normalized to unit length
177
  return response.embeddings
178
+
179
+ async def embed_queries_batch(self, queries: List[str], batch_size: int = 100) -> List[List[float]]:
180
+ """Efficiently embed multiple queries using Voyage's batch capability."""
181
+ all_embeddings = []
182
+
183
+ for i in range(0, len(queries), batch_size):
184
+ batch = queries[i:i + batch_size]
185
+ print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
186
+
187
+ try:
188
+ batch_embeddings = await self.embed_batch(batch)
189
+ all_embeddings.extend(batch_embeddings)
190
+ except Exception as e:
191
+ print(f"Error in batch embedding: {e}")
192
+ # Fall back to individual embedding for this batch
193
+ for query in batch:
194
+ try:
195
+ embedding = await self.embed_text(query)
196
+ all_embeddings.append(embedding)
197
+ except Exception as e2:
198
+ print(f"Error embedding query '{query}': {e2}")
199
+ # Add zero embedding as placeholder
200
+ all_embeddings.append([0.0] * 1024) # Default dimension for Voyage models
201
+
202
+ return all_embeddings
203
 
204
 
205
  class HuggingFaceProvider(EmbeddingProvider):
 
233
 
234
  async def embed_batch(self, texts: List[str]) -> List[List[float]]:
235
  model = await self._get_model()
236
+ # Use native normalization parameter for sentence-transformers
237
+ embeddings = model.encode(texts, normalize_embeddings=True)
238
  return [emb.tolist() for emb in embeddings]
239
+
240
+ async def embed_queries_batch(self, queries: List[str], batch_size: int = 500) -> List[List[float]]:
241
+ """Efficiently embed multiple queries using HuggingFace's batch capability."""
242
+ all_embeddings = []
243
+
244
+ # HuggingFace can handle larger batches efficiently since it's local
245
+ for i in range(0, len(queries), batch_size):
246
+ batch = queries[i:i + batch_size]
247
+ print(f"Embedding batch {i//batch_size + 1}/{(len(queries) + batch_size - 1)//batch_size} ({len(batch)} queries)")
248
+
249
+ try:
250
+ batch_embeddings = await self.embed_batch(batch)
251
+ all_embeddings.extend(batch_embeddings)
252
+ except Exception as e:
253
+ print(f"Error in batch embedding: {e}")
254
+ # Fall back to individual embedding for this batch
255
+ for query in batch:
256
+ try:
257
+ embedding = await self.embed_text(query)
258
+ all_embeddings.append(embedding)
259
+ except Exception as e2:
260
+ print(f"Error embedding query '{query}': {e2}")
261
+ # Add zero embedding as placeholder based on model
262
+ model = await self._get_model()
263
+ dim = getattr(model, 'get_sentence_embedding_dimension', lambda: 384)()
264
+ all_embeddings.append([0.0] * dim)
265
+
266
+ return all_embeddings
267
 
268
 
269
  class JinaProvider(EmbeddingProvider):
 
602
  query_parser = subparsers.add_parser(
603
  "query", help="Search Bible verses using embeddings"
604
  )
605
+ query_parser.add_argument(
606
+ "--no-hnsw",
607
+ action="store_true",
608
+ help="Disable HNSW optimization and use brute-force search"
609
+ )
610
+
611
+ # Batch subcommand
612
+ batch_parser = subparsers.add_parser(
613
+ "batch", help="Run batch queries from YAML file and evaluate results"
614
+ )
615
+
616
+ # Report subcommand
617
+ report_parser = subparsers.add_parser(
618
+ "report", help="Generate markdown report from batch results CSV"
619
+ )
620
+ report_parser.add_argument(
621
+ "--results-file",
622
+ "-r",
623
+ default="results.csv",
624
+ help="CSV file to read results from (default: results.csv)"
625
+ )
626
+ report_parser.add_argument(
627
+ "--output-file",
628
+ "-o",
629
+ default="README.md",
630
+ help="Output markdown file (default: README.md)"
631
+ )
632
+ report_parser.add_argument(
633
+ "--max-queries",
634
+ "-q",
635
+ type=int,
636
+ default=10,
637
+ help="Maximum number of queries to show in detailed results (default: 10)"
638
+ )
639
+
640
+ batch_parser.add_argument("--translation", "-t", help="Translation to use")
641
+ batch_parser.add_argument("--model", "-m", help="Model to use")
642
+ batch_parser.add_argument(
643
+ "--queries-file",
644
+ "-q",
645
+ default="queries.yaml",
646
+ help="YAML file containing queries and expected results (default: queries.yaml)"
647
+ )
648
+ batch_parser.add_argument(
649
+ "--results-file",
650
+ "-r",
651
+ default="results.csv",
652
+ help="CSV file to append results to (default: results.csv)"
653
+ )
654
+ batch_parser.add_argument(
655
+ "--concurrency",
656
+ "-c",
657
+ type=int,
658
+ default=5,
659
+ help="Number of concurrent queries to process (default: 5)"
660
+ )
661
+ batch_parser.add_argument(
662
+ "--no-hnsw",
663
+ action="store_true",
664
+ help="Disable HNSW optimization and use brute-force search"
665
+ )
666
 
667
  args = parser.parse_args()
668
 
 
672
  return
673
 
674
  if args.command == "query":
675
+ await query_mode(args)
676
+ return
677
+
678
+ if args.command == "batch":
679
+ await batch_mode(args)
680
+ return
681
+
682
+ if args.command == "report":
683
+ generate_report(args)
684
  return
685
 
686
  if args.command == "embed":
 
798
  return dot_product / (norm_a * norm_b)
799
 
800
 
801
+ class HNSWIndex:
802
+ """FAISS-based index for fast approximate nearest neighbor search."""
803
+
804
+ def __init__(self, dimension: int, max_elements: int = 100000):
805
+ self.dimension = dimension
806
+ self.max_elements = max_elements
807
+ # FAISS HNSW index; IndexHNSWFlat defaults to the L2 metric, and on L2-normalized vectors L2 ranking matches cosine ranking
808
+ self.index = faiss.IndexHNSWFlat(dimension, 32) # 32 is M parameter
809
+ self.index.hnsw.efConstruction = 200
810
+ self.embeddings_data = []
811
+ self.built = False
812
+
813
+ def add_embeddings(self, embeddings_data: List[Dict[str, Any]]):
814
+ """Add embeddings to the index."""
815
+ embeddings_array = np.array([data["embedding"] for data in embeddings_data]).astype('float32')
816
+
817
+ # All providers now return normalized embeddings, but normalize again to ensure consistency
818
+ # so that L2 distances in the FAISS index rank candidates the same way cosine similarity would
819
+ faiss.normalize_L2(embeddings_array)
820
+
821
+ self.index.add(embeddings_array)
822
+ self.embeddings_data = embeddings_data
823
+ self.built = True
824
+
825
+ # Set ef parameter for search (higher = more accurate but slower)
826
+ self.index.hnsw.efSearch = max(50, min(200, len(embeddings_data) // 10))
827
+
828
+ def search(self, query_embedding: List[float], k: int = 10) -> List[Tuple[Dict[str, Any], float]]:
829
+ """Search for k nearest neighbors and return with exact cosine similarities."""
830
+ if not self.built:
831
+ return []
832
+
833
+ query_array = np.array([query_embedding]).astype('float32').reshape(1, -1)
834
+ # All providers now return normalized embeddings, but normalize again to ensure consistency
835
+ # so that L2 distances in the FAISS index rank candidates the same way cosine similarity would
836
+ faiss.normalize_L2(query_array)
837
+
838
+ # Get approximate neighbors (returns more than k for exact rescoring)
839
+ search_k = min(k * 3, len(self.embeddings_data)) # Get 3x candidates for rescoring
840
+ distances, indices = self.index.search(query_array, search_k)
841
+
842
+ # Calculate exact cosine similarities for the candidates
843
+ results = []
844
+ for idx in indices[0]:
845
+ if idx < len(self.embeddings_data) and idx >= 0: # FAISS can return -1 for invalid indices
846
+ embedding_data = self.embeddings_data[idx]
847
+ exact_similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
848
+ results.append((embedding_data, exact_similarity))
849
+
850
+ # Sort by exact similarity and return top k
851
+ results.sort(key=lambda x: x[1], reverse=True)
852
+ return results[:k]
853
+
854
+
855
+ # Global cache for HNSW indices
856
+ _hnsw_cache = {}
857
+
858
+ def get_hnsw_index(provider: EmbeddingProvider, translation: str, all_embeddings: List[Dict[str, Any]]) -> Optional[HNSWIndex]:
859
+ """Get or create FAISS HNSW index for given provider and translation."""
860
+ cache_key = f"{provider.model_name}_{translation}"
861
+
862
+ if cache_key in _hnsw_cache:
863
+ return _hnsw_cache[cache_key]
864
+
865
+ if not all_embeddings:
866
+ return None
867
+
868
+ # Determine embedding dimension from first embedding
869
+ dimension = len(all_embeddings[0]["embedding"])
870
+
871
+ # Create and build HNSW index
872
+ print(f"Building FAISS HNSW index for {len(all_embeddings)} embeddings (dimension: {dimension})...")
873
+ hnsw_index = HNSWIndex(dimension, max_elements=len(all_embeddings) * 2)
874
+ hnsw_index.add_embeddings(all_embeddings)
875
+
876
+ # Cache the index
877
+ _hnsw_cache[cache_key] = hnsw_index
878
+ print(f"FAISS HNSW index built and cached for {cache_key}")
879
+
880
+ return hnsw_index
881
+
882
+
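
A minimal, self-contained sketch of the same FAISS pattern used by HNSWIndex above, on hypothetical toy data (IndexHNSWFlat defaults to the L2 metric; on unit-length vectors, L2 ranking coincides with cosine ranking):

    import numpy as np
    import faiss

    vecs = np.random.rand(1000, 64).astype("float32")
    faiss.normalize_L2(vecs)              # unit length: L2 order == cosine order
    index = faiss.IndexHNSWFlat(64, 32)   # 64-dim vectors, M=32 links per node
    index.hnsw.efConstruction = 200
    index.add(vecs)

    query = np.random.rand(1, 64).astype("float32")
    faiss.normalize_L2(query)
    index.hnsw.efSearch = 50
    distances, ids = index.search(query, 10)  # approximate top-10, squared-L2 distances
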
883
  async def search_embeddings(
884
+ query: str, provider: EmbeddingProvider, translation: str, top_k: int = 10,
885
+ pre_loaded_embeddings: Optional[List[Dict[str, Any]]] = None, use_hnsw: bool = True
886
  ) -> List[Tuple[Dict[str, Any], float]]:
887
+ """Search for similar verses using embeddings with optional HNSW optimization."""
888
+ # Use pre-loaded embeddings if provided, otherwise load them
889
+ if pre_loaded_embeddings is not None:
890
+ all_embeddings = pre_loaded_embeddings
891
+ else:
892
+ all_embeddings = load_embeddings_for_model(provider, translation)
893
 
894
  if not all_embeddings:
895
  return []
 
898
  print(f"Generating embedding for query: '{query}'")
899
  query_embedding = await provider.embed_text(query)
900
 
901
+ # Use HNSW when explicitly requested
902
+ if use_hnsw:
903
+ hnsw_index = get_hnsw_index(provider, translation, all_embeddings)
904
+ if hnsw_index:
905
+ return hnsw_index.search(query_embedding, top_k)
906
+
907
+ # Fallback to brute-force search for small datasets or when HNSW fails
908
+ print(f"Using brute-force search for {len(all_embeddings)} embeddings")
909
  results = []
910
  for embedding_data in all_embeddings:
911
  similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
 
1032
  exit(0)
1033
 
1034
 
1035
+ async def query_mode(args):
1036
  """Interactive query mode."""
1037
+ use_hnsw = not args.no_hnsw
1038
  print("Bible Verse Search Mode")
1039
  print("=" * 30)
1040
  print()
 
1069
  top_k = 10
1070
 
1071
  # Perform search
1072
+ results = await search_embeddings(query, provider, translation, top_k, use_hnsw=use_hnsw)
1073
 
1074
  if results:
1075
  display_search_results(results, query)
 
1086
  print("Please try again.")
1087
 
1088
 
1089
+ def load_queries(queries_file: Path) -> List[Dict[str, Any]]:
1090
+ """Load queries from YAML file."""
1091
+ if not queries_file.exists():
1092
+ print(f"Queries file not found: {queries_file}")
1093
+ print("Expected format:")
1094
+ print("""# Simple format - list of query/expected pairs
1095
+ - query: "love your enemies"
1096
+ expected: "Matthew 5:44"
1097
+ - query: "faith hope love"
1098
+ expected: "1 Corinthians 13:13"
1099
+
1100
+ # Or with expected as list for multiple valid answers
1101
+ - query: "god is love"
1102
+ expected: ["1 John 4:8", "1 John 4:16"]
1103
+ """)
1104
+ exit(1)
1105
+
1106
+ try:
1107
+ with open(queries_file, "r", encoding="utf-8") as f:
1108
+ queries = yaml.safe_load(f)
1109
+
1110
+ # Handle both formats: list directly or under "queries" key
1111
+ if isinstance(queries, dict) and "queries" in queries:
1112
+ queries = queries["queries"]
1113
+ elif not isinstance(queries, list):
1114
+ print(f"Invalid queries file format. Expected list of queries in {queries_file}")
1115
+ exit(1)
1116
+
1117
+ return queries
1118
+ except Exception as e:
1119
+ print(f"Error loading queries file {queries_file}: {e}")
1120
+ exit(1)
1121
+
1122
+
1123
+ def parse_verse_reference(ref: str) -> Tuple[str, int, int]:
1124
+ """Parse a verse reference like 'Matthew 5:44' into (book, chapter, verse)."""
1125
+ try:
1126
+ # Split on the last space to separate book from chapter:verse
1127
+ parts = ref.rsplit(" ", 1)
1128
+ if len(parts) != 2:
1129
+ raise ValueError("Invalid format")
1130
+
1131
+ book = parts[0].strip()
1132
+ chapter_verse = parts[1]
1133
+
1134
+ # Split chapter:verse
1135
+ if ":" not in chapter_verse:
1136
+ raise ValueError("Missing verse number")
1137
+
1138
+ chapter_str, verse_str = chapter_verse.split(":", 1)
1139
+ chapter = int(chapter_str.strip())
1140
+ verse = int(verse_str.strip())
1141
+
1142
+ return book, chapter, verse
1143
+ except Exception as e:
1144
+ print(f"Error parsing verse reference '{ref}': {e}")
1145
+ return None, None, None
1146
+
1147
+
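
A few example inputs, showing why the rsplit matters for numbered and multi-word book names:

    parse_verse_reference("Matthew 5:44")         # ("Matthew", 5, 44)
    parse_verse_reference("1 Corinthians 13:13")  # ("1 Corinthians", 13, 13)
    parse_verse_reference("Song of Solomon 2:1")  # ("Song of Solomon", 2, 1)
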
1148
+ def calculate_score(expected_refs, results: List[Tuple[Dict[str, Any], float]]) -> int:
1149
+ """Calculate score based on where expected result appears in top 3 results."""
1150
+ # Handle both single string and list of expected references
1151
+ if isinstance(expected_refs, str):
1152
+ expected_refs = [expected_refs]
1153
+ elif not expected_refs:
1154
+ return 0
1155
+
1156
+ # Parse all expected references
1157
+ expected_verses = []
1158
+ for ref in expected_refs:
1159
+ book, chapter, verse = parse_verse_reference(ref)
1160
+ if book:
1161
+ expected_verses.append((book, chapter, verse))
1162
+
1163
+ if not expected_verses:
1164
+ return 0
1165
+
1166
+ # Check if any expected verse appears in top 3 results
1167
+ for i, (verse_data, similarity) in enumerate(results[:3]):
1168
+ verse_tuple = (verse_data["book"], verse_data["chapter"], verse_data["verse"])
1169
+ if verse_tuple in expected_verses:
1170
+ return 3 - i # 3 points for 1st, 2 for 2nd, 1 for 3rd
1171
+
1172
+ return 0 # No points if not in top 3
1173
+
1174
+
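
A small worked example of the scoring, using hypothetical verse dicts shaped like search_embeddings results:

    hits = [
        ({"book": "Matthew", "chapter": 5, "verse": 44}, 0.91),
        ({"book": "Luke", "chapter": 6, "verse": 27}, 0.88),
        ({"book": "Romans", "chapter": 12, "verse": 20}, 0.85),
    ]
    calculate_score("Matthew 5:44", hits)                 # 3: top hit
    calculate_score("Luke 6:27", hits)                    # 2: second place
    calculate_score(["John 3:16", "Romans 12:20"], hits)  # 1: a valid answer is third
    calculate_score("John 3:16", hits)                    # 0: not in the top 3
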
1175
+ def format_verse_reference(verse_data: Dict[str, Any]) -> str:
1176
+ """Format verse data into a reference string."""
1177
+ return f"{verse_data['book']} {verse_data['chapter']}:{verse_data['verse']}"
1178
+
1179
+
1180
+ async def process_single_query(
1181
+ query_data: Dict[str, Any],
1182
+ provider: EmbeddingProvider,
1183
+ translation: str,
1184
+ query_index: int,
1185
+ total_queries: int,
1186
+ semaphore: Semaphore,
1187
+ pre_loaded_embeddings: List[Dict[str, Any]],
1188
+ use_hnsw: bool = True
1189
+ ) -> Dict[str, Any]:
1190
+ """Process a single query with concurrency control."""
1191
+ async with semaphore:
1192
+ query = query_data.get("query", "")
1193
+ expected = query_data.get("expected", "")
1194
+
1195
+ if not query:
1196
+ print(f"Skipping query {query_index}: missing 'query' field")
1197
+ return None
1198
+
1199
+ print(f"Query {query_index}/{total_queries}: {query}")
1200
+
1201
+ try:
1202
+ # Get top 3 results using pre-loaded embeddings
1203
+ search_results = await search_embeddings(query, provider, translation, top_k=3, pre_loaded_embeddings=pre_loaded_embeddings, use_hnsw=use_hnsw)
1204
+
1205
+ if not search_results:
1206
+ print(f" No results found for query: {query}")
1207
+ return None
1208
+
1209
+ # Calculate score if expected result is provided
1210
+ score = 0
1211
+ if expected:
1212
+ score = calculate_score(expected, search_results)
1213
+ expected_str = str(expected) if isinstance(expected, list) else expected
1214
+ print(f" Expected: {expected_str} | Score: {score}/3")
1215
+
1216
+ # Format results
1217
+ result_data = {
1218
+ "query": query,
1219
+ "expected": expected,
1220
+ "score": score,
1221
+ "results": []
1222
+ }
1223
+
1224
+ for j, (verse_data, similarity) in enumerate(search_results):
1225
+ result_ref = format_verse_reference(verse_data)
1226
+ result_data["results"].append({
1227
+ "reference": result_ref,
1228
+ "similarity": similarity
1229
+ })
1230
+ print(f" {j+1}. {result_ref} ({similarity:.4f})")
1231
+
1232
+ return result_data
1233
+
1234
+ except Exception as e:
1235
+ print(f"Error processing query '{query}': {e}")
1236
+ return None
1237
+
1238
+
1239
+ async def run_batch_queries_openai_optimized(
1240
+ queries: List[Dict[str, Any]],
1241
+ provider: OpenAIProvider,
1242
+ translation: str,
1243
+ batch_size: int = 100,
1244
+ use_hnsw: bool = True
1245
+ ) -> List[Dict[str, Any]]:
1246
+ """Run batch queries optimized for OpenAI using their batch embedding capability."""
1247
+ total_queries = len(queries)
1248
+
1249
+ print(f"Running {total_queries} batch queries using OpenAI batch optimization...")
1250
+
1251
+ # Load embeddings once for all queries
1252
+ print("Loading verse embeddings for similarity search...")
1253
+ pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
1254
+
1255
+ if not pre_loaded_embeddings:
1256
+ print("No embeddings found! Make sure embeddings exist for this model and translation.")
1257
+ return []
1258
+
1259
+ print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
1260
+
1261
+ # Extract all query texts for batch embedding
1262
+ query_texts = []
1263
+ query_data_list = []
1264
+ for query_data in queries:
1265
+ query = query_data.get("query", "")
1266
+ if query:
1267
+ query_texts.append(query)
1268
+ query_data_list.append(query_data)
1269
+
1270
+ if not query_texts:
1271
+ print("No valid queries found!")
1272
+ return []
1273
+
1274
+ # Embed all queries in batches using OpenAI's batch API
1275
+ print(f"Embedding {len(query_texts)} queries using OpenAI batch API...")
1276
+ query_embeddings = await provider.embed_queries_batch(query_texts, batch_size)
1277
+
1278
+ if len(query_embeddings) != len(query_texts):
1279
+ print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}")
1280
+
1281
+ # Build HNSW index if requested
1282
+ hnsw_index = None
1283
+ if use_hnsw:
1284
+ hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings)
1285
+
1286
+ # Process results
1287
+ results = []
1288
+ for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)):
1289
+ query = query_data.get("query", "")
1290
+ expected = query_data.get("expected", "")
1291
+
1292
+ print(f"Processing query {i+1}/{len(query_texts)}: {query}")
1293
+
1294
+ # Use HNSW for similarity search if available, otherwise brute-force
1295
+ if hnsw_index:
1296
+ search_results = hnsw_index.search(query_embedding, k=3)
1297
+ else:
1298
+ # Calculate similarities with all verses
1299
+ similarities = []
1300
+ for embedding_data in pre_loaded_embeddings:
1301
+ similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
1302
+ similarities.append((embedding_data, similarity))
1303
+
1304
+ # Sort by similarity and get top 3
1305
+ similarities.sort(key=lambda x: x[1], reverse=True)
1306
+ search_results = similarities[:3]
1307
+
1308
+ # Calculate score if expected result is provided
1309
+ score = 0
1310
+ if expected:
1311
+ score = calculate_score(expected, search_results)
1312
+ expected_str = str(expected) if isinstance(expected, list) else expected
1313
+ print(f" Expected: {expected_str} | Score: {score}/3")
1314
+
1315
+ # Format results
1316
+ result_data = {
1317
+ "query": query,
1318
+ "expected": expected,
1319
+ "score": score,
1320
+ "results": []
1321
+ }
1322
+
1323
+ for j, (verse_data, similarity) in enumerate(search_results):
1324
+ result_ref = format_verse_reference(verse_data)
1325
+ result_data["results"].append({
1326
+ "reference": result_ref,
1327
+ "similarity": similarity
1328
+ })
1329
+ print(f" {j+1}. {result_ref} ({similarity:.4f})")
1330
+
1331
+ results.append(result_data)
1332
+
1333
+ print(f"\nProcessed {len(results)} queries successfully")
1334
+ return results
1335
+
1336
+
1337
+ async def run_batch_queries_optimized(
1338
+ queries: List[Dict[str, Any]],
1339
+ provider: EmbeddingProvider,
1340
+ translation: str,
1341
+ batch_size: int = 100,
1342
+ use_hnsw: bool = True
1343
+ ) -> List[Dict[str, Any]]:
1344
+ """Run batch queries optimized for providers that support batch embedding."""
1345
+ total_queries = len(queries)
1346
+
1347
+ print(f"Running {total_queries} batch queries using batch optimization...")
1348
+
1349
+ # Load embeddings once for all queries
1350
+ print("Loading verse embeddings for similarity search...")
1351
+ pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
1352
+
1353
+ if not pre_loaded_embeddings:
1354
+ print("No embeddings found! Make sure embeddings exist for this model and translation.")
1355
+ return []
1356
+
1357
+ print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
1358
+
1359
+ # Extract all query texts for batch embedding
1360
+ query_texts = []
1361
+ query_data_list = []
1362
+ for query_data in queries:
1363
+ query = query_data.get("query", "")
1364
+ if query:
1365
+ query_texts.append(query)
1366
+ query_data_list.append(query_data)
1367
+
1368
+ if not query_texts:
1369
+ print("No valid queries found!")
1370
+ return []
1371
+
1372
+ # Embed all queries in batches using provider's batch API
1373
+ print(f"Embedding {len(query_texts)} queries using batch API...")
1374
+
1375
+ # Adjust batch size based on provider type
1376
+ if isinstance(provider, HuggingFaceProvider):
1377
+ batch_size = 500 # Local models can handle larger batches
1378
+ elif isinstance(provider, (OpenAIProvider, GeminiProvider, VoyageProvider)):
1379
+ batch_size = 100 # API models use smaller batches
1380
+
1381
+ query_embeddings = await provider.embed_queries_batch(query_texts, batch_size)
1382
+
1383
+ if len(query_embeddings) != len(query_texts):
1384
+ print(f"Warning: Expected {len(query_texts)} embeddings, got {len(query_embeddings)}")
1385
+
1386
+ # Build HNSW index if requested
1387
+ hnsw_index = None
1388
+ if use_hnsw:
1389
+ hnsw_index = get_hnsw_index(provider, translation, pre_loaded_embeddings)
1390
+
1391
+ # Process results
1392
+ results = []
1393
+ for i, (query_data, query_embedding) in enumerate(zip(query_data_list, query_embeddings)):
1394
+ query = query_data.get("query", "")
1395
+ expected = query_data.get("expected", "")
1396
+
1397
+ print(f"Processing query {i+1}/{len(query_texts)}: {query}")
1398
+
1399
+ # Use HNSW for similarity search if available, otherwise brute-force
1400
+ if hnsw_index:
1401
+ search_results = hnsw_index.search(query_embedding, k=3)
1402
+ else:
1403
+ # Calculate similarities with all verses
1404
+ similarities = []
1405
+ for embedding_data in pre_loaded_embeddings:
1406
+ similarity = cosine_similarity(query_embedding, embedding_data["embedding"])
1407
+ similarities.append((embedding_data, similarity))
1408
+
1409
+ # Sort by similarity and get top 3
1410
+ similarities.sort(key=lambda x: x[1], reverse=True)
1411
+ search_results = similarities[:3]
1412
+
1413
+ # Calculate score if expected result is provided
1414
+ score = 0
1415
+ if expected:
1416
+ score = calculate_score(expected, search_results)
1417
+ expected_str = str(expected) if isinstance(expected, list) else expected
1418
+ print(f" Expected: {expected_str} | Score: {score}/3")
1419
+
1420
+ # Format results
1421
+ result_data = {
1422
+ "query": query,
1423
+ "expected": expected,
1424
+ "score": score,
1425
+ "results": []
1426
+ }
1427
+
1428
+ for j, (verse_data, similarity) in enumerate(search_results):
1429
+ result_ref = format_verse_reference(verse_data)
1430
+ result_data["results"].append({
1431
+ "reference": result_ref,
1432
+ "similarity": similarity
1433
+ })
1434
+ print(f" {j+1}. {result_ref} ({similarity:.4f})")
1435
+
1436
+ results.append(result_data)
1437
+
1438
+ print(f"\nProcessed {len(results)} queries successfully")
1439
+ return results
1440
+
1441
+
1442
+ async def run_batch_queries(
1443
+ queries: List[Dict[str, Any]],
1444
+ provider: EmbeddingProvider,
1445
+ translation: str,
1446
+ concurrency: int = 5,
1447
+ use_hnsw: bool = True
1448
+ ) -> List[Dict[str, Any]]:
1449
+ """Run all batch queries with provider-specific optimizations."""
1450
+ # Check if provider supports batch optimization
1451
+ if hasattr(provider, 'embed_queries_batch'):
1452
+ provider_name = type(provider).__name__
1453
+ print(f"Using {provider_name} batch optimization...")
1454
+ return await run_batch_queries_optimized(queries, provider, translation, use_hnsw=use_hnsw)
1455
+
1456
+ # Fall back to standard concurrent processing for other providers
1457
+ total_queries = len(queries)
1458
+
1459
+ print(f"Running {total_queries} batch queries with concurrency limit of {concurrency}...")
1460
+
1461
+ # Load embeddings once for all queries
1462
+ print("Loading embeddings for all queries...")
1463
+ pre_loaded_embeddings = load_embeddings_for_model(provider, translation)
1464
+
1465
+ if not pre_loaded_embeddings:
1466
+ print("No embeddings found! Make sure embeddings exist for this model and translation.")
1467
+ return []
1468
+
1469
+ print(f"Loaded {len(pre_loaded_embeddings)} verse embeddings")
1470
+
1471
+ # Create semaphore to limit concurrent requests
1472
+ semaphore = Semaphore(concurrency)
1473
+
1474
+ # Create tasks for all queries
1475
+ tasks = []
1476
+ for i, query_data in enumerate(queries, 1):
1477
+ task = process_single_query(
1478
+ query_data, provider, translation, i, total_queries, semaphore, pre_loaded_embeddings, use_hnsw
1479
+ )
1480
+ tasks.append(task)
1481
+
1482
+ # Run all tasks concurrently
1483
+ print(f"Starting concurrent processing...")
1484
+ results = await asyncio.gather(*tasks, return_exceptions=True)
1485
+
1486
+ # Filter out None results and exceptions
1487
+ valid_results = []
1488
+ for result in results:
1489
+ if isinstance(result, Exception):
1490
+ print(f"Exception in batch processing: {result}")
1491
+ elif result is not None:
1492
+ valid_results.append(result)
1493
+
1494
+ print(f"\nProcessed {len(valid_results)} queries successfully out of {total_queries}")
1495
+ return valid_results
1496
+
1497
+
1498
+ def append_to_csv(
1499
+ results_file: Path,
1500
+ results: List[Dict[str, Any]],
1501
+ provider: EmbeddingProvider,
1502
+ translation: str
1503
+ ):
1504
+ """Append results to CSV file."""
1505
+ provider_name = get_model_provider(provider)
1506
+ model_name = provider.get_name()
1507
+
1508
+ # Create CSV headers if file doesn't exist
1509
+ file_exists = results_file.exists()
1510
+
1511
+ with open(results_file, "a", newline="", encoding="utf-8") as f:
1512
+ fieldnames = [
1513
+ "timestamp", "provider", "model", "translation", "query", "expected",
1514
+ "result1", "score1", "result2", "score2", "result3", "score3", "points"
1515
+ ]
1516
+ writer = csv.DictWriter(f, fieldnames=fieldnames)
1517
+
1518
+ if not file_exists:
1519
+ writer.writeheader()
1520
+
1521
+ timestamp = datetime.now().isoformat()
1522
+
1523
+ for result in results:
1524
+ # Pad results to ensure we have 3 entries
1525
+ padded_results = result["results"] + [{"reference": "", "similarity": 0.0}] * 3
1526
+ padded_results = padded_results[:3] # Take only first 3
1527
+
1528
+ row = {
1529
+ "timestamp": timestamp,
1530
+ "provider": provider_name,
1531
+ "model": model_name,
1532
+ "translation": translation,
1533
+ "query": result["query"],
1534
+ "expected": result["expected"],
1535
+ "result1": padded_results[0]["reference"],
1536
+ "score1": f"{padded_results[0]['similarity']:.4f}",
1537
+ "result2": padded_results[1]["reference"],
1538
+ "score2": f"{padded_results[1]['similarity']:.4f}",
1539
+ "result3": padded_results[2]["reference"],
1540
+ "score3": f"{padded_results[2]['similarity']:.4f}",
1541
+ "points": result["score"]
1542
+ }
1543
+ writer.writerow(row)
1544
+
1545
+
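
The resulting CSV has this shape (the row values below are illustrative, not real results):

    timestamp,provider,model,translation,query,expected,result1,score1,result2,score2,result3,score3,points
    2025-08-13T12:00:00,OpenAI,text-embedding-3-small,kjv,love your enemies,Matthew 5:44,Matthew 5:44,0.9123,Luke 6:27,0.8842,Romans 12:20,0.8511,3
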
1546
+ async def batch_mode(args):
1547
+ """Batch query processing mode."""
1548
+ print("Bible Verse Batch Query Mode")
1549
+ print("=" * 35)
1550
+ print()
1551
+
1552
+ # Load queries
1553
+ queries_file = Path(args.queries_file)
1554
+ queries = load_queries(queries_file)
1555
+
1556
+ print(f"Loaded {len(queries)} queries from {queries_file}")
1557
+
1558
+ # Select translation
1559
+ if args.translation:
1560
+ translation = args.translation
1561
+ if translation not in get_available_translations():
1562
+ print(f"Translation '{translation}' not found!")
1563
+ return
1564
+ else:
1565
+ translation = select_translation_for_query()
1566
+
1567
+ print(f"Using translation: {translation}")
1568
+ print()
1569
+
1570
+ # Select model
1571
+ if args.model:
1572
+ # Find the model in available models
1573
+ provider = None
1574
+ models = get_available_models()
1575
+ for provider_info in models.values():
1576
+ if args.model in provider_info["models"]:
1577
+ provider = provider_info["provider_class"](args.model)
1578
+ break
1579
+
1580
+ if provider is None:
1581
+ print(f"Model '{args.model}' not found!")
1582
+ return
1583
+ else:
1584
+ provider = select_model_for_query()
1585
+
1586
+ provider_name = get_model_provider(provider)
1587
+ model_name = provider.get_name()
1588
+ print(f"Using model: {model_name} ({provider_name})")
1589
+ print()
1590
+
1591
+ # Run batch queries
1592
+ use_hnsw = not args.no_hnsw
1593
+ results = await run_batch_queries(queries, provider, translation, args.concurrency, use_hnsw)
1594
+
1595
+ if not results:
1596
+ print("No results to save.")
1597
+ return
1598
+
1599
+ # Calculate summary statistics
1600
+ total_queries = len(results)
1601
+ total_points = sum(r["score"] for r in results)
1602
+ max_possible_points = total_queries * 3
1603
+ accuracy = (total_points / max_possible_points) * 100 if max_possible_points > 0 else 0
1604
+
1605
+ print(f"\nBatch Query Summary:")
1606
+ print(f" Total queries: {total_queries}")
1607
+ print(f" Total points: {total_points}/{max_possible_points}")
1608
+ print(f" Accuracy: {accuracy:.1f}%")
1609
+
1610
+ # Save to CSV
1611
+ results_file = Path(args.results_file)
1612
+ append_to_csv(results_file, results, provider, translation)
1613
+ print(f"\nResults appended to: {results_file}")
1614
+
1615
+
1616
+ def read_results_csv(results_file: Path) -> List[Dict[str, Any]]:
1617
+ """Read and parse results from CSV file."""
1618
+ if not results_file.exists():
1619
+ print(f"Results file not found: {results_file}")
1620
+ return []
1621
+
1622
+ results = []
1623
+ try:
1624
+ with open(results_file, "r", encoding="utf-8") as f:
1625
+ reader = csv.DictReader(f)
1626
+ for row in reader:
1627
+ # Convert points to integer
1628
+ try:
1629
+ row["points"] = int(row["points"])
1630
+ except (ValueError, KeyError):
1631
+ row["points"] = 0
1632
+ results.append(row)
1633
+ except Exception as e:
1634
+ print(f"Error reading CSV file {results_file}: {e}")
1635
+ return []
1636
+
1637
+ return results
1638
+
1639
+
1640
+ def analyze_results(results: List[Dict[str, Any]]) -> Dict[str, Any]:
1641
+ """Analyze results and generate summary statistics."""
1642
+ if not results:
1643
+ return {}
1644
+
1645
+ # Group by provider and model
1646
+ provider_model_stats = {}
1647
+
1648
+ for result in results:
1649
+ provider = result.get("provider", "unknown")
1650
+ model = result.get("model", "unknown")
1651
+ translation = result.get("translation", "unknown")
1652
+ points = result.get("points", 0)
1653
+
1654
+ key = (provider, model, translation)
1655
+
1656
+ if key not in provider_model_stats:
1657
+ provider_model_stats[key] = {
1658
+ "provider": provider,
1659
+ "model": model,
1660
+ "translation": translation,
1661
+ "total_queries": 0,
1662
+ "total_points": 0,
1663
+ "max_points": 0,
1664
+ "correct_top1": 0, # 3 points
1665
+ "correct_top2": 0, # 2 points
1666
+ "correct_top3": 0, # 1 point
1667
+ "incorrect": 0, # 0 points
1668
+ "queries": []
1669
+ }
1670
+
1671
+ stats = provider_model_stats[key]
1672
+ stats["total_queries"] += 1
1673
+ stats["total_points"] += points
1674
+ stats["max_points"] += 3 # Maximum possible points per query
1675
+
1676
+ # Count by position
1677
+ if points == 3:
1678
+ stats["correct_top1"] += 1
1679
+ elif points == 2:
1680
+ stats["correct_top2"] += 1
1681
+ elif points == 1:
1682
+ stats["correct_top3"] += 1
1683
+ else:
1684
+ stats["incorrect"] += 1
1685
+
1686
+ # Store query details for detailed table
1687
+ stats["queries"].append({
1688
+ "query": result.get("query", ""),
1689
+ "expected": result.get("expected", ""),
1690
+ "result1": result.get("result1", ""),
1691
+ "result2": result.get("result2", ""),
1692
+ "result3": result.get("result3", ""),
1693
+ "points": points
1694
+ })
1695
+
1696
+ # Calculate accuracy percentages
1697
+ for stats in provider_model_stats.values():
1698
+ if stats["max_points"] > 0:
1699
+ stats["accuracy"] = (stats["total_points"] / stats["max_points"]) * 100
1700
+ else:
1701
+ stats["accuracy"] = 0
1702
+
1703
+ return provider_model_stats
1704
+
1705
+
1706
+ def generate_summary_table(stats: Dict[tuple, Dict[str, Any]]) -> str:
1707
+ """Generate summary table markdown."""
1708
+ if not stats:
1709
+ return "No results to display.\n\n"
1710
+
1711
+ markdown = "## Model Performance Summary\n\n"
1712
+ markdown += "| Provider | Model | Translation | Accuracy | Top 1 | Top 2 | Top 3 | Failed | Total |\n"
1713
+ markdown += "|----------|-------|-------------|----------|-------|-------|-------|-----------|-------|\n"
1714
+
1715
+ # Sort by accuracy (descending)
1716
+ sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True)
1717
+
1718
+ for (provider, model, translation), stat in sorted_stats:
1719
+ markdown += f"| {provider} | {model} | {translation} | "
1720
+ markdown += f"{stat['accuracy']:.1f}% | "
1721
+ markdown += f"{stat['correct_top1']} | "
1722
+ markdown += f"{stat['correct_top2']} | "
1723
+ markdown += f"{stat['correct_top3']} | "
1724
+ markdown += f"{stat['incorrect']} | "
1725
+ markdown += f"{stat['total_queries']} |\n"
1726
+
1727
+ markdown += "\n"
1728
+ return markdown
1729
+
1730
+
1731
+ def generate_detailed_table(stats: Dict[tuple, Dict[str, Any]], max_queries: int = 10) -> str:
1732
+ """Generate detailed results with individual sections for each model."""
1733
+ if not stats:
1734
+ return ""
1735
+
1736
+ markdown = "## Results\n\n"
1737
+ markdown += "✅ denotes accurate result.\n\n"
1738
+
1739
+ # Sort models by accuracy (best first)
1740
+ sorted_stats = sorted(stats.items(), key=lambda x: x[1]["accuracy"], reverse=True)
1741
+
1742
+ for (provider, model, translation), stat in sorted_stats:
1743
+ # Create model heading
1744
+ model_display = f"{provider}/{model}" if provider != "HuggingFace" else model
1745
+ markdown += f"### {model_display} ({translation.upper()})\n\n"
1746
+
1747
+ # Add summary statistics
1748
+ accuracy = stat["accuracy"]
1749
+ total_points = stat["total_points"]
1750
+ max_points = stat["max_points"]
1751
+ total_queries = len(stat["queries"])
1752
+
1753
+ markdown += f"**Accuracy: {accuracy:.1f}%** ({total_points}/{max_points} points across {total_queries} queries)\n\n"
1754
+
1755
+ # Create table for this model
1756
+ markdown += "| Query | Expected | Top Result | Score | ✓ |\n"
1757
+ markdown += "|-------|----------|------------|-------|---|\n"
1758
+
1759
+ # Show up to max_queries results
1760
+ queries = stat["queries"][:max_queries]
1761
+
1762
+ for query_result in queries:
1763
+ query = query_result["query"]
1764
+ # Truncate long queries for table readability
1765
+ display_query = query[:50] + "..." if len(query) > 50 else query
1766
+ expected = query_result["expected"]
1767
+
1768
+ # Get the top result
1769
+ if "result1" in query_result and query_result["result1"]:
1770
+ result1 = query_result["result1"]
1771
+ score1 = float(query_result.get("score1") or 0.0)  # CSV fields are strings
1772
+ points = query_result.get("points", 0)
1773
+
1774
+ # Add checkmark based on points
1775
+ if points == 3:
1776
+ checkmark = "✅" # Perfect match (1st place)
1777
+ elif points == 2:
1778
+ checkmark = "⚠️" # Good match (2nd place)
1779
+ elif points == 1:
1780
+ checkmark = "❌" # Poor match (3rd place)
1781
+ else:
1782
+ checkmark = "❌" # No match
1783
+
1784
+ markdown += f"| {display_query} | {expected} | {result1} | {score1:.4f} | {checkmark} |\n"
1785
+ else:
1786
+ markdown += f"| {display_query} | {expected} | No results | 0.0000 | ❌ |\n"
1787
+
1788
+ if len(stat["queries"]) > max_queries:
1789
+ remaining = len(stat["queries"]) - max_queries
1790
+ markdown += f"\n*... and {remaining} more queries*\n"
1791
+
1792
+ markdown += "\n"
1793
+
1794
+ # Add legend
1795
+ markdown += "## Legend\n\n"
1796
+ markdown += "- ✅ **Perfect Match** - Expected result appears as #1 result (3 points)\n"
1797
+ markdown += "- ⚠️ **Good Match** - Expected result appears as #2 result (2 points)\n"
1798
+ markdown += "- ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)\n\n"
1799
+
1800
+ return markdown
1801
+
1802
+
1803
+ def generate_legend() -> str:
1804
+ """Generate legend for checkmark symbols."""
1805
+ return """## Legend
1806
+
1807
+ - ✅ **Perfect Match** - Expected result appears as #1 result (3 points)
1808
+ - ⚠️ **Good Match** - Expected result appears as #2 result (2 points)
1809
+ - ❌ **Poor/No Match** - Expected result appears as #3 result (1 point) or not in top 3 (0 points)
1810
+
1811
+ """
1812
+
1813
+
1814
+ def generate_report(args):
1815
+ """Generate markdown report and update only the Query Examples section in README.md."""
1816
+ print("Bible Verse Embedding Evaluation Report Generator")
1817
+ print("=" * 55)
1818
+ print()
1819
+
1820
+ results_file = Path(args.results_file)
1821
+ output_file = Path(args.output_file)
1822
+
1823
+ print(f"Reading results from: {results_file}")
1824
+ results = read_results_csv(results_file)
1825
+
1826
+ if not results:
1827
+ print("No results found to process.")
1828
+ return
1829
+
1830
+ print(f"Loaded {len(results)} result entries")
1831
+
1832
+ # Analyze results
1833
+ print("Analyzing results...")
1834
+ stats = analyze_results(results)
1835
+
1836
+ if not stats:
1837
+ print("No statistics to generate.")
1838
+ return
1839
+
1840
+ print(f"Found results for {len(stats)} model/provider combinations")
1841
+
1842
+ # Read existing README.md file
1843
+ try:
1844
+ with open(output_file, "r", encoding="utf-8") as f:
1845
+ existing_content = f.read()
1846
+ except FileNotFoundError:
1847
+ print(f"README.md not found at {output_file}, creating new file")
1848
+ existing_content = "# Bible Embeddings\n\n"
1849
+
1850
+ # Find the Results section (or Query Examples for backward compatibility) and replace it
1851
+ results_section_start = existing_content.find("## Results")
1852
+ if results_section_start == -1:
1853
+ results_section_start = existing_content.find("## Query Examples")
1854
+
1855
+ if results_section_start == -1:
1856
+ # No Results section found, append to end
1857
+ print("No existing Results section found, appending to end")
1858
+ new_content = existing_content.rstrip() + "\n\n" + generate_detailed_table(stats, args.max_queries)
1859
+ else:
1860
+ # Find the end of the Results section (next ## heading or end of file)
1861
+ next_section_start = existing_content.find("\n## ", results_section_start + 1)
1862
+ if next_section_start == -1:
1863
+ # Results is the last section
1864
+ before_section = existing_content[:results_section_start]
1865
+ after_section = ""
1866
+ else:
1867
+ before_section = existing_content[:results_section_start]
1868
+ after_section = existing_content[next_section_start:]
1869
+
1870
+ # Replace the Results section
1871
+ new_results_section = generate_detailed_table(stats, args.max_queries)
1872
+ new_content = before_section + new_results_section + after_section
1873
+
1874
+ # Write updated content to file
1875
+ try:
1876
+ with open(output_file, "w", encoding="utf-8") as f:
1877
+ f.write(new_content)
1878
+ print(f"\nResults section updated in: {output_file}")
1879
+ except Exception as e:
1880
+ print(f"Error writing report file: {e}")
1881
+
1882
+
1883
  if __name__ == "__main__":
1884
  asyncio.run(main())
pyproject.toml CHANGED
@@ -11,6 +11,8 @@ dependencies = [
11
  "voyageai>=0.3.4",
12
  "jina>=3.0.0",
13
  "numpy>=1.21.0",
14
  ]
15
 
16
  [project.optional-dependencies]
 
11
  "voyageai>=0.3.4",
12
  "jina>=3.0.0",
13
  "numpy>=1.21.0",
14
+ "pyyaml>=6.0",
15
+ "faiss-cpu>=1.8.0",
16
  ]
17
 
18
  [project.optional-dependencies]
uv.lock CHANGED
@@ -120,10 +120,12 @@ name = "bible-embeddings"
120
  version = "0.1.0"
121
  source = { virtual = "." }
122
  dependencies = [
123
  { name = "google-generativeai" },
124
  { name = "jina" },
125
  { name = "numpy" },
126
  { name = "openai" },
127
  { name = "sentence-transformers" },
128
  { name = "voyageai" },
129
  ]
@@ -150,6 +152,7 @@ voyage = [
150
 
151
  [package.metadata]
152
  requires-dist = [
153
  { name = "google-generativeai", specifier = ">=0.8.5" },
154
  { name = "google-generativeai", marker = "extra == 'all'", specifier = ">=0.8.5" },
155
  { name = "google-generativeai", marker = "extra == 'gemini'", specifier = ">=0.8.5" },
@@ -160,6 +163,7 @@ requires-dist = [
160
  { name = "openai", specifier = ">=1.101.0" },
161
  { name = "openai", marker = "extra == 'all'", specifier = ">=1.0.0" },
162
  { name = "openai", marker = "extra == 'openai'", specifier = ">=1.0.0" },
163
  { name = "sentence-transformers", specifier = ">=5.1.0" },
164
  { name = "voyageai", specifier = ">=0.3.4" },
165
  { name = "voyageai", marker = "extra == 'all'", specifier = ">=0.3.4" },
@@ -289,6 +293,42 @@ wheels = [
289
  { url = "https://files.pythonhosted.org/packages/cb/a3/460c57f094a4a165c84a1341c373b0a4f5ec6ac244b998d5021aade89b77/ecdsa-0.19.1-py2.py3-none-any.whl", hash = "sha256:30638e27cf77b7e15c4c4cc1973720149e1033827cfd00661ca5c8cc0cdb24c3", size = 150607, upload-time = "2025-03-13T11:52:41.757Z" },
290
  ]
291
 
292
  [[package]]
293
  name = "fastapi"
294
  version = "0.116.1"
 
120
  version = "0.1.0"
121
  source = { virtual = "." }
122
  dependencies = [
123
+ { name = "faiss-cpu" },
124
  { name = "google-generativeai" },
125
  { name = "jina" },
126
  { name = "numpy" },
127
  { name = "openai" },
128
+ { name = "pyyaml" },
129
  { name = "sentence-transformers" },
130
  { name = "voyageai" },
131
  ]
 
152
 
153
  [package.metadata]
154
  requires-dist = [
155
+ { name = "faiss-cpu", specifier = ">=1.8.0" },
156
  { name = "google-generativeai", specifier = ">=0.8.5" },
157
  { name = "google-generativeai", marker = "extra == 'all'", specifier = ">=0.8.5" },
158
  { name = "google-generativeai", marker = "extra == 'gemini'", specifier = ">=0.8.5" },
 
163
  { name = "openai", specifier = ">=1.101.0" },
164
  { name = "openai", marker = "extra == 'all'", specifier = ">=1.0.0" },
165
  { name = "openai", marker = "extra == 'openai'", specifier = ">=1.0.0" },
166
+ { name = "pyyaml", specifier = ">=6.0" },
167
  { name = "sentence-transformers", specifier = ">=5.1.0" },
168
  { name = "voyageai", specifier = ">=0.3.4" },
169
  { name = "voyageai", marker = "extra == 'all'", specifier = ">=0.3.4" },
 
293
  { url = "https://files.pythonhosted.org/packages/cb/a3/460c57f094a4a165c84a1341c373b0a4f5ec6ac244b998d5021aade89b77/ecdsa-0.19.1-py2.py3-none-any.whl", hash = "sha256:30638e27cf77b7e15c4c4cc1973720149e1033827cfd00661ca5c8cc0cdb24c3", size = 150607, upload-time = "2025-03-13T11:52:41.757Z" },
294
  ]
295
 
296
+ [[package]]
297
+ name = "faiss-cpu"
298
+ version = "1.12.0"
299
+ source = { registry = "https://pypi.org/simple" }
300
+ dependencies = [
301
+ { name = "numpy" },
302
+ { name = "packaging" },
303
+ ]
304
+ sdist = { url = "https://files.pythonhosted.org/packages/7d/80/bb75a7ed6e824dea452a24d3434a72ed799324a688b10b047d441d270185/faiss_cpu-1.12.0.tar.gz", hash = "sha256:2f87cbcd603f3ed464ebceb857971fdebc318de938566c9ae2b82beda8e953c0", size = 69292, upload-time = "2025-08-13T06:07:26.553Z" }
305
+ wheels = [
306
+ { url = "https://files.pythonhosted.org/packages/5f/4e/6be5fbd2ceccd87b168c64edeefa469cd11f095bb63b16a61a29296b0fdb/faiss_cpu-1.12.0-cp313-cp313-macosx_13_0_x86_64.whl", hash = "sha256:c9c79b5f28dcf9b2e2557ce51b938b21b7a9d508e008dc1ffea7b8249e7bd443", size = 8034409, upload-time = "2025-08-13T06:06:22.519Z" },
307
+ { url = "https://files.pythonhosted.org/packages/4b/f0/658012a91a690d82f3587fd8e56ea1d9b9698c31970929a9dba17edd211e/faiss_cpu-1.12.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:0db6485bc9f32b69aaccf9ad520782371a79904dcfe20b6da5cbfd61a712e85f", size = 3362034, upload-time = "2025-08-13T06:06:24.052Z" },
308
+ { url = "https://files.pythonhosted.org/packages/81/8b/9b355309d448e1a737fac31d45e9b2484ffb0f04f10fba3b544efe6661e4/faiss_cpu-1.12.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f6db5532831791d7bac089fc580e741e99869122946bb6a5f120016c83b95d10", size = 3834324, upload-time = "2025-08-13T06:06:25.506Z" },
309
+ { url = "https://files.pythonhosted.org/packages/7e/31/d229f6cdb9cbe03020499d69c4b431b705aa19a55aa0fe698c98022b2fef/faiss_cpu-1.12.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d57ed7aac048b18809af70350c31acc0fb9f00e6c03b6ed1651fd58b174882d", size = 31421590, upload-time = "2025-08-13T06:06:27.601Z" },
310
+ { url = "https://files.pythonhosted.org/packages/26/19/80289ba008f14c95fbb6e94617ea9884e421ca745864fe6b8b90e1c3fc94/faiss_cpu-1.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:26c29290e7d1c5938e5886594dc0a2272b30728351ca5f855d4ae30704d5a6cc", size = 9762452, upload-time = "2025-08-13T06:06:30.237Z" },
311
+ { url = "https://files.pythonhosted.org/packages/af/e7/6cc03ead5e19275e34992419e2b7d107d0295390ccf589636ff26adb41e2/faiss_cpu-1.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9b43d0c295e93a8e5f1dd30325caaf34d4ecb51f1e3d461c7b0e71bff3a8944b", size = 24156530, upload-time = "2025-08-13T06:06:32.23Z" },
312
+ { url = "https://files.pythonhosted.org/packages/34/90/438865fe737d65e7348680dadf3b2983bdcef7e5b7e852000e74c50a9933/faiss_cpu-1.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:a7c6156f1309bb969480280906e8865c3c4378eebb0f840c55c924bf06efd8d3", size = 18169604, upload-time = "2025-08-13T06:06:34.884Z" },
313
+ { url = "https://files.pythonhosted.org/packages/76/69/40a1d8d781a70d33c57ef1b4b777486761dd1c502a86d27e90ef6aa8a9f9/faiss_cpu-1.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:0b5fac98a350774a98b904f7a7c6689eb5cf0a593d63c552e705a80c55636d15", size = 8012523, upload-time = "2025-08-13T06:06:37.24Z" },
314
+ { url = "https://files.pythonhosted.org/packages/12/35/01a4a7c179d67bee0d8a027b95c3eae19cb354ae69ef2bc50ac3b93bc853/faiss_cpu-1.12.0-cp314-cp314-macosx_13_0_x86_64.whl", hash = "sha256:ff7db774968210d08cd0331287f3f66a6ffef955a7aa9a7fcd3eb4432a4ce5f5", size = 8036142, upload-time = "2025-08-13T06:06:38.894Z" },
315
+ { url = "https://files.pythonhosted.org/packages/08/23/bac2859490096608c9d527f3041b44c2e43f8df0d4aadd53a4cc5ce678ac/faiss_cpu-1.12.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:220b5bb5439c64e417b35f9ade4c7dc3bf7df683d6123901ba84d6d764ecd486", size = 3363747, upload-time = "2025-08-13T06:06:40.73Z" },
316
+ { url = "https://files.pythonhosted.org/packages/7b/1d/e18023e1f43a18ec593adcd69d356f1fa94bde20344e38334d5985e5c5cc/faiss_cpu-1.12.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:693d0bf16f79e8d16a1baaeda459f3375f37da0354e97dc032806b48a2a54151", size = 3835232, upload-time = "2025-08-13T06:06:42.172Z" },
317
+ { url = "https://files.pythonhosted.org/packages/cd/2b/1c1fea423d3f550f44c5ec3f14d8400919b49c285c3bd146687c63e40186/faiss_cpu-1.12.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bcc6587dee21e17430fb49ddc5200625d6f5e1de2bdf436f14827bad4ca78d19", size = 31432677, upload-time = "2025-08-13T06:06:44.348Z" },
318
+ { url = "https://files.pythonhosted.org/packages/de/d2/3483e92a02f30e2d8491a256f470f54b7f5483266dfe09126d28741d31ec/faiss_cpu-1.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b80e5965f001822cc99ec65c715169af1b70bdae72eccd573520a2dec485b3ee", size = 9765504, upload-time = "2025-08-13T06:06:46.567Z" },
319
+ { url = "https://files.pythonhosted.org/packages/ce/2f/d97792211a9bd84b8d6b1dcaa1dcd69ac11e026c6ef19c641b6a87e31025/faiss_cpu-1.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98279f1b4876ef9902695a329b81a99002782ab6e26def472022009df6f1ac68", size = 24169930, upload-time = "2025-08-13T06:06:48.916Z" },
320
+ { url = "https://files.pythonhosted.org/packages/ee/b8/b707ca4d88af472509a053c39d3cced53efd19d096b8dff2fadc18c4b82d/faiss_cpu-1.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:11670337f9f5ee9ff3490e30683eea80add060c300cf6f6cb0e8faf3155fd20e", size = 18475400, upload-time = "2025-08-13T06:06:51.233Z" },
321
+ { url = "https://files.pythonhosted.org/packages/77/11/42e41ddebde4dfe77e36e92d0110b4f733c8640883abffde54f802482deb/faiss_cpu-1.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:7ac1c8b53609b5c722ab60f1749260a7cb3c72fdfb720a0e3033067e73591da5", size = 8281229, upload-time = "2025-08-13T06:06:53.735Z" },
322
+ { url = "https://files.pythonhosted.org/packages/1c/9a/8ae5bbeabe70eb673c37fc7c77e2e476746331afb6654b2df97d8b6d380d/faiss_cpu-1.12.0-cp314-cp314t-macosx_13_0_x86_64.whl", hash = "sha256:110b21b7bb4c93c4f1a5eb2ffb8ef99dcdb4725f8ab2e5cd161324e4d981f204", size = 8087247, upload-time = "2025-08-13T06:06:55.407Z" },
323
+ { url = "https://files.pythonhosted.org/packages/f4/df/b3d79098860b67b126da351788c04ac243c29718dadc4a678a6f5e7209c0/faiss_cpu-1.12.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:82eb5515ce72be9a43f4cf74447a0d090e014231981df91aff7251204b506fbf", size = 3411043, upload-time = "2025-08-13T06:06:56.983Z" },
324
+ { url = "https://files.pythonhosted.org/packages/bc/2f/b1a2a03dd3cce22ff9fc434aa3c7390125087260c1d1349311da36eaa432/faiss_cpu-1.12.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:754eef89cdf2b35643df6b0923a5a098bdfecf63b5f4bd86c385042ee511b287", size = 3801789, upload-time = "2025-08-13T06:06:58.688Z" },
325
+ { url = "https://files.pythonhosted.org/packages/a3/a8/16ad0c6a966e93d04bfd5248d2be1d8b5849842b0e2611c5ecd26fcaf036/faiss_cpu-1.12.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7285c71c8f5e9c58b55175f5f74c78c518c52c421a88a430263f34e3e31f719c", size = 31231388, upload-time = "2025-08-13T06:07:00.55Z" },
326
+ { url = "https://files.pythonhosted.org/packages/62/a1/9c16eca0b8f8b13c32c47a5e4ff7a4bc0ca3e7d263140312088811230871/faiss_cpu-1.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:84a50d7a2f711f79cc8b65aa28956dba6435e47b71a38b2daea44c94c9b8e458", size = 9737605, upload-time = "2025-08-13T06:07:03.018Z" },
327
+ { url = "https://files.pythonhosted.org/packages/a8/4a/2c2d615078c9d816a836fb893aaef551ad152f2eb00bc258698273c240c0/faiss_cpu-1.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7f3e0a14e4edec6a3959a9f51afccb89e863138f184ff2cc24c13f9ad788740b", size = 23922880, upload-time = "2025-08-13T06:07:05.099Z" },
328
+ { url = "https://files.pythonhosted.org/packages/30/aa/99b8402a4dac678794f13f8f4f29d666c2ef0a91594418147f47034ebc81/faiss_cpu-1.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8b3239cc371df6826ac43c62ac04eec7cc497bedb43f681fcd8ea494f520ddbb", size = 18750661, upload-time = "2025-08-13T06:07:07.551Z" },
329
+ { url = "https://files.pythonhosted.org/packages/a3/a2/b546e9a20ba157eb2fbe141289f1752f157ee6d932899f4853df4ded6d4b/faiss_cpu-1.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:58b23456db725ee1bd605a6135d2ef55b2ac3e0b6fe873fd99a909e8ef4bd0ff", size = 8302032, upload-time = "2025-08-13T06:07:09.602Z" },
330
+ ]
331
+
332
  [[package]]
333
  name = "fastapi"
334
  version = "0.116.1"