broadfield-dev committed
Commit 08c0e4e · verified · 1 Parent(s): 9439f92

Update build_rag.py

Files changed (1)
  1. build_rag.py +60 -79
build_rag.py CHANGED
@@ -1,26 +1,25 @@
 import json
 import os
 import pandas as pd
-from datasets import Dataset
-from transformers import AutoTokenizer, AutoModel
 import torch
-from huggingface_hub import create_repo
+from transformers import AutoTokenizer, AutoModel
+import chromadb
 import sys
+from tqdm import tqdm
+from huggingface_hub import HfApi, create_repo

 # --- Configuration ---
-# The name of the Gemma model for creating embeddings.
-# Make sure this matches the model used in app.py
-MODEL_NAME = "google/gemma-2b"
-
-# The name for the new dataset repository on the Hugging Face Hub.
-# This MUST match the DATASET_REPO in app.py
-DATASET_REPO = "broadfield-dev/bible-rag-dataset-gemma"
+# Must match the settings in app.py
+CHROMA_PATH = "chroma_db"
+COLLECTION_NAME = "bible_verses"
+MODEL_NAME = "google/embeddinggemma-300m"
+DATASET_REPO = "broadfield-dev/bible-chromadb-gemma" # The HF Dataset to store the DB

-# The directory containing the Bible JSON files
 JSON_DIRECTORY = 'bible_json'
-CHUNK_SIZE = 3 # Number of verses to group into a single text chunk
+CHUNK_SIZE = 3
+EMBEDDING_BATCH_SIZE = 16

-# This dictionary maps the numeric book ID from the JSON to a human-readable name.
+# --- Book ID Mapping (Unchanged) ---
 BOOK_ID_TO_NAME = {
     1: "Genesis", 2: "Exodus", 3: "Leviticus", 4: "Numbers", 5: "Deuteronomy",
     6: "Joshua", 7: "Judges", 8: "Ruth", 9: "1 Samuel", 10: "2 Samuel",
@@ -39,117 +38,99 @@ BOOK_ID_TO_NAME = {
 }

 def process_bible_json_files(directory_path: str, chunk_size: int) -> pd.DataFrame:
-    """
-    Reads all Bible JSON files from a directory, processes them, chunks them,
-    and returns a single unified Pandas DataFrame.
-    """
+    """Reads, processes, and chunks Bible JSON files into a Pandas DataFrame."""
+    # (This function's internal logic remains unchanged)
     all_verses = []
-
     print(f"Reading JSON files from '{directory_path}'...")
     if not os.path.exists(directory_path) or not os.listdir(directory_path):
         print(f"Error: Directory '{directory_path}' is empty or does not exist.", file=sys.stderr)
-        print("Please add your Bible JSON files to this directory.", file=sys.stderr)
         sys.exit(1)
-
     for filename in os.listdir(directory_path):
         if filename.endswith('.json'):
             version_name = filename.split('.')[0].upper()
             file_path = os.path.join(directory_path, filename)
-
-            with open(file_path, 'r') as f:
-                data = json.load(f)
-
+            with open(file_path, 'r') as f: data = json.load(f)
             rows = data.get("resultset", {}).get("row", [])
             for row in rows:
                 field = row.get("field", [])
                 if len(field) == 5:
                     _id, book_id, chapter, verse, text = field
                     book_name = BOOK_ID_TO_NAME.get(book_id, "Unknown Book")
-                    all_verses.append({
-                        'version': version_name,
-                        'book_id': book_id,
-                        'book_name': book_name,
-                        'chapter': chapter,
-                        'verse': verse,
-                        'text': text.strip()
-                    })
-
+                    all_verses.append({'version': version_name, 'book_name': book_name, 'chapter': chapter, 'verse': verse, 'text': text.strip()})
     if not all_verses:
-        print("Error: No verses were processed. Check the format of your JSON files.", file=sys.stderr)
+        print("Error: No verses were processed.", file=sys.stderr)
         sys.exit(1)
-
-    print(f"Successfully parsed {len(all_verses)} verses.")
     df = pd.DataFrame(all_verses)
-
-    print(f"Chunking verses into groups of {chunk_size}...")
     all_chunks = []
     for (version, book_name, chapter), group in df.groupby(['version', 'book_name', 'chapter']):
         group = group.sort_values('verse').reset_index(drop=True)
         for i in range(0, len(group), chunk_size):
             chunk_df = group.iloc[i:i+chunk_size]
             combined_text = " ".join(chunk_df['text'])
-            start_verse = chunk_df.iloc[0]['verse']
-            end_verse = chunk_df.iloc[-1]['verse']
-            if start_verse == end_verse:
-                reference = f"{book_name} {chapter}:{start_verse}"
-            else:
-                reference = f"{book_name} {chapter}:{start_verse}-{end_verse}"
-            all_chunks.append({
-                'text': combined_text,
-                'reference': reference,
-                'version': version,
-            })
-
+            start_verse, end_verse = chunk_df.iloc[0]['verse'], chunk_df.iloc[-1]['verse']
+            reference = f"{book_name} {chapter}:{start_verse}" if start_verse == end_verse else f"{book_name} {chapter}:{start_verse}-{end_verse}"
+            all_chunks.append({'text': combined_text, 'reference': reference, 'version': version})
     final_df = pd.DataFrame(all_chunks)
     print(f"Created {len(final_df)} text chunks.")
     return final_df

 if __name__ == "__main__":
-    print("--- Starting RAG Dataset Build Process ---")
+    print("--- Starting Vector Database Build Process ---")

-    # 1. Process local JSON files
-    print(f"\n--- Step 1: Processing JSON files from '{JSON_DIRECTORY}' ---")
+    # 1. Process JSON
     bible_chunks_df = process_bible_json_files(JSON_DIRECTORY, chunk_size=CHUNK_SIZE)

-    # 2. Convert to Hugging Face Dataset
-    print("\n--- Step 2: Converting to Hugging Face Dataset ---")
-    hf_dataset = Dataset.from_pandas(bible_chunks_df)
-    print(hf_dataset)
+    # 2. Setup local ChromaDB
+    print(f"\n--- Setting up local ChromaDB in '{CHROMA_PATH}' ---")
+    if os.path.exists(CHROMA_PATH):
+        import shutil
+        print("Deleting old local database directory...")
+        shutil.rmtree(CHROMA_PATH)
+    client = chromadb.PersistentClient(path=CHROMA_PATH)
+    collection = client.create_collection(name=COLLECTION_NAME)

     # 3. Load embedding model
-    print(f"\n--- Step 3: Loading embedding model: '{MODEL_NAME}' ---")
+    print(f"\n--- Loading embedding model: '{MODEL_NAME}' ---")
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModel.from_pretrained(MODEL_NAME, device_map="auto")
-    print("Model loaded successfully.")

-    # 4. Generate embeddings
-    print("\n--- Step 4: Generating embeddings (this may take a while) ---")
-    def get_embeddings(batch):
-        inputs = tokenizer(batch['text'], padding=True, truncation=True, return_tensors="pt", max_length=512).to(model.device)
+    # 4. Generate embeddings and populate DB
+    print(f"\n--- Generating embeddings and populating database ---")
+    total_chunks = len(bible_chunks_df)
+    for i in tqdm(range(0, total_chunks, EMBEDDING_BATCH_SIZE), desc="Embedding Chunks"):
+        batch_df = bible_chunks_df.iloc[i:i+EMBEDDING_BATCH_SIZE]
+        texts = batch_df['text'].tolist()
+
+        inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=512).to(model.device)
         with torch.no_grad():
             outputs = model(**inputs)
-        embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
-        return {'embeddings': embeddings}
+        embeddings = outputs.last_hidden_state.mean(dim=1).cpu().tolist()

-    hf_dataset_with_embeddings = hf_dataset.map(get_embeddings, batched=True, batch_size=16)
-    print("Embeddings generated successfully.")
-
-    # 5. Add FAISS index
-    print("\n--- Step 5: Creating and adding FAISS index ---")
-    hf_dataset_with_embeddings.add_faiss_index(column="embeddings")
-    print("FAISS index added successfully.")
+        collection.add(
+            ids=[str(j) for j in range(i, i + len(batch_df))],
+            embeddings=embeddings,
+            documents=texts,
+            metadatas=batch_df[['reference', 'version']].to_dict('records')
+        )
+    print(f"Successfully added {total_chunks} documents to the local ChromaDB.")

-    # 6. Push to Hub
-    print(f"\n--- Step 6: Pushing dataset to Hub: '{DATASET_REPO}' ---")
+    # 5. Upload the database directory to Hugging Face Hub
+    print(f"\n--- Pushing database to Hugging Face Hub: '{DATASET_REPO}' ---")
     try:
+        # Ensure the repo exists
         create_repo(repo_id=DATASET_REPO, repo_type="dataset", exist_ok=True)
-        print(f"Repository '{DATASET_REPO}' created or already exists.")

-        hf_dataset_with_embeddings.push_to_hub(DATASET_REPO)
-        print("Dataset pushed successfully!")
+        # Upload the entire folder
+        api = HfApi()
+        api.upload_folder(
+            folder_path=CHROMA_PATH,
+            repo_id=DATASET_REPO,
+            repo_type="dataset",
+        )
+        print("Database pushed successfully!")
     except Exception as e:
         print(f"An error occurred while pushing to the Hub: {e}", file=sys.stderr)
+        print("Please ensure your HF_TOKEN secret has WRITE permissions.", file=sys.stderr)
         sys.exit(1)

-    print("\n--- RAG Build Process Complete! ---")
-    print(f"The dataset is now available at: https://huggingface.co/datasets/{DATASET_REPO}")
+    print("\n--- Build Process Complete! ---")