broadfield-dev committed
Commit 80d005a · verified · 1 parent: 46245df

Update build_rag.py

Files changed (1):
  1. build_rag.py +72 -35
build_rag.py CHANGED
@@ -1,9 +1,26 @@
 import json
 import os
 import pandas as pd
 
-# This dictionary maps the numeric book ID from your JSON to a human-readable name.
-# It covers the standard 66 books of the Protestant Bible canon.
 BOOK_ID_TO_NAME = {
     1: "Genesis", 2: "Exodus", 3: "Leviticus", 4: "Numbers", 5: "Deuteronomy",
     6: "Joshua", 7: "Judges", 8: "Ruth", 9: "1 Samuel", 10: "2 Samuel",
@@ -21,7 +38,7 @@ BOOK_ID_TO_NAME = {
     62: "1 John", 63: "2 John", 64: "3 John", 65: "Jude", 66: "Revelation"
 }
 
-def process_bible_json_files(directory_path: str, chunk_size: int = 3) -> pd.DataFrame:
     """
     Reads all Bible JSON files from a directory, processes them, chunks them,
     and returns a single unified Pandas DataFrame.
@@ -29,6 +46,11 @@ def process_bible_json_files(directory_path: str, chunk_size: int = 3) -> pd.DataFrame:
     all_verses = []
 
     print(f"Reading JSON files from '{directory_path}'...")
     for filename in os.listdir(directory_path):
         if filename.endswith('.json'):
             version_name = filename.split('.')[0].upper()
@@ -37,15 +59,12 @@ def process_bible_json_files(directory_path: str, chunk_size: int = 3) -> pd.DataFrame:
             with open(file_path, 'r') as f:
                 data = json.load(f)
 
-                # Navigate the nested JSON structure
                 rows = data.get("resultset", {}).get("row", [])
                 for row in rows:
                     field = row.get("field", [])
                     if len(field) == 5:
                         _id, book_id, chapter, verse, text = field
-
                         book_name = BOOK_ID_TO_NAME.get(book_id, "Unknown Book")
-
                         all_verses.append({
                             'version': version_name,
                             'book_id': book_id,
@@ -56,34 +75,25 @@ def process_bible_json_files(directory_path: str, chunk_size: int = 3) -> pd.DataFrame:
                         })
 
     if not all_verses:
-        raise ValueError("No verses were processed. Check the directory path and JSON structure.")
 
-    print(f"Successfully parsed {len(all_verses)} verses from {len(os.listdir(directory_path))} files.")
-
-    # Convert to DataFrame for easier manipulation
     df = pd.DataFrame(all_verses)
 
-    # --- Chunking Logic ---
     print(f"Chunking verses into groups of {chunk_size}...")
     all_chunks = []
-    # Group by version, book, and chapter to ensure chunks don't cross boundaries
     for (version, book_name, chapter), group in df.groupby(['version', 'book_name', 'chapter']):
         group = group.sort_values('verse').reset_index(drop=True)
-
         for i in range(0, len(group), chunk_size):
             chunk_df = group.iloc[i:i+chunk_size]
-
            combined_text = " ".join(chunk_df['text'])
-
            start_verse = chunk_df.iloc[0]['verse']
            end_verse = chunk_df.iloc[-1]['verse']
-
-            # Create a clean reference string
            if start_verse == end_verse:
                reference = f"{book_name} {chapter}:{start_verse}"
            else:
                reference = f"{book_name} {chapter}:{start_verse}-{end_verse}"
-
            all_chunks.append({
                'text': combined_text,
                'reference': reference,
@@ -92,27 +102,54 @@ def process_bible_json_files(directory_path: str, chunk_size: int = 3) -> pd.DataFrame:
 
     final_df = pd.DataFrame(all_chunks)
     print(f"Created {len(final_df)} text chunks.")
-
     return final_df
 
-# --- Main execution ---
 if __name__ == "__main__":
-    # 1. Set the path to your directory containing the JSON files
-    json_directory = 'bible_json'
 
-    # 2. Run the processing and chunking function
-    bible_chunks_df = process_bible_json_files(json_directory, chunk_size=3)
 
-    # 3. Display the result
-    print("\n--- Processing Complete ---")
-    print("DataFrame Info:")
-    bible_chunks_df.info()
 
-    print("\n--- Example Chunks ---")
-    print(bible_chunks_df.head())
-    print("\n")
-    print(bible_chunks_df.sample(5))
 
-    # This DataFrame is now ready for the next step:
-    # `hf_dataset = Dataset.from_pandas(bible_chunks_df)`
-    # ...followed by Gemma embedding and FAISS indexing.

Full file after this commit:
import json
import os
import pandas as pd
+from datasets import Dataset
+from transformers import AutoTokenizer, AutoModel
+import torch
+from huggingface_hub import create_repo
+import sys

+# --- Configuration ---
+# The name of the Gemma model for creating embeddings.
+# Make sure this matches the model used in app.py
+MODEL_NAME = "google/gemma-2b"
+
+# The name for the new dataset repository on the Hugging Face Hub.
+# This MUST match the DATASET_REPO in app.py
+DATASET_REPO = "broadfield-dev/bible-rag-dataset-gemma"
+
+# The directory containing the Bible JSON files
+JSON_DIRECTORY = 'bible_json'
+CHUNK_SIZE = 3  # Number of verses to group into a single text chunk
+
+# This dictionary maps the numeric book ID from the JSON to a human-readable name.
BOOK_ID_TO_NAME = {
    1: "Genesis", 2: "Exodus", 3: "Leviticus", 4: "Numbers", 5: "Deuteronomy",
    6: "Joshua", 7: "Judges", 8: "Ruth", 9: "1 Samuel", 10: "2 Samuel",
    # ...
    62: "1 John", 63: "2 John", 64: "3 John", 65: "Jude", 66: "Revelation"
}

+def process_bible_json_files(directory_path: str, chunk_size: int) -> pd.DataFrame:
    """
    Reads all Bible JSON files from a directory, processes them, chunks them,
    and returns a single unified Pandas DataFrame.
    """
    all_verses = []

    print(f"Reading JSON files from '{directory_path}'...")
+    if not os.path.exists(directory_path) or not os.listdir(directory_path):
+        print(f"Error: Directory '{directory_path}' is empty or does not exist.", file=sys.stderr)
+        print("Please add your Bible JSON files to this directory.", file=sys.stderr)
+        sys.exit(1)
+
    for filename in os.listdir(directory_path):
        if filename.endswith('.json'):
            version_name = filename.split('.')[0].upper()
            # ...
            with open(file_path, 'r') as f:
                data = json.load(f)

                rows = data.get("resultset", {}).get("row", [])
                for row in rows:
                    field = row.get("field", [])
                    if len(field) == 5:
                        _id, book_id, chapter, verse, text = field
                        book_name = BOOK_ID_TO_NAME.get(book_id, "Unknown Book")
                        all_verses.append({
                            'version': version_name,
                            'book_id': book_id,
                            # ...
                        })

    if not all_verses:
+        print("Error: No verses were processed. Check the format of your JSON files.", file=sys.stderr)
+        sys.exit(1)

+    print(f"Successfully parsed {len(all_verses)} verses.")
    df = pd.DataFrame(all_verses)

    print(f"Chunking verses into groups of {chunk_size}...")
    all_chunks = []
    for (version, book_name, chapter), group in df.groupby(['version', 'book_name', 'chapter']):
        group = group.sort_values('verse').reset_index(drop=True)
        for i in range(0, len(group), chunk_size):
            chunk_df = group.iloc[i:i+chunk_size]
            combined_text = " ".join(chunk_df['text'])
            start_verse = chunk_df.iloc[0]['verse']
            end_verse = chunk_df.iloc[-1]['verse']
            if start_verse == end_verse:
                reference = f"{book_name} {chapter}:{start_verse}"
            else:
                reference = f"{book_name} {chapter}:{start_verse}-{end_verse}"
            all_chunks.append({
                'text': combined_text,
                'reference': reference,
                # ...
            })

    final_df = pd.DataFrame(all_chunks)
    print(f"Created {len(final_df)} text chunks.")
    return final_df
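
+# For reference, each input file is expected to follow this shape (inferred from
+# the parsing above; field order is [id, book_id, chapter, verse, text], and the
+# id value shown here is purely illustrative):
+# {"resultset": {"row": [{"field": [1001, 1, 1, 1, "In the beginning..."]}]}}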

if __name__ == "__main__":
+    print("--- Starting RAG Dataset Build Process ---")

+    # 1. Process local JSON files
+    print(f"\n--- Step 1: Processing JSON files from '{JSON_DIRECTORY}' ---")
+    bible_chunks_df = process_bible_json_files(JSON_DIRECTORY, chunk_size=CHUNK_SIZE)

+    # 2. Convert to Hugging Face Dataset
+    print("\n--- Step 2: Converting to Hugging Face Dataset ---")
+    hf_dataset = Dataset.from_pandas(bible_chunks_df)
+    print(hf_dataset)
+
+    # 3. Load embedding model (note: google/gemma-2b is gated on the Hub, so
+    # the model license must be accepted and an access token configured before
+    # this download will succeed)
+    print(f"\n--- Step 3: Loading embedding model: '{MODEL_NAME}' ---")
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+    model = AutoModel.from_pretrained(MODEL_NAME, device_map="auto")
+    print("Model loaded successfully.")

+    # 4. Generate embeddings
+    print("\n--- Step 4: Generating embeddings (this may take a while) ---")
+    def get_embeddings(batch):
+        inputs = tokenizer(batch['text'], padding=True, truncation=True, return_tensors="pt", max_length=512).to(model.device)
+        with torch.no_grad():
+            outputs = model(**inputs)
+        embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
+        return {'embeddings': embeddings}
+
+    hf_dataset_with_embeddings = hf_dataset.map(get_embeddings, batched=True, batch_size=16)
+    print("Embeddings generated successfully.")
+
+    # 5. Add FAISS index
+    print("\n--- Step 5: Creating and adding FAISS index ---")
+    hf_dataset_with_embeddings.add_faiss_index(column="embeddings")
+    print("FAISS index added successfully.")
+
+    # 6. Push to Hub
+    print(f"\n--- Step 6: Pushing dataset to Hub: '{DATASET_REPO}' ---")
+    try:
+        create_repo(repo_id=DATASET_REPO, repo_type="dataset", exist_ok=True)
+        print(f"Repository '{DATASET_REPO}' created or already exists.")
+
+        hf_dataset_with_embeddings.drop_index("embeddings")
+        hf_dataset_with_embeddings.push_to_hub(DATASET_REPO)
+        print("Dataset pushed successfully!")
+    except Exception as e:
+        print(f"An error occurred while pushing to the Hub: {e}", file=sys.stderr)
+        sys.exit(1)

+    print("\n--- RAG Build Process Complete! ---")
+    print(f"The dataset is now available at: https://huggingface.co/datasets/{DATASET_REPO}")