Upload test_dataset2.py
test_dataset2.py ADDED (+36 -0)
@@ -0,0 +1,36 @@
+# Example loading script (save as a .py file in your HF repo)
+from datasets import load_dataset
+import pandas as pd
+
+# --- How to load the data ---
+
+# Replace 'your_hf_username/your_dataset_name' with your actual repo ID
+repo_id = "DavidCBaines/test_dataset2"
+
+# Load the corpus (contains verse_id and text for each translation)
+corpus_ds = load_dataset(repo_id, data_files="bible_corpus.parquet", split="train")
+
+# Load the metadata (contains info about each translation)
+metadata_ds = load_dataset(repo_id, data_files="bible_metadata.parquet", split="train")
+
+print("--- Corpus Dataset Sample ---")
+print(corpus_ds[0])  # Show the first verse entry
+
+print("\n--- Metadata Dataset Sample ---")
+print(metadata_ds[0])  # Show metadata for the first translation
+
+# --- Example Usage: Filtering ---
+# Convert metadata to pandas for easier filtering
+metadata_df = metadata_ds.to_pandas()
+
+# Get metadata for only English translations
+english_metadata = metadata_df[metadata_df['language_code'] == 'eng']
+print("\n--- Metadata for English Translations ---")
+print(english_metadata[['translation_id', 'language_name', 'verse_count']].head())
+
+# Get the list of English translation IDs (column names in corpus_ds)
+english_translation_ids = english_metadata['translation_id'].tolist()
+
+# You can now use 'english_translation_ids' to select specific columns
+# from corpus_ds if needed, though this often requires converting to pandas
+# or using column operations, depending on the operation (see the sketch below).
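The closing comment describes column selection without demonstrating it. Below is a minimal sketch of both routes, assuming bible_corpus.parquet has a verse_id column plus one text column per translation_id (the schema the comments above imply; adjust names to match your actual files):

# Sketch: keep only the English translation columns from the corpus.
# Assumes corpus columns are 'verse_id' plus one column per translation_id.
corpus_df = corpus_ds.to_pandas()
english_corpus_df = corpus_df[['verse_id'] + english_translation_ids]
print(english_corpus_df.head())

# Alternative that stays in datasets: drop every non-English column.
keep = {'verse_id', *english_translation_ids}
english_ds = corpus_ds.remove_columns(
    [c for c in corpus_ds.column_names if c not in keep]
)
print(english_ds)

The pandas route is convenient for ad hoc analysis but loads the table into memory; remove_columns keeps the Arrow-backed Dataset, which is preferable for large corpora or for chaining further map/filter calls.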