"""
Data loader for Rabbinic Hebrew/Aramaic benchmark texts from Sefaria API.

Fetches parallel Hebrew/Aramaic + English text pairs across diverse categories.
"""

import json
import os
import re
import time
from pathlib import Path
from typing import Optional

import requests
import tiktoken

# Token limit for OpenAI embedding models (text-embedding-ada-002, text-embedding-3-*)
# Using cl100k_base encoding
MAX_EMBEDDING_TOKENS = 8192
_tokenizer = None


def get_tokenizer() -> tiktoken.Encoding:
    """Get or create the tiktoken encoder (cached for performance)."""
    global _tokenizer
    if _tokenizer is None:
        _tokenizer = tiktoken.get_encoding("cl100k_base")
    return _tokenizer


def count_tokens(text: str) -> int:
    """Count the number of tokens in a text string using OpenAI's tokenizer."""
    return len(get_tokenizer().encode(text))
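
# A quick illustration of count_tokens (counts depend on the cl100k_base
# vocabulary; short English strings come out near one token per word):
#   count_tokens("hello world")  # -> 2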

# Sefaria host - configurable via environment variable
# Default is the public Sefaria API
DEFAULT_SEFARIA_HOST = "https://www.sefaria.org"
SEFARIA_HOST = os.environ.get("SEFARIA_HOST", DEFAULT_SEFARIA_HOST)


def set_sefaria_host(host: str) -> None:
    """Set the Sefaria host URL (e.g., 'http://localhost:8000')."""
    global SEFARIA_HOST
    # Remove trailing slash if present
    SEFARIA_HOST = host.rstrip("/")


def get_sefaria_host() -> str:
    """Get the current Sefaria host URL."""
    return SEFARIA_HOST
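
# Example: point the loader at a self-hosted Sefaria instance before fetching
# (the trailing slash is normalized away):
#   set_sefaria_host("http://localhost:8000/")
#   get_sefaria_host()  # -> "http://localhost:8000"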

# Text categories with confirmed English translations
BENCHMARK_TEXTS = {
    "talmud_bavli": {
        "category": "Talmud",
        "language": "Aramaic/Hebrew",
        "texts": [
            "Berakhot",
            "Pesachim",
            "Yoma",
            "Megillah",
            "Chagigah",
            "Ketubot",
            "Gittin",
            "Bava Metzia",
            "Sanhedrin",
            "Avodah Zarah",
            "Chullin",
            "Niddah",
        ],
    },
    "talmud_yerushalmi": {
        "category": "Jerusalem Talmud",
        "language": "Aramaic/Hebrew",
        "texts": [
            "Jerusalem Talmud Berakhot",
            "Jerusalem Talmud Kilayim",
            "Jerusalem Talmud Terumot",
            "Jerusalem Talmud Shabbat",
            "Jerusalem Talmud Shekalim",
            "Jerusalem Talmud Sukkah",
            "Jerusalem Talmud Sotah",
            "Jerusalem Talmud Nedarim",
            "Jerusalem Talmud Kiddushin",
            "Jerusalem Talmud Bava Kamma",
            "Jerusalem Talmud Sanhedrin",
            "Jerusalem Talmud Avodah Zarah",
            "Jerusalem Talmud Niddah",
        ],
    },
    "mishnah": {
        "category": "Mishnah",
        "language": "Rabbinic Hebrew",
        "texts": [
            "Mishnah Berakhot",
            "Mishnah Peah",
            "Mishnah Kilayim",
            "Mishnah Shabbat",
            "Mishnah Pesachim",
            "Mishnah Sukkah",
            "Mishnah Taanit",
            "Mishnah Chagigah",
            "Mishnah Yevamot",
            "Mishnah Sotah",
            "Mishnah Kiddushin",
            "Mishnah Bava Kamma",
            "Mishnah Sanhedrin",
            "Mishnah Eduyot",
            "Mishnah Avot",
            "Mishnah Zevachim",
            "Mishnah Chullin",
            "Mishnah Tamid",
            "Mishnah Kelim",
            "Mishnah Parah",
            "Mishnah Niddah",
        ],
    },
    "midrash_rabbah": {
        "category": "Midrash Rabbah",
        "language": "Hebrew/Aramaic",
        "texts": [
            "Bereishit Rabbah",
            "Shemot Rabbah",
            "Vayikra Rabbah",
            "Bamidbar Rabbah",
            "Devarim Rabbah",
            "Shir HaShirim Rabbah",
            "Ruth Rabbah",
            "Eichah Rabbah",
            "Kohelet Rabbah",
            "Esther Rabbah",
        ],
    },
    "tanakh_commentary": {
        "category": "Tanakh Commentary",
        "language": "Hebrew",
        "texts": [
            "Rashi on Genesis",
            "Rashi on Exodus",
            "Rashi on Leviticus",
            "Rashi on Numbers",
            "Rashi on Deuteronomy",
            "Ramban on Genesis",
            "Ramban on Exodus",
            "Ramban on Leviticus",
            "Ramban on Numbers",
            "Ramban on Deuteronomy",
            "Radak on Genesis",
            "Akeidat Yitzchak",
            "Rabbeinu Behaye, Bereshit",
            "Rabbeinu Behaye, Shemot",
            "Rabbeinu Behaye, Vayikra",
            "Rabbeinu Behaye, Bamidbar",
            "Rabbeinu Behaye, Devarim",
        ],
    },
    "hasidic_kabbalistic": {
        "category": "Hasidic/Kabbalistic",
        "language": "Hebrew",
        "texts": [
            "Likutei Moharan",
            "Tomer Devorah",
            "Or Neerav, PART I",
            "Or Neerav, PART II",
            "Or Neerav, PART III",
            "Shekel HaKodesh, On Abstinence",
            "Shekel HaKodesh, On Wisdom",
            "Kalach Pitchei Chokhmah",
        ],
    },
    "halacha": {
        "category": "Halacha",
        "language": "Hebrew",
        "texts": [
            "Sefer HaChinukh",
            "Shev Shmateta, Introduction",
            "Mishneh Torah, Human Dispositions",
            "Sefer Yesodei HaTorah",
        ],
    },
    "philosophy": {
        "category": "Philosophy",
        "language": "Hebrew",
        "texts": [
            "Sefer HaIkkarim, Maamar 1",
            "Sefer HaIkkarim, Maamar 2",
            "Sefer HaIkkarim, Maamar 3",
            "Guide for the Perplexed, Part 1",
            "Guide for the Perplexed, Part 2",
            "Guide for the Perplexed, Part 3",
        ],
    },
    "targum": {
        "category": "Targum",
        "language": "Aramaic",
        "texts": [
            "Aramaic Targum to Song of Songs",
        ],
    },
    "mussar": {
        "category": "Mussar/Ethics",
        "language": "Hebrew",
        "texts": [
            "Iggeret HaRamban",
            "Shulchan Shel Arba",
            "Chafetz Chaim",
            "Yesod HaYirah, On Endurance",
            "Yesod HaYirah, On Humility",
            "Kav HaYashar",
        ],
    },
}
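
# Example: enumerate every configured text with its category label:
#   for info in BENCHMARK_TEXTS.values():
#       for title in info["texts"]:
#           print(f'{info["category"]}: {title}')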


def strip_html(text: str) -> str:
    """
    Remove HTML tags from text.
    
    Some tags are dropped completely with their content:
    - <sup class="footnote-marker">...</sup>
    - <i class="footnote"...>...</i>
    
    Other tags are stripped but their inner content is preserved.
    """
    # First, remove footnote markers (simple, no nesting issues)
    clean = re.sub(r'<sup[^>]*class="footnote-marker"[^>]*>.*?</sup>', '', text, flags=re.DOTALL)
    
    # Remove footnotes with nested <i> tags - need to handle nesting
    # Strategy: find footnote start, then count <i> and </i> to find matching close
    clean = _remove_footnote_tags(clean)
    
    # Then strip remaining HTML tags (keeping their content)
    clean = re.sub(r"<[^>]+>", "", clean)
    
    # Clean up extra whitespace
    clean = re.sub(r"\s+", " ", clean).strip()
    return clean
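
# Illustrative behavior (markup shapes vary across Sefaria texts):
#   strip_html('In <b>the beginning</b><i class="footnote">a note</i>.')
#   -> 'In the beginning.'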


def _remove_footnote_tags(text: str) -> str:
    """Remove <i class="footnote"...>...</i> tags, handling nested <i> tags."""
    result = []
    i = 0
    
    while i < len(text):
        # Look for footnote opening tag
        match = re.match(r'<i[^>]*class="footnote"[^>]*>', text[i:], flags=re.IGNORECASE)
        if match:
            # Found a footnote, now find the matching </i>
            start = i + match.end()
            depth = 1
            j = start
            
            while j < len(text) and depth > 0:
                if text[j:j+3].lower() == '<i ' or text[j:j+3].lower() == '<i>':
                    depth += 1
                    j += 1
                elif text[j:j+4].lower() == '</i>':
                    depth -= 1
                    if depth == 0:
                        # Skip past the closing </i>
                        j += 4
                        break
                    j += 1
                else:
                    j += 1
            
            # Skip the entire footnote (from i to j)
            i = j
        else:
            result.append(text[i])
            i += 1
    
    return ''.join(result)
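
# The depth counter above handles footnotes that themselves contain <i> tags:
#   _remove_footnote_tags('A<i class="footnote">note <i>em</i> end</i>B')
#   -> 'AB'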


def extract_bold_only(text: str) -> str:
    """
    Extract only content within <b>...</b> tags, for Talmud Bavli.
    
    The Steinsaltz English has bold for actual translation and non-bold for
    elucidation. We only want the translation.
    
    Example:
        "<b>The Rabbis say:</b> The time for... is <b>until midnight.</b>"
        -> "The Rabbis say: until midnight."
    """
    # Find all content within <b>...</b> tags
    bold_parts = re.findall(r'<b>(.*?)</b>', text, flags=re.DOTALL)
    
    if not bold_parts:
        # No bold tags found, fall back to regular strip
        return strip_html(text)
    
    # Strip any nested HTML from each bold part and join with spaces
    cleaned_parts = [strip_html(part) for part in bold_parts]
    
    # Join parts, ensuring proper spacing
    result = ' '.join(cleaned_parts)
    
    # Clean up extra whitespace
    result = re.sub(r"\s+", " ", result).strip()
    return result


def get_text_from_sefaria(ref: str, retries: int = 3) -> Optional[dict]:
    """
    Fetch a text from Sefaria API.
    
    Args:
        ref: Sefaria reference string (e.g., "Berakhot.2a")
        retries: Number of retry attempts
        
    Returns:
        Dict with 'he' (Hebrew/Aramaic) and 'en' (English) texts, or None if the request failed or the API returned an error
    """
    url = f"{SEFARIA_HOST}/api/texts/{ref}"
    params = {"context": 0}
    
    for attempt in range(retries):
        try:
            response = requests.get(url, params=params, timeout=30)
            if response.status_code == 200:
                data = response.json()
                # Check if response contains an error
                if "error" in data:
                    return None
                return data
            elif response.status_code == 429:
                # Rate limited, wait and retry
                time.sleep(2 ** attempt)
            else:
                return None
        except requests.RequestException:
            if attempt < retries - 1:
                time.sleep(1)
            continue
    return None
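
# Sketch of the response shape this module consumes (real payloads carry many
# more keys, and "he"/"text" may be nested lists for structured texts):
#   data = get_text_from_sefaria("Berakhot.2a")
#   data["he"]    # list of Hebrew/Aramaic segment strings
#   data["text"]  # list of English segment strings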


def get_index_from_sefaria(title: str) -> Optional[dict]:
    """
    Get index/structure information for a text.
    
    Args:
        title: The title of the text
        
    Returns:
        Index data, or None if the request failed or the text was not found
    """
    url = f"{SEFARIA_HOST}/api/index/{title}"
    try:
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            data = response.json()
            # Check if response contains an error
            if "error" in data:
                return None
            return data
    except requests.RequestException:
        pass
    return None


def extract_parallel_segments(data: dict, ref: str, category: str = "") -> list[dict]:
    """
    Extract parallel Hebrew/English segments from API response.
    
    Args:
        data: API response data
        ref: The reference string
        category: Category name (used for special handling, e.g., "Talmud")
        
    Returns:
        List of dicts with 'ref', 'he', 'en' keys
    """
    segments = []
    
    he_text = data.get("he", [])
    en_text = data.get("text", [])
    
    # Handle nested arrays (common in Talmud)
    if he_text and isinstance(he_text, list):
        # Flatten if nested
        if he_text and isinstance(he_text[0], list):
            he_flat = []
            en_flat = []
            for he_seg, en_seg in zip(he_text, en_text):
                if isinstance(he_seg, list):
                    he_flat.extend(he_seg)
                    en_flat.extend(en_seg if isinstance(en_seg, list) else [en_seg])
                else:
                    he_flat.append(he_seg)
                    en_flat.append(en_seg)
            he_text = he_flat
            en_text = en_flat
    
    # Handle single string responses
    if isinstance(he_text, str):
        he_text = [he_text]
    if isinstance(en_text, str):
        en_text = [en_text]
    
    # For Talmud Bavli, extract only bold text (actual translation, not elucidation)
    is_bavli = category == "Talmud"
    
    # Pair up segments
    for i, (he, en) in enumerate(zip(he_text, en_text)):
        if he and en:
            he_clean = strip_html(str(he))
            # Use bold-only extraction for Bavli English
            if is_bavli:
                en_clean = extract_bold_only(str(en))
            else:
                en_clean = strip_html(str(en))
            
            # Skip empty or very short segments
            if len(he_clean) > 10 and len(en_clean) > 10:
                # Check token limits for OpenAI embedding models
                he_tokens = count_tokens(he_clean)
                en_tokens = count_tokens(en_clean)
                
                if he_tokens > MAX_EMBEDDING_TOKENS:
                    print(f"  Skipping {ref}:{i+1} - Hebrew text exceeds token limit ({he_tokens} > {MAX_EMBEDDING_TOKENS})")
                    continue
                if en_tokens > MAX_EMBEDDING_TOKENS:
                    print(f"  Skipping {ref}:{i+1} - English text exceeds token limit ({en_tokens} > {MAX_EMBEDDING_TOKENS})")
                    continue
                
                segments.append({
                    "ref": f"{ref}:{i+1}" if ":" not in ref else ref,
                    "he": he_clean,
                    "en": en_clean,
                })
    
    return segments
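
# Minimal input/output sketch (strings shortened; both sides must be longer
# than 10 characters to survive the filters above):
#   extract_parallel_segments(
#       {"he": ["<b>some hebrew segment</b>"], "text": ["some english segment"]},
#       "Some Text 1",
#   )
#   -> [{"ref": "Some Text 1:1", "he": "some hebrew segment",
#        "en": "some english segment"}]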


def fetch_text_pairs(
    text_title: str,
    category: str,
    max_segments: int = 500,
    delay: float = 0.5
) -> list[dict]:
    """
    Fetch parallel text pairs for a given text.
    
    Args:
        text_title: Title of the text to fetch
        category: Category name for metadata
        max_segments: Maximum segments to fetch per text
        delay: Delay between API calls (rate limiting)
        
    Returns:
        List of segment dicts with ref, he, en, category
    """
    pairs = []
    
    # Get text index to understand structure
    index = get_index_from_sefaria(text_title)
    if not index:
        print(f"  Could not get index for {text_title}")
        return pairs
    
    # Determine refs to fetch based on text structure
    schema = index.get("schema", {})
    
    # For simple texts, just fetch the whole thing
    if schema.get("nodeType") == "JaggedArrayNode":
        depth = schema.get("depth", 2)
        address_types = schema.get("addressTypes", [])
        
        # Check if this uses Talmud daf notation (2a, 2b, etc.)
        uses_talmud_daf = address_types and address_types[0] == "Talmud"
        
        if uses_talmud_daf:
            # Talmud-style structure with daf notation (e.g., Berakhot.2a)
            # Start from daf 3 for Jerusalem Talmud to avoid overlap with Bavli
            start_daf = 3 if category == "Jerusalem Talmud" else 2
            # Fetch daf by daf
            done = False
            for daf_num in range(start_daf, 200):
                if len(pairs) >= max_segments or done:
                    break
                
                for side in ["a", "b"]:
                    if len(pairs) >= max_segments:
                        break
                        
                    ref = f"{text_title}.{daf_num}{side}"
                    data = get_text_from_sefaria(ref)
                    
                    # None means API error (daf doesn't exist)
                    if data is None:
                        if side == "a":
                            done = True  # Daf doesn't exist, we're done with tractate
                        break
                    
                    if not data.get("he"):
                        time.sleep(delay)  # still rate-limit before trying the next side
                        continue  # Empty side, try next
                        
                    segments = extract_parallel_segments(data, ref, category)
                    for seg in segments:
                        seg["category"] = category
                    pairs.extend(segments)
                    
                    time.sleep(delay)
                
        elif depth == 1:
            # Single-level structure (e.g., Iggeret HaRamban - just paragraphs)
            # Fetch the whole text at once
            data = get_text_from_sefaria(text_title)
            if data and data.get("he"):
                segments = extract_parallel_segments(data, text_title, category)
                for seg in segments:
                    seg["category"] = category
                pairs.extend(segments)
                
        elif depth == 2:
            # Two-level structure (e.g., Mishnah chapter:verse)
            # Start from chapter 2 for Mishnah to avoid overlap with Talmud
            start_chapter = 2 if category == "Mishnah" else 1
            consecutive_empty = 0
            # Fetch chapter by chapter
            for chapter in range(start_chapter, 200):  # Reasonable upper bound
                if len(pairs) >= max_segments:
                    break
                    
                ref = f"{text_title}.{chapter}"
                data = get_text_from_sefaria(ref)
                
                # None means API error (ref doesn't exist)
                if data is None:
                    break
                
                # Empty array means chapter exists but has no content
                if not data.get("he"):
                    consecutive_empty += 1
                    if consecutive_empty >= 5:
                        break  # Probably past end of book
                    time.sleep(delay)
                    continue
                    
                consecutive_empty = 0
                segments = extract_parallel_segments(data, ref, category)
                for seg in segments:
                    seg["category"] = category
                pairs.extend(segments)
                
                time.sleep(delay)
                
        elif depth >= 3:
            # Three+ level structure (e.g., commentary chapter:verse:comment)
            # Fetch chapter.verse by chapter.verse
            # For Jerusalem Talmud, start from 1.3 to avoid overlap with Bavli
            start_verse = 3 if category == "Jerusalem Talmud" else 1
            consecutive_empty_chapters = 0
            for chapter in range(1, 200):
                if len(pairs) >= max_segments:
                    break
                
                chapter_had_content = False
                # Use start_verse only for first chapter
                first_verse = start_verse if chapter == 1 else 1
                for verse in range(first_verse, 100):
                    if len(pairs) >= max_segments:
                        break
                    
                    ref = f"{text_title}.{chapter}.{verse}"
                    data = get_text_from_sefaria(ref)
                    
                    # None means API error (ref doesn't exist)
                    if data is None:
                        break  # No more verses in this chapter
                    
                    # Empty array means verse exists but has no content
                    if not data.get("he"):
                        time.sleep(delay)  # still rate-limit before the next request
                        continue
                        
                    chapter_had_content = True
                    segments = extract_parallel_segments(data, ref, category)
                    for seg in segments:
                        seg["category"] = category
                    pairs.extend(segments)
                    
                    time.sleep(delay)
                
                if not chapter_had_content:
                    consecutive_empty_chapters += 1
                    if consecutive_empty_chapters >= 5:
                        break  # Probably past end of book
                else:
                    consecutive_empty_chapters = 0
    
    else:
        # Complex structure (SchemaNode) - try different ref patterns
        # First try simple numeric refs (works for Sefer HaChinukh style)
        consecutive_empty = 0
        for section in range(1, 1000):
            if len(pairs) >= max_segments:
                break
            
            ref = f"{text_title}.{section}"
            data = get_text_from_sefaria(ref)
            
            if data is None:
                break
            
            if not data.get("he"):
                consecutive_empty += 1
                if consecutive_empty >= 5:
                    break
                time.sleep(delay)
                continue
                
            consecutive_empty = 0
            segments = extract_parallel_segments(data, ref, category)
            for seg in segments:
                seg["category"] = category
            pairs.extend(segments)
            
            time.sleep(delay)
        
        # If we haven't reached max_segments, try chapter.verse style refs (commentary pattern)
        if len(pairs) < max_segments:
            consecutive_empty = 0
            for chapter in range(1, 100):
                if len(pairs) >= max_segments:
                    break
                
                chapter_had_content = False
                for verse in range(1, 50):
                    if len(pairs) >= max_segments:
                        break
                    
                    ref = f"{text_title}.{chapter}.{verse}"
                    data = get_text_from_sefaria(ref)
                    
                    if data is None:
                        break  # This verse doesn't exist, try next chapter
                    
                    if data.get("he"):
                        chapter_had_content = True
                        consecutive_empty = 0
                        segments = extract_parallel_segments(data, ref, category)
                        for seg in segments:
                            seg["category"] = category
                        pairs.extend(segments)
                    
                    time.sleep(delay)
                
                if not chapter_had_content:
                    consecutive_empty += 1
                    if consecutive_empty >= 5:
                        break
    
    return pairs[:max_segments]
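
# Typical usage (hits the live API, so expect it to be slow and rate-limited;
# the exact refs returned depend on the text's structure in Sefaria's index):
#   pairs = fetch_text_pairs("Mishnah Berakhot", "Mishnah", max_segments=50)
#   pairs[0]  # -> {"ref": "Mishnah Berakhot.2:1", "he": "...", "en": "...",
#             #     "category": "Mishnah"}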


def build_benchmark_dataset(
    output_path: str = "benchmark_data/benchmark.json",
    segments_per_text: int = 200,
    total_target: int = 10000,
) -> list[dict]:
    """
    Build the full benchmark dataset from all configured texts.
    
    Args:
        output_path: Path to save the benchmark JSON
        segments_per_text: Target segments per text
        total_target: Overall target segment count
        
    Returns:
        List of all benchmark pairs
    """
    all_pairs = []
    
    for category_info in BENCHMARK_TEXTS.values():
        category_name = category_info["category"]
        texts = category_info["texts"]
        
        print(f"\n{'='*60}")
        print(f"Processing category: {category_name}")
        print(f"{'='*60}")
        
        for text_title in texts:
            if len(all_pairs) >= total_target:
                break
                
            print(f"\nFetching: {text_title}")
            
            pairs = fetch_text_pairs(
                text_title,
                category_name,
                max_segments=segments_per_text,
            )
            
            print(f"  Got {len(pairs)} pairs")
            all_pairs.extend(pairs)
            
        if len(all_pairs) >= total_target:
            break
    
    # Save to file
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)
    
    with open(output_file, "w", encoding="utf-8") as f:
        json.dump(all_pairs, f, ensure_ascii=False, indent=2)
    
    print(f"\n{'='*60}")
    print(f"Total pairs collected: {len(all_pairs)}")
    print(f"Saved to: {output_path}")
    
    # Save stats to markdown file
    stats = get_benchmark_stats(all_pairs)
    save_stats_markdown(stats, output_path)
    
    return all_pairs
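
# Example: build a smaller dataset for a quick smoke test (the output path
# here is just an illustration):
#   pairs = build_benchmark_dataset(
#       output_path="benchmark_data/smoke.json",
#       segments_per_text=20,
#       total_target=200,
#   )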


def load_benchmark_dataset(
    source: str = "Sefaria/Rabbinic-Hebrew-English-Pairs",
    use_local: bool = False,
) -> list[dict]:
    """
    Load the benchmark dataset from HuggingFace Hub or local file.
    
    Args:
        source: HuggingFace dataset ID or local file path
        use_local: If True, load from local JSON file instead of HuggingFace
        
    Returns:
        List of benchmark pairs with keys: ref, he, en, category
    """
    if use_local or source.endswith(".json"):
        # Load from local JSON file
        with open(source, "r", encoding="utf-8") as f:
            return json.load(f)
    
    # Load from HuggingFace Hub
    try:
        from datasets import load_dataset
        
        print(f"Loading benchmark from HuggingFace: {source}")
        ds = load_dataset(source, split="train")
        return ds.to_list()
    except Exception as e:
        print(f"Failed to load from HuggingFace: {e}")
        # Fallback to local file if it exists
        local_path = "benchmark_data/benchmark.json"
        if Path(local_path).exists():
            print(f"Falling back to local file: {local_path}")
            with open(local_path, "r", encoding="utf-8") as f:
                return json.load(f)
        raise
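
# Examples:
#   pairs = load_benchmark_dataset()                                 # HuggingFace Hub
#   pairs = load_benchmark_dataset("benchmark_data/benchmark.json")  # local JSON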


def get_benchmark_stats(pairs: list[dict]) -> dict:
    """
    Get statistics about the benchmark dataset.
    
    Args:
        pairs: List of benchmark pairs
        
    Returns:
        Dict with category counts and other stats
    """
    from collections import Counter
    
    categories = Counter(p["category"] for p in pairs)
    
    he_lengths = [len(p["he"]) for p in pairs]
    en_lengths = [len(p["en"]) for p in pairs]
    
    return {
        "total_pairs": len(pairs),
        "categories": dict(categories),
        "avg_he_length": sum(he_lengths) / len(he_lengths) if he_lengths else 0,
        "avg_en_length": sum(en_lengths) / len(en_lengths) if en_lengths else 0,
    }
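
# Example return value for a one-pair dataset:
#   get_benchmark_stats([{"ref": "X 1:1", "he": "אבגדה", "en": "abcde",
#                         "category": "Mishnah"}])
#   -> {"total_pairs": 1, "categories": {"Mishnah": 1},
#       "avg_he_length": 5.0, "avg_en_length": 5.0}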


def save_stats_markdown(stats: dict, data_path: str) -> str:
    """
    Save benchmark statistics to a markdown file alongside the data.
    
    Args:
        stats: Statistics dict from get_benchmark_stats()
        data_path: Path to the data file (used to derive stats file path)
        
    Returns:
        Path to the saved markdown file
    """
    from datetime import datetime
    
    # Derive markdown path from data path
    data_file = Path(data_path)
    stats_path = data_file.with_suffix(".stats.md")
    
    # Build markdown content
    lines = [
        "# Benchmark Dataset Statistics",
        "",
        f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        "",
        "## Summary",
        "",
        f"- **Total pairs:** {stats['total_pairs']:,}",
        f"- **Average Hebrew length:** {stats['avg_he_length']:.0f} chars",
        f"- **Average English length:** {stats['avg_en_length']:.0f} chars",
        "",
        "## Category Breakdown",
        "",
        "| Category | Count |",
        "|----------|-------|",
    ]
    
    # Sort categories by count (descending)
    sorted_categories = sorted(
        stats["categories"].items(),
        key=lambda x: x[1],
        reverse=True
    )
    
    for category, count in sorted_categories:
        lines.append(f"| {category} | {count:,} |")
    
    lines.append("")
    
    # Write to file
    with open(stats_path, "w", encoding="utf-8") as f:
        f.write("\n".join(lines))
    
    print(f"Stats saved to: {stats_path}")
    return str(stats_path)
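
# The stats file lands next to the data file:
#   save_stats_markdown(stats, "benchmark_data/benchmark.json")
#   # -> writes benchmark_data/benchmark.stats.md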


if __name__ == "__main__":
    # Build the benchmark dataset
    print("Building Rabbinic Hebrew/Aramaic benchmark dataset...")
    pairs = build_benchmark_dataset()
    
    # Print stats
    stats = get_benchmark_stats(pairs)
    print(f"\nDataset Statistics:")
    print(f"  Total pairs: {stats['total_pairs']}")
    print(f"  Categories: {stats['categories']}")
    print(f"  Avg Hebrew length: {stats['avg_he_length']:.0f} chars")
    print(f"  Avg English length: {stats['avg_en_length']:.0f} chars")