NHLOCAL commited on
Commit
59bad8c
·
verified ·
1 Parent(s): 777f3f4

Upload convert_dataset.py

Browse files
Files changed (1) hide show
  1. convert_dataset.py +145 -0
convert_dataset.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
def process_standard_structure(data, filename):
    """Flatten a standard-structure source file (Tanakh, Shas, Yerushalmi, Mishna).

    Expected layout: category -> 'subcategories' -> 'books' ->
    {book_name: {'pages': int}}. Produces one flat record per book.

    Args:
        data: Parsed JSON dict for one source file.
        filename: Name of the source file, recorded on every record.

    Returns:
        List of flat record dicts.
    """
    category = data.get('name')
    kind = data.get('content_type')
    # One record per book, across all subcategories.
    return [
        {
            "source_file": filename,
            "main_category": category,
            "subcategory": sub.get('name'),
            "book": title,
            "content_type": kind,
            "count": info.get('pages'),
        }
        for sub in data.get('subcategories', [])
        for title, info in sub.get('books', {}).items()
    ]
26
+
27
def process_rambam(data, filename):
    """Flatten the Rambam source file, preserving its split into 'parts'.

    Each book lists inclusive chapter ranges under 'parts'
    ([{'start': a, 'end': b}, ...]); the record's 'count' is the total number
    of chapters summed over all parts, and the raw parts list is kept.

    Args:
        data: Parsed JSON dict for the Rambam file.
        filename: Name of the source file, recorded on every record.

    Returns:
        List of flat record dicts.
    """
    category = data.get('name')
    kind = data.get('content_type')
    rows = []
    for sub in data.get('subcategories', []):
        sub_name = sub.get('name')
        for title, info in sub.get('books', {}).items():
            parts = info.get('parts', [])
            # Each part spans an inclusive [start, end] range of chapters.
            chapters = 0
            for part in parts:
                chapters += part.get('end', 0) - part.get('start', 0) + 1
            rows.append({
                "source_file": filename,
                "main_category": category,
                "subcategory": sub_name,
                "book": title,
                "content_type": kind,
                "count": chapters,
                "parts": parts,  # keep the original detailed structure
            })
    return rows
53
+
54
def process_halakha(data, filename):
    """Flatten the halakha source file, which mixes two book layouts.

    Tur / Shulchan Aruch books carry a flat 'pages' number, while
    Mishna Berura / Biur Halakha books carry siman ranges under 'parts'.
    Here 'content_type' lives on each subcategory rather than on the file
    root. An optional 'exclude' list is carried through as 'excluded_units'.

    Args:
        data: Parsed JSON dict for the halakha file.
        filename: Name of the source file, recorded on every record.

    Returns:
        List of flat record dicts.
    """
    category = data.get('name')
    rows = []
    for sub in data.get('subcategories', []):
        sub_name = sub.get('name')
        kind = sub.get('content_type')
        for title, info in sub.get('books', {}).items():
            row = {
                "source_file": filename,
                "main_category": category,
                "subcategory": sub_name,
                "book": title,
                "content_type": kind,
            }
            if 'parts' in info:
                # Mishna Berura / Biur Halakha layout: sum the inclusive
                # siman ranges across all parts.
                parts = info.get('parts', [])
                row['count'] = sum(
                    p.get('end', 0) - p.get('start', 0) + 1 for p in parts
                )
                row['parts'] = parts  # keep the detailed breakdown
            elif 'pages' in info:
                # Tur / Shulchan Aruch layout: a flat page count.
                row['count'] = info.get('pages')
            if 'exclude' in info:
                row['excluded_units'] = info.get('exclude')
            rows.append(row)
    return rows
93
+
94
+
95
def main():
    """Convert the known source JSON files into a single JSONL dataset.

    Routes each input file to the processor that understands its layout,
    collects all flat records, and writes them to
    'judaic_texts_dataset.jsonl'. Missing or malformed inputs are reported
    and skipped rather than aborting the run.
    """
    # Map each input filename to the processing function for its structure.
    file_processors = {
        'shas.json': process_standard_structure,
        'yerushalmi.json': process_standard_structure,
        'mishna.json': process_standard_structure,
        'tanach.json': process_standard_structure,
        'rambam.json': process_rambam,
        'halakha.json': process_halakha
    }

    all_records = []

    # Iterate the mapping directly instead of materializing a key list
    # and re-looking up the processor per filename.
    for filename, processor_func in file_processors.items():
        if not os.path.exists(filename):
            # BUG FIX: these messages previously printed the literal text
            # '(unknown)' instead of interpolating the actual filename.
            print(f"Warning: File '{filename}' not found. Skipping.")
            continue

        try:
            with open(filename, 'r', encoding='utf-8') as f:
                data = json.load(f)

            records = processor_func(data, filename)
            all_records.extend(records)
            print(f"Processed '{filename}', added {len(records)} records.")

        except json.JSONDecodeError:
            print(f"Error: Could not decode JSON from '{filename}'.")
        except Exception as e:
            print(f"An unexpected error occurred while processing '{filename}': {e}")

    output_filename = 'judaic_texts_dataset.jsonl'
    try:
        with open(output_filename, 'w', encoding='utf-8') as f_out:
            for record in all_records:
                # ensure_ascii=False keeps the Hebrew text readable in the output.
                f_out.write(json.dumps(record, ensure_ascii=False) + '\n')

        print(f"\nSuccessfully created dataset file: '{output_filename}' with {len(all_records)} total records.")

    except Exception as e:
        print(f"An error occurred while writing the output file: {e}")


if __name__ == '__main__':
    main()