NejimakiTori committed on
Commit
ae4c236
·
verified ·
1 Parent(s): 7613ac5

Script that can create a JSON file from the raw dataset files and vice versa

Browse files
Files changed (1) hide show
  1. manage_dataset_structure.py +128 -0
manage_dataset_structure.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
from pathlib import Path
import re

# Name of the aggregated dataset file produced by create_json() and
# consumed by load_json() / decompose_json().
JSON_TITLE = 'ruwikibench_articles.json'
6
+
7
def create_json():
    """Bundle the raw dataset files into a single JSON file (JSON_TITLE).

    Reads article titles from 'small_articles_data.txt' in the current
    directory, then for each title collects its HTML page from Html/, its
    list of source ids from Downloaded_Sources_List/, and the text of each
    downloaded source from Sources/<title>/, writing everything out as one
    JSON array.  Inverse operation: decompose_json().
    """
    base_dir = Path('.')
    articles_list = base_dir / 'small_articles_data.txt'
    html_dir = base_dir / 'Html'
    sources_dir = base_dir / 'Sources'
    sources_list_dir = base_dir / 'Downloaded_Sources_List'
    output_file = base_dir / JSON_TITLE

    with open(articles_list, 'r', encoding='utf-8') as f:
        article_titles = [line.strip() for line in f if line.strip()]

    dataset = []

    for title in sorted(article_titles):
        true_title = title
        # Strip characters that are illegal in Windows file names; the raw
        # files on disk were saved under this cleaned title.
        title = re.sub(r'[<>:"/\\|?*]', '', title)
        entry = {'article_name': true_title, 'article_cleared_name': title}

        html_file = html_dir / f'{title}.html'
        with open(html_file, 'r', encoding='utf-8') as f:
            entry['html'] = f.read()

        sources_list_file = sources_list_dir / f'{title}.json'.replace(' ', '_')
        with open(sources_list_file, 'r', encoding='utf-8') as f:
            source_ids = json.load(f)
        print(f'Number of links for article {title}: {len(source_ids)}')
        article_sources_dir = sources_dir / title
        entry['sources'] = []

        # Sort numerically on the N in 'source_N.txt'; lexicographic order
        # would put source_10 before source_2.
        source_files = sorted(
            article_sources_dir.glob('source_*.txt'),
            key=lambda x: int(x.stem.split('_')[1])
        )

        # zip() stops at the shorter sequence, so a mismatch between the id
        # list and the downloaded files would silently drop data -- warn.
        if len(source_ids) != len(source_files):
            print(f'WARNING: {title}: {len(source_ids)} source ids vs '
                  f'{len(source_files)} source files; extra items are dropped')

        for source_id, source_file in zip(source_ids, source_files):
            with open(source_file, 'r', encoding='utf-8') as f:
                source_text = f.read()

            entry['sources'].append({
                'source_id': source_id,
                'source_text': source_text,
                'file': source_file.name
            })

        dataset.append(entry)

        print(f'Processed: {title}')

    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(dataset, f, ensure_ascii=False, indent=2)

    print(f'Articles processed: {len(dataset)}')
60
+
61
def load_json(i=17, source_idx=10):
    """Sanity-check the aggregated JSON file by printing one sample entry.

    Args:
        i: index of the article to inspect (default 17, the original
           hard-coded value).
        source_idx: index of the source within that article to inspect
           (default 10, the original hard-coded value).

    Raises:
        IndexError: if the file has fewer articles/sources than requested.
    """
    with open(JSON_TITLE, 'r', encoding='utf-8') as f:
        data = json.load(f)

    print(len(data))
    print(data[i]['article_name'])
    print(data[i]['article_cleared_name'])
    # Truncate large text fields to a 100-char preview.
    print(data[i]['html'][:100])
    print(data[i]['sources'][source_idx]['source_id'])
    print(data[i]['sources'][source_idx]['source_text'][:100])
    print(data[i]['sources'][source_idx]['file'])
74
+
75
def decompose_json():
    """Restore the raw file layout from the aggregated JSON file.

    Inverse of create_json(): recreates Html/, Sources/<title>/ and
    Downloaded_Sources_List/ plus 'small_articles_data.txt' in the current
    directory from the contents of JSON_TITLE.  Existing files with the
    same names are overwritten.
    """
    base_dir = Path('.')
    json_file = base_dir / JSON_TITLE

    html_dir = base_dir / 'Html'
    sources_dir = base_dir / 'Sources'
    sources_list_dir = base_dir / 'Downloaded_Sources_List'

    html_dir.mkdir(exist_ok=True)
    sources_dir.mkdir(exist_ok=True)
    sources_list_dir.mkdir(exist_ok=True)

    with open(json_file, 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    article_titles = []

    for article in dataset:
        true_title = article['article_name']
        # Filesystem-safe title used for file and directory names.
        title = article['article_cleared_name']
        article_titles.append(true_title)

        html_file = html_dir / f'{title}.html'
        with open(html_file, 'w', encoding='utf-8') as f:
            f.write(article['html'])

        source_ids = [source['source_id'] for source in article['sources']]
        sources_list_file = sources_list_dir / f'{title}.json'.replace(' ', '_')
        with open(sources_list_file, 'w', encoding='utf-8') as f:
            json.dump(source_ids, f, ensure_ascii=False, indent=2)

        article_sources_dir = sources_dir / title
        article_sources_dir.mkdir(exist_ok=True)

        # Original file names are stored in the JSON, so no index
        # bookkeeping is needed here.
        for source in article['sources']:
            source_file = article_sources_dir / source['file']
            with open(source_file, 'w', encoding='utf-8') as f:
                f.write(source['source_text'])

        print(f'Reinstalled: {title}')

    articles_list = base_dir / 'small_articles_data.txt'
    with open(articles_list, 'w', encoding='utf-8') as f:
        f.write('\n'.join(article_titles))

    print('\nDone!')
122
+
123
+
124
# Entry points -- uncomment exactly one of the calls below before running
# this script directly:
#create_json()
#load_json()
#decompose_json()