gustawdaniel commited on
Commit
1b0882b
·
1 Parent(s): 0c3d20b

added scripts

Browse files
Files changed (3) hide show
  1. .gitignore +8 -0
  2. download.py +86 -0
  3. process.py +72 -0
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ .idea
2
+ # Ignore all files in the .venv directory
3
+ .venv/*
4
+
5
+ # Except for the pyvenv.cfg file
6
+ !.venv/pyvenv.cfg
7
+
8
+ draft
download.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import urllib.request
3
+ import json
4
+ from bs4 import BeautifulSoup
5
+ from tqdm import tqdm
6
+ import requests
7
+
8
# --- Scrape the Google Books Ngram index page into raw/dict.jsonl ---
url = 'http://storage.googleapis.com/books/ngrams/books/datasetsv2.html'
filename = 'raw/index.html'
data_dir = 'data'

# Make sure the raw/ directory exists before writing into it:
# urlretrieve/open do not create intermediate directories, so a
# fresh checkout would crash here otherwise.
os.makedirs(os.path.dirname(filename), exist_ok=True)

if not os.path.isfile(filename):
    urllib.request.urlretrieve(url, filename)

output_file = 'raw/dict.jsonl'

# Load the HTML file using BeautifulSoup
with open(filename, 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f, 'html.parser')

# Find all h3 elements that directly follow an h1 (one per dataset group)
headers = soup.select('h1 + h3')

# Emit one JSON line per (h1, h3) pair: the h1 text is the title, the h3
# text the subtitle, and everything until the next h1/h3 becomes sections.
with open(output_file, 'w', encoding='utf-8') as f:
    for index, header in enumerate(headers):
        title = header.find_previous_sibling('h1').text
        subtitle = header.text.strip()
        sections = []
        p = header
        while True:
            p = p.find_next_sibling()
            # Stop at end of document or at the next header
            if (p is None) or (p.name in ['h1', 'h3']):
                break
            # Skip bare anchor elements between sections
            if p.name in ['a']:
                continue

            # NOTE(review): assumes every section element contains a <b>
            # label — confirm against the scraped page structure.
            name = p.select_one('b').text
            links = [{'url': a['href'], 'name': a.text} for a in p.select('a')]

            sections.append({
                'name': name,
                'links': links
            })

        data = {
            'title': title,
            'subtitle': subtitle,
            'sections': sections,
        }
        json.dump(data, f, ensure_ascii=False)
        # Newline-separate records without leaving a trailing blank line
        if (title or subtitle) and index != len(headers) - 1:
            f.write('\n')
55
+
56
# --- Download every 1-gram archive listed in raw/dict.jsonl into data/ ---
if not os.path.exists(data_dir):
    os.makedirs(data_dir)

with open('raw/dict.jsonl', 'r') as f:
    items = [json.loads(line) for line in f]
links = [link for item in items for section in item['sections'] for link in section['links']]

for link in tqdm(links):
    url = link['url']
    file_name = os.path.basename(url)
    file_path = os.path.join(data_dir, file_name)

    # Only 1gram archives are collected
    if '1gram' not in url:
        tqdm.write(f"{file_name} does not contain 1gram.")
        continue

    # Skip files that were already downloaded
    if os.path.exists(file_path):
        tqdm.write(f"{file_name} already exists, skipping download.")
        continue

    # Use the response as a context manager so the connection is always
    # released, even when an exception interrupts the download.
    with requests.get(url, stream=True) as response:
        # Fail loudly on HTTP errors instead of silently saving an
        # HTML error page under a .gz file name.
        response.raise_for_status()
        total_size_in_bytes = int(response.headers.get('content-length', 0))
        block_size = 1024  # 1 Kibibyte
        progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
        with open(file_path, 'wb') as file:
            # 'chunk' instead of 'data' — avoids shadowing the module-level
            # 'data' dict built by the scraping step above.
            for chunk in response.iter_content(block_size):
                progress_bar.update(len(chunk))
                file.write(chunk)
        progress_bar.close()
process.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gzip
import json
import os
from collections import defaultdict

from spellchecker import SpellChecker
from tqdm import tqdm
7
# One entry per language to process: display title, pyspellchecker
# language code, and the TSV file the word counts are written to.
config = [
    {'title': language_title, 'lang': language_code, 'out': f'out/{language_code}.tsv'}
    for language_title, language_code in (
        ('English', 'en'),
        ('Spanish', 'es'),
        ('French', 'fr'),
        ('German', 'de'),
        ('Russian', 'ru'),
    )
]
14
+
15
+
16
def get_file_dict(file_path, language):
    """Aggregate per-word match counts from one gzipped Google Ngram file.

    Each line of the file is tab-separated: ngram, year, match_count, ...
    The part-of-speech suffix (e.g. "run_VERB") is stripped, and only
    words known to the spell checker for *language* are kept, which
    filters out OCR noise and foreign tokens.

    :param file_path: path to a .gz 1-gram file (text mode readable)
    :param language: pyspellchecker language code, e.g. 'en'
    :return: defaultdict mapping word -> summed match count over all years
    """
    spell = SpellChecker(language=language)
    count_dict = defaultdict(int)
    # Stream the file line by line: these archives are huge, and the old
    # readlines() call materialized the whole decompressed file in memory.
    with gzip.open(file_path, mode="rt") as f:
        for line in tqdm(f):
            parts = line.split('\t')
            # Drop the "_POS" tag to merge counts per surface word
            word = parts[0].split("_")[0]

            if word in spell.known([word]):
                count_dict[word] += int(parts[2])

    return count_dict
30
+
31
+
32
def url_to_path(url):
    """Map a download URL to its local file path under data/."""
    return 'data/' + url.rsplit('/', 1)[-1]
35
+
36
+
37
def get_paths(title):
    """Return local paths of all single-letter 1-gram archives for *title*.

    Reads the scraped index in raw/dict.jsonl, keeps only the '1-grams'
    section of the language whose title matches, and selects the links
    whose name is a single alphabetic character (a..z archives).
    """
    with open('raw/dict.jsonl', 'r') as f:
        items = [json.loads(line) for line in f]

    paths = []
    for item in items:
        if item['title'] != title:
            continue
        for section in item['sections']:
            if section['name'] != '1-grams':
                continue
            for link in section['links']:
                name = link['name']
                if name.isalpha() and len(name) == 1:
                    paths.append(url_to_path(link['url']))
    return paths
49
+
50
+
51
for single_set in config:
    # Accumulate word -> count totals across every archive of this language.
    set_dict = defaultdict(int)

    paths = get_paths(single_set['title'])

    print(paths)

    for path in tqdm(paths):
        new_dict = get_file_dict(path, single_set['lang'])
        # Sum counts instead of dict.update(): update() would *overwrite*
        # an existing total if the same word appeared in two archives,
        # silently losing counts.
        for word, count in new_dict.items():
            set_dict[word] += count

    # Make sure the output directory exists before opening the TSV;
    # open() does not create intermediate directories.
    os.makedirs(os.path.dirname(single_set['out']), exist_ok=True)

    # NOTE(review): "a" appends, so re-running the script duplicates rows
    # in the output — switch to "w" if each run should replace the file.
    with open(single_set['out'], "a") as f:
        for key, value in set_dict.items():
            f.write(f"{key}\t{value}\n")