|
|
import gzip |
|
|
from collections import defaultdict |
|
|
from spellchecker import SpellChecker |
|
|
import json |
|
|
from tqdm import tqdm |
|
|
|
|
|
# Languages to process: display title (matched against entries in
# raw/dict.jsonl), pyspellchecker language code, and output TSV path.
config = [
    {'title': title, 'lang': code, 'out': f'out/{code}.tsv'}
    for title, code in [
        ('English', 'en'),
        ('Spanish', 'es'),
        ('French', 'fr'),
        ('German', 'de'),
        ('Russian', 'ru'),
    ]
]
|
|
|
|
|
|
|
|
def get_file_dict(file_path, language):
    """Count dictionary-word occurrences in one gzipped 1-gram file.

    Each line is expected to be tab-separated with the n-gram in column 0
    (possibly POS-tagged, e.g. "word_NOUN") and a count in column 2 —
    assumes the Google Books 1-gram TSV layout; TODO confirm against the
    actual downloads. Only words the spell checker recognizes for
    *language* are kept, which filters out misspellings and non-words.

    Args:
        file_path: path to a gzip-compressed n-gram file.
        language: language code understood by SpellChecker (e.g. 'en').

    Returns:
        defaultdict(int) mapping word -> summed count.
    """
    spell = SpellChecker(language=language)
    count_dict = defaultdict(int)

    # Stream the file line by line instead of readlines(): these dumps are
    # large, and materializing every line in memory is unnecessary.
    with gzip.open(file_path, mode="rt") as f:
        for line in tqdm(f):
            parts = line.split('\t')
            # Strip the POS tag suffix ("word_NOUN" -> "word").
            word = parts[0].split("_")[0]

            # known() returns the subset of the given words present in the
            # language dictionary; membership acts as the validity test.
            if word in spell.known([word]):
                count_dict[word] += int(parts[2])

    return count_dict
|
|
|
|
|
|
|
|
def url_to_path(url):
    """Map a download URL to its local path under data/ (keeps the basename)."""
    basename = url.rsplit('/', 1)[-1]
    return 'data/' + basename
|
|
|
|
|
|
|
|
def get_paths(title):
    """Return local data/ paths for the single-letter 1-gram files of *title*.

    Scans raw/dict.jsonl (one JSON object per line), keeps entries whose
    title matches, takes links from their '1-grams' sections whose name is
    a single alphabetic character (a-z shards), and converts each link URL
    to its local path via url_to_path().
    """
    with open('raw/dict.jsonl', 'r') as f:
        entries = [json.loads(raw) for raw in f]

    urls = []
    for entry in entries:
        if entry['title'] != title:
            continue
        for section in entry['sections']:
            if section['name'] != '1-grams':
                continue
            for link in section['links']:
                name = link['name']
                if name.isalpha() and len(name) == 1:
                    urls.append(link['url'])

    return [url_to_path(url) for url in urls]
|
|
|
|
|
|
|
|
# Driver: for each configured language, aggregate counts from all of its
# 1-gram files and dump the result as word<TAB>count lines.
for single_set in config:
    # Accumulated word -> total count across all files for this language.
    set_dict = defaultdict(int)

    paths = get_paths(single_set['title'])

    print(paths)

    for path in tqdm(paths):
        new_dict = get_file_dict(path, single_set['lang'])
        # Sum per-word counts instead of dict.update(): update() would
        # silently overwrite a word's running total if the same word
        # appeared in more than one input file.
        for word, count in new_dict.items():
            set_dict[word] += count

    # NOTE(review): append mode means re-running the script duplicates
    # rows in the output file — confirm this is intended (e.g. resuming
    # partial runs) rather than a leftover.
    with open(single_set['out'], "a") as f:
        f.writelines(f"{word}\t{count}\n" for word, count in set_dict.items())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|