|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import logging |
|
|
import zimply |
|
|
import os |
|
|
import bs4 |
|
|
import random |
|
|
import re |
|
|
import tqdm |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WikipediaExtractor:
    """
    Extracts random articles (and chains of related articles) from a local
    Wikipedia ZIM dump, using zimply for file access and BeautifulSoup for
    HTML parsing.
    """

    def __init__(self,
                 wikipedia_path: str,
                 encoding: str = 'utf-8',
                 find: tuple = ('p',),
                 logger: logging.Logger = None,
                 seed: int = None,
                 ):
        """
        :param wikipedia_path: Path to the Wikipedia ZIM file.
        :param encoding: Encoding of the ZIM file. Default is 'utf-8'.
        :param find: The elements of the article to find, refer to BS4.
        :param logger: Logger object for logging. Default is None.
        :param seed: Seed for random number generator. Default is None.
        :raises FileNotFoundError: If wikipedia_path does not exist.
        """
        if not os.path.exists(wikipedia_path):
            raise FileNotFoundError(f"File {wikipedia_path} does not exist.")

        self.zim = zimply.zimply.ZIMFile(wikipedia_path, encoding=encoding)
        self.logger = logger or logging.getLogger(__name__)
        self.find = find

        # Bounds of the valid directory-entry index range for articles.
        # NOTE(review): these magic numbers are dump-specific — confirm when
        # switching to a different ZIM file.
        self.magic_min = 78
        self.magic_max = 4_113_686

        # Seed the module-level RNG so extraction runs are reproducible.
        random.seed(seed)

        # URLs that must never be offered as references: boilerplate pages
        # plus every article already fetched during this session.
        self.stacked_refs = {'Wikidata', 'Wikimedia_Commons', 'ISSN'}
        self.logger.info('WikipediaExtractor initialized.')

    def get_database(self, relation_recursion: int = 0, n_trials: int = 100_000, from_cnt: int = 0):
        """
        Gets the database of articles.

        :param relation_recursion: Relation recursion. Default is 0.
        :param n_trials: Number of trials to get articles. Default is 100_000.
        :param from_cnt: Starting value of the article counter used to build ids. Default is 0.
        :return: A list of related (or not) articles and the successful count.
        """
        articles = list()
        cnt = from_cnt

        for _ in tqdm.tqdm(range(n_trials), desc='Article extraction', unit='article'):
            article = self.get(relation_recursion=relation_recursion)

            # A trial may fail entirely (None) or contain failed entries.
            if article is None:
                continue

            for entry in article:
                if entry is not None:
                    cnt += 1
                    # Sequential zero-padded id, e.g. 'L0-000042'.
                    entry['id'] = f'L0-{cnt:06}'
                    articles.append(entry)

        return articles, cnt

    def get(self, relation_recursion: int = 0, generation_policy: str = 'kill'):
        """
        Gets a random article from wikipedia. Gets a random related article per relation_recursion given.

        :param relation_recursion: Relation recursion. Default is 0.
        :param generation_policy: Tells continuing if there is no relationship recursion. Default is 'kill':
            'kill': Stops generation and returns None.
            'warn': Logs a warning and returns the current generation.
            'ignore': Ignores the article and returns the current generation.
        :return: A list of article dictionaries, or None.
        """
        articles = list()

        random_index = random.randint(self.magic_min, self.magic_max)
        seed_article = self.__get_article_by_index(random_index)
        if seed_article is None:
            # The randomly drawn directory entry did not resolve to an
            # article; nothing to build a chain from.
            return None
        articles.append(seed_article)

        for recursion in range(relation_recursion):
            last_refs = articles[-1]['refs']

            related = None
            if last_refs:
                related = self.__get_article_by_url(random.choice(last_refs))

            if related is not None:
                articles.append(related)
                continue

            # No usable reference (or the chosen reference failed to load):
            # fall back to the generation policy.
            if generation_policy == 'kill':
                self.logger.error('Generation at iteration %d stopped due to lack of references.', recursion + 1)
                return None
            if generation_policy == 'warn':
                self.logger.warning('Generation at iteration %d stopped due to lack of references.', recursion + 1)
            return articles

        return articles

    def __get_article_by_index(self, index: int, astype: type = dict):
        """
        Gets an article by its index.

        :param index: Index of the article.
        :param astype: Type of the return article. Dictionary or article.
        :return: The article as `astype`, or None if it could not be loaded.
        :raises IndexError: If index falls outside [magic_min, magic_max].
        """
        if index < self.magic_min or index > self.magic_max:
            raise IndexError(f"Index {index} is out of range [{self.magic_min}, {self.magic_max}].")

        dict_entry = self.zim.read_directory_entry_by_index(index)

        return self.__get_article_by_url(dict_entry['url'], astype=astype)

    def __get_article_by_url(self, url: str, astype: type = dict):
        """
        Get article by url.

        :param url: The url of the article.
        :param astype: Type of the return article. Dictionary or article.
        :return: The article as `astype`, or None if the url is unknown.
        """
        article = self.zim.get_article_by_url('A', url)
        if article is None:
            # Use the instance logger so failures honour an injected logger.
            self.logger.error('Article %s not found, skipping...', url)
            return None

        # Remember this url so it is never picked as a reference again.
        self.stacked_refs.add(url)

        return self.__article_to_dict(article, self.stacked_refs, self.find) if astype is dict else article

    @staticmethod
    def __article_to_dict(article: zimply.zimply.Article,
                          stacked_refs: set,
                          find: tuple = ('p',)) -> dict:
        """
        Converts an article into a dictionary.

        :param article: Article to convert.
        :param stacked_refs: Stacked references of the article to avoid.
        :param find: Elements of the article to find, refer to BS4.
        :return: A dict with 'title', 'text' (cleaned paragraph strings) and
            'refs' (internal wiki links not yet visited).
        """
        html = article.data.decode('utf-8')
        soup = bs4.BeautifulSoup(html, 'html.parser')

        # NOTE(review): assumes every article page has a <title> tag; a page
        # without one would raise AttributeError here — confirm for the dump.
        page_title = soup.find('title').text.strip()

        # Strip citation markers like [12], then collapse whitespace runs.
        paragraphs = soup.find_all(find)
        text = [re.sub(r'\s+', ' ', re.sub(r'\[\d+]', '', p.get_text())).strip()
                for p in paragraphs if p.get_text(strip=True)]

        # Keep only plain internal links: relative (no leading '/'), no
        # scheme, titled, without percent-escapes, fragments or SVG assets,
        # and not visited before.
        internal_refs = list()
        for a in soup.find_all('a', href=True):
            href = a['href']
            title = a.get('title')
            if (
                    not href.startswith('/') and
                    '://' not in href and
                    title and len(title) > 1 and
                    '%' not in href and
                    '#' not in href and
                    '.svg' not in href and
                    href not in stacked_refs
            ):
                internal_refs.append(href)

        return {
            'title': page_title,
            'text': text,
            'refs': internal_refs,
        }
|
|
|
|
|
|
|
|
|
|
|
|