# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# #
# This file was created by: Alberto Palomo Alonso #
# Universidad de Alcalá - Escuela Politécnica Superior #
# #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
"""
Wikipedia ZIM extraction and segmentation script.
Main workflow:
1) Ask the user for a ZIM path and an identifier.
2) Extract articles using `WikipediaExtractor`.
3) Convert the extracted list to a Hugging Face `datasets.Dataset`.
4) Post-process the dataset with `wiki_to_seg` (segmentation).
5) Save the resulting dataset to disk and reload it.
Notes:
- This script assumes `src.WikipediaExtractor` and `src.wiki_to_seg` are available.
- Output is saved under `./wikipedia-es-<identifier>`.
"""
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# IMPORT STATEMENTS #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
import logging
import datasets
from src import WikipediaExtractor, wiki_to_seg
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# FUNCTION DEF #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
def setup_logger() -> logging.Logger:
    """
    Set up the logger for debugging.

    Returns the module-level logger configured at DEBUG level with a single
    StreamHandler. The function is idempotent: the handler is attached only
    on the first call, so invoking it repeatedly in the same process (e.g.
    once per `extract` call) no longer duplicates every log line.

    Returns:
        logging.Logger: Configured logger instance.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Only attach a handler the first time; repeated calls reuse it.
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.debug('Debugging WikipediaExtractor')
    return logger
def extract(
        zim_path: str,
        relation_recursion: int = 0,
        n_trials: int = 30_000
) -> datasets.Dataset:
    """
    Extract Wikipedia articles from a ZIM file into a Hugging Face Dataset.

    Args:
        zim_path (str):
            Path to the Wikipedia ZIM file.
        relation_recursion (int, optional):
            Recursion depth for relation/link exploration, forwarded to
            `WikipediaExtractor`. Defaults to 0.
        n_trials (int, optional):
            Trial/iteration budget forwarded to the extractor.
            Defaults to 30_000.

    Returns:
        datasets.Dataset:
            Dataset built from the extracted article list.

    Raises:
        Any exception raised by `WikipediaExtractor` or
        `datasets.Dataset.from_list`.
    """
    wiki = WikipediaExtractor(zim_path, encoding='utf-8',
                              logger=setup_logger())
    article_list, _ = wiki.get_database(relation_recursion=relation_recursion,
                                        n_trials=n_trials,
                                        from_cnt=0)
    return datasets.Dataset.from_list(article_list)
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# MAIN #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
if __name__ == '__main__':
    # Script entry point.
    #
    # Prompts for a ZIM path and an identifier, runs extraction followed by
    # segmentation, saves the dataset under ./wikipedia-es-<identifier>, and
    # finally reloads it from disk into `dataset`.

    # Ask user for input data:
    z_path = input("Wikipedia (zim file) path: ")
    identifier = input("Wikipedia (Wikipedia identifier, e.g. B000): ")

    # Pathing:
    path_to_disk = f'./wikipedia-es-{identifier}'

    # Extract, then post-process (segmentation with a window of 50):
    segmentation_dataset = wiki_to_seg(extract(z_path), 50)

    # Persist and reload the dataset:
    segmentation_dataset.save_to_disk(path_to_disk)
    dataset = datasets.load_from_disk(path_to_disk)
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #
# END OF FILE #
# - x - x - x - x - x - x - x - x - x - x - x - x - x - x - #