"""
Wikipedia ZIM extraction and segmentation script.

Main workflow:
1) Ask the user for a ZIM path and an identifier.
2) Extract articles using `WikipediaExtractor`.
3) Convert the extracted list to a Hugging Face `datasets.Dataset`.
4) Post-process the dataset with `wiki_to_seg` (segmentation).
5) Save the resulting dataset to disk and reload it.

Notes:
- This script assumes `src.WikipediaExtractor` and `src.wiki_to_seg` are available.
- Output is saved under `./wikipedia-es-<identifier>`.
"""
# Standard library
import logging

# Third-party
import datasets

# Local
from src import WikipediaExtractor, wiki_to_seg
def setup_logger() -> logging.Logger:
    """
    Set up and return the module logger for debugging.

    Configures the module-level logger at DEBUG level with a single
    StreamHandler using a timestamped `name - level - message` format.

    Returns:
        logging.Logger: Configured logger instance.

    Notes:
        The handler is attached only once per process: repeated calls
        reuse the already-configured logger instead of stacking duplicate
        handlers (which would emit every record multiple times).
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    # Guard against duplicate handlers when this is called more than once
    # in the same process (the original docstring flagged this hazard).
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        handler.setFormatter(formatter)

        logger.addHandler(handler)

    logger.debug('Debugging WikipediaExtractor')
    return logger
| |
|
| |
|
def extract(
    zim_path: str,
    relation_recursion: int = 0,
    n_trials: int = 30_000
) -> datasets.Dataset:
    """
    Extract Wikipedia articles from a ZIM file into a Hugging Face Dataset.

    Args:
        zim_path (str):
            Path to the Wikipedia ZIM file.
        relation_recursion (int, optional):
            Recursion depth for relation/link exploration, as implemented
            by `WikipediaExtractor`. Defaults to 0.
        n_trials (int, optional):
            Trial/iteration budget handed to the extractor.
            Defaults to 30_000.

    Returns:
        datasets.Dataset:
            A Hugging Face Dataset built from the extracted article list.

    Raises:
        Any exception raised by `WikipediaExtractor` or
        `datasets.Dataset.from_list`.
    """
    logger = setup_logger()
    extractor = WikipediaExtractor(
        zim_path,
        encoding='utf-8',
        logger=logger
    )

    # Second element of the tuple (extractor-internal state) is unused here.
    records, _ = extractor.get_database(
        relation_recursion=relation_recursion,
        n_trials=n_trials,
        from_cnt=0
    )

    return datasets.Dataset.from_list(records)
| |
|
| |
|
| | |
| | |
| | |
if __name__ == '__main__':
    # Script entry point.
    #
    # Prompts for user inputs, runs extraction + segmentation, saves the
    # dataset to disk, and reloads it at the end.
    #
    # Inputs:
    #   - Wikipedia (zim file) path
    #   - Wikipedia identifier (e.g., B000)
    #
    # Side effects:
    #   - Creates ./wikipedia-es-<identifier> containing the saved dataset.
    #   - Reloads the dataset from disk into the `dataset` variable.
    zim_file = input("Wikipedia (zim file) path: ")
    wiki_id = input("Wikipedia (Wikipedia identifier, e.g. B000): ")

    output_dir = f'./wikipedia-es-{wiki_id}'

    # Extract articles, then segment them (window/size argument of 50 as
    # defined by `wiki_to_seg`).
    raw_dataset = extract(zim_file)
    segmented = wiki_to_seg(raw_dataset, 50)

    segmented.save_to_disk(output_dir)

    # Round-trip from disk so `dataset` is the on-disk representation.
    dataset = datasets.load_from_disk(output_dir)
| |
|
| | |
| | |
| | |
| |
|