| | """BookCoref is a datasets for coreference resolution, with a manually annotated test set and an automatically generated training set.""" |
| |
|
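# Minimal usage sketch (hedged: the repository id below is illustrative, and loading a script
# like this one requires `trust_remote_code=True`):
#
#   from datasets import load_dataset
#   bookcoref = load_dataset("sapienzanlp/bookcoref", "full", trust_remote_code=True)
#   print(bookcoref["test"][0]["doc_key"])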
import contextlib
import io
import json
import logging
import sys
from collections import defaultdict
from itertools import chain
from multiprocessing import Pool, cpu_count
from pathlib import Path
from typing import Generator, Iterable, Literal, Mapping, Union

import datasets
import nltk
import spacy
import spacy.cli
from datasets.download import DownloadManager, StreamingDownloadManager
from deepdiff import Delta
from deepdiff.serialization import json_loads
from nltk.tokenize import sent_tokenize
from tqdm import tqdm
_CITATION = """\
@inproceedings{martinelli-etal-2025-bookcoref,
    title = "{BOOKCOREF}: Coreference Resolution at Book Scale",
    author = "Martinelli, Giuliano and
      Bonomo, Tommaso and
      Huguet Cabot, Pere-Llu{\'i}s and
      Navigli, Roberto",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.acl-long.1197/",
    pages = "24526--24544",
    ISBN = "979-8-89176-251-0",
}
"""
_DESCRIPTION = """\
BookCoref is a large-scale dataset for coreference resolution, with a manually annotated test set and an automatically generated training set.
"""
_HOMEPAGE = ""

_LICENSE = "CC BY-NC-SA 4.0 License"

_LOCAL_DATA_DIR = Path("bookcoref_annotations")
_LOCAL_FILES = {
    "full": [
        _LOCAL_DATA_DIR / "full" / "train.jsonl",
        _LOCAL_DATA_DIR / "full" / "validation.jsonl",
        _LOCAL_DATA_DIR / "full" / "test.jsonl",
    ],
    "split": [
        _LOCAL_DATA_DIR / "split" / "train_split.jsonl",
        _LOCAL_DATA_DIR / "split" / "validation_split.jsonl",
        _LOCAL_DATA_DIR / "split" / "test_split.jsonl",
    ],
    "delta": [
        _LOCAL_DATA_DIR / "gutenberg_delta.json",
    ],
    "lengths": [
        _LOCAL_DATA_DIR / "gutenberg_lengths.json",
    ],
}
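# Note (inferred from how these files are used in `_split_generators`):
# - gutenberg_delta.json stores a deepdiff Delta that patches the freshly downloaded and
#   tokenized Gutenberg text into the exact token sequences the annotations refer to.
# - gutenberg_lengths.json maps each gutenberg_key to per-sentence token counts, used to
#   re-segment the patched token stream back into sentences.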
class BookCoref(datasets.GeneratorBasedBuilder):
    """BookCoref is a dataset for coreference resolution, with a manually annotated test set and an automatically generated training set."""
    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=VERSION, description="Full BookCoref dataset"),
        datasets.BuilderConfig(
            name="split",
            version=VERSION,
            description="BookCoref with books split into windows of 1500 tokens",
        ),
    ]

    LOCAL_DATA_DIR = Path("bookcoref_annotations")

    DEFAULT_CONFIG_NAME = "full"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.logger = self._setup_logger()
        self.logger.info("Initializing BOOKCOREF dataset.")

        # NLTK sentence tokenizer models required by sent_tokenize.
        nltk.download("punkt", quiet=True)
        nltk.download("punkt_tab", quiet=True)

        # spaCy model used for word tokenization; downloaded on first use.
        try:
            self.nlp = spacy.load("en_core_web_sm")
        except OSError:
            self.logger.info("Downloading spaCy model...")
            with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(io.StringIO()):
                spacy.cli.download("en_core_web_sm-3.8.0", direct=True)
            self.nlp = spacy.load("en_core_web_sm")
        self.logger.info("spaCy model loaded.")
    def _info(self):
        features = datasets.Features(
            {
                "doc_key": datasets.Value("string"),
                "gutenberg_key": datasets.Value("string"),
                "sentences": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                "clusters": datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value("int64")))),
                "characters": [
                    {
                        "name": datasets.Value("string"),
                        "mentions": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    }
                ],
            }
        )
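        # Purely illustrative record shape (hypothetical values; assumes mention spans are
        # [start, end] token offsets):
        #   {
        #       "doc_key": "pride_and_prejudice_1342",
        #       "gutenberg_key": "1342",
        #       "sentences": [["It", "is", "a", "truth", "universally", "acknowledged", ...], ...],
        #       "clusters": [[[0, 1], [17, 17]], ...],
        #       "characters": [{"name": "Elizabeth Bennet", "mentions": [[120, 121], ...]}],
        #   }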
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _setup_logger(self):
        """
        Set up a custom logger for the BookCoref class.

        Returns:
            logging.Logger: Configured logger instance.
        """
        logger = logging.getLogger("bookcoref")
        logger.setLevel(logging.INFO)

        # Remove any handlers left over from previous instantiations.
        if logger.handlers:
            logger.handlers.clear()

        # Log to stdout with a timestamped, prefixed format.
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            "[%(asctime)s][bookcoref][%(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
        )
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        # Do not bubble records up to the root logger (avoids duplicate output).
        logger.propagate = False

        return logger
    def _load_local_data(self, splits_paths: list[str]) -> dict[Literal["train", "validation", "test"], list[dict]]:
        data = {}
        for filepath in splits_paths:
            # "train.jsonl" / "train_split.jsonl" -> "train", etc.
            split_name = Path(filepath).stem.split("_")[0]
            with open(filepath, "r") as f:
                samples = [json.loads(line) for line in f]
            data[split_name] = samples
        return data
    def _load_local_delta(self, delta_path: str) -> Delta:
        delta = Delta(delta_path=delta_path, deserializer=json_loads)
        return delta

    def _load_local_lengths(self, lengths_path: str) -> dict[str, list[int]]:
        with open(lengths_path, "r") as f:
            lengths = json.load(f)
        return lengths
    def _download_gutenberg_data(
        self,
        gutenberg_keys: list[str],
        dl_manager: Union[DownloadManager, StreamingDownloadManager],
    ) -> dict[str, str]:
        # Books are fetched from fixed Wayback Machine snapshots of Project Gutenberg for
        # reproducibility; one book uses a different snapshot.
        special_gutenberg_urls = {
            "28240": "https://web.archive.org/web/20240320095627/https://gutenberg.org/ebooks/28240.txt.utf-8"
        }
        gutenberg_urls = {
            key: (
                f"https://web.archive.org/web/20250406105821/https://gutenberg.org/ebooks/{key}.txt.utf-8"
                if key not in special_gutenberg_urls
                else special_gutenberg_urls[key]
            )
            for key in gutenberg_keys
        }
        gutenberg_files: dict[str, str] = dl_manager.download_and_extract(gutenberg_urls)
        return gutenberg_files
    def _tokenize_book(self, key_and_path: tuple[str, str]) -> tuple[str, list[list[str]]]:
        key, path = key_and_path
        book = Path(path).read_text(encoding="utf-8")
        # Split the raw book into sentences with NLTK, then word-tokenize each sentence with spaCy.
        sentences = []
        raw_sentences = sent_tokenize(book)
        for doc in self.nlp.pipe(raw_sentences, n_process=1):
            sentences.append([tok.text for tok in doc])
        return (key, sentences)
    def _cut_into_split(
        self, complete_sentences: dict[str, list], split_keys: Mapping[str, Iterable[str]]
    ) -> dict[str, list[list[str]]]:
        """Cut each book's sentences into chunks of roughly fixed token length.

        Chunks always end at sentence boundaries and target 1350 tokens for train/validation
        books and 1500 tokens for test books.

        Args:
            complete_sentences: A dictionary of document keys and their corresponding sentences.
            split_keys: A dictionary of split names (train, validation, test) and their corresponding gutenberg keys.

        Returns:
            A dictionary mapping chunk keys (doc_key suffixed with "_split1500_S<first sentence index>")
            to their chunks of sentences.
        """
        # Map each gutenberg key to its split so the right chunk length can be chosen.
        keys_to_split = {}
        for split_name, keys in split_keys.items():
            for key in keys:
                keys_to_split[key] = split_name

        all_split_docs = {}
        for doc_key, sentences in complete_sentences.items():
            split_name = keys_to_split[doc_key]
            if split_name == "train" or split_name == "validation":
                length = 1350
            elif split_name == "test":
                length = 1500
            else:
                raise ValueError(f"Unknown split name: {split_name}")

            # Record, for each running token offset, the index of the sentence that ends there.
            eos_to_sent = {}
            tokens = []
            eos = 0
            for idx, tokenized in enumerate(sentences):
                tokens.extend(tokenized)
                eos_to_sent[eos + len(tokenized)] = idx
                eos += len(tokenized)

            # For every multiple of `length`, find the first sentence boundary past it.
            tokens_indices = sorted(
                [
                    next(item for item in eos_to_sent.keys() if item > step)
                    for step in range(length, len(tokens), length)
                ]
            )
            tokens_indices.append(len(tokens))

            # Emit one chunk per boundary; the key suffix is the index of the chunk's first sentence.
            sent = 0
            for off in tokens_indices:
                chunk_doc_key = doc_key + "_split1500_" + "S" + str(sent)
                all_split_docs[chunk_doc_key] = sentences[sent : eos_to_sent[off] + 1]
                sent = eos_to_sent[off] + 1

        return all_split_docs
    def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
        self.logger.info("Loading local data...")
        all_local_data: dict[str, list[str]] = dl_manager.download_and_extract(_LOCAL_FILES)

        # Load the annotation files for the selected configuration and index them by split and gutenberg_key.
        local_data = self._load_local_data(all_local_data[self.config.name])
        gutenberg_data = defaultdict(dict)
        for split, samples in local_data.items():
            for sample in samples:
                gutenberg_data[split][sample["gutenberg_key"]] = sample
        gutenberg_data_size = {split: len(samples) for split, samples in gutenberg_data.items()}
        self.logger.info(
            f"Loaded {gutenberg_data_size} samples from local data for `{self.config.name}` configuration."
        )
        delta = self._load_local_delta(all_local_data["delta"][0])
        lengths = self._load_local_lengths(all_local_data["lengths"][0])
        # Collect the Gutenberg ids to download (samples with gutenberg_key "0" are not downloaded).
        gutenberg_keys = set(
            sample["gutenberg_key"] if self.config.name == "full" else sample["gutenberg_key"].split("_")[0]
            for split in local_data.values()
            for sample in split
            if sample["gutenberg_key"] != "0" and sample["gutenberg_key"].split("_")[0] != "0"
        )
        gutenberg_files = self._download_gutenberg_data(gutenberg_keys, dl_manager)
        # Tokenize every downloaded book in parallel.
        with Pool(cpu_count()) as pool:
            downloaded_sentences = dict(
                tqdm(
                    pool.imap(self._tokenize_book, gutenberg_files.items()),
                    desc="Tokenizing Gutenberg books",
                    total=len(gutenberg_files),
                )
            )
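        # At this point downloaded_sentences maps each gutenberg_key to its tokenized book
        # (a list of sentences, each a list of token strings).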
        # Flatten each book into a single token stream so the Delta can be applied.
        flattened_downloaded_sentences = {
            key: list(chain.from_iterable(sentences)) for key, sentences in downloaded_sentences.items()
        }

        # Patch the downloaded text so the token sequences match the distributed annotations.
        self.logger.info("Applying delta...")
        flattened_complete_sentences = flattened_downloaded_sentences + delta
        self.logger.info("Applied delta to downloaded sentences.")
        # Re-segment each patched token stream into sentences using the stored per-sentence lengths.
        complete_sentences = defaultdict(list)
        for key, sentences in flattened_complete_sentences.items():
            book_lengths = lengths[key]
            current_pos = 0
            for length in book_lengths:
                complete_sentences[key].append(sentences[current_pos : current_pos + length])
                current_pos += length
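        # Attach the reconstructed text to the annotation samples: the "full" configuration uses
        # whole books, while the "split" configuration re-chunks them via _cut_into_split. Samples
        # whose doc_key contains "animal_farm" are skipped here (their sentences are not replaced).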
        if self.config.name == "full":
            for split, split_data in gutenberg_data.items():
                for key, sample in split_data.items():
                    if "animal_farm" not in sample["doc_key"]:
                        sample["sentences"] = complete_sentences[key]
        elif self.config.name == "split":
            split_keys: dict[str, set[str]] = {
                split: set(sample["gutenberg_key"].split("_")[0] for sample in split_data)
                for split, split_data in local_data.items()
            }
            split_complete_sentences = self._cut_into_split(complete_sentences, split_keys)
            for split, split_data in gutenberg_data.items():
                for key, sample in split_data.items():
                    if "animal_farm" not in sample["doc_key"]:
                        sample["sentences"] = split_complete_sentences[key]
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

        self.logger.info("Finished downloading and pre-processing Gutenberg books.")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotations": gutenberg_data["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "annotations": gutenberg_data["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotations": gutenberg_data["test"],
                },
            ),
        ]
    def _generate_examples(self, annotations: dict[str, dict]) -> Generator[tuple[str, dict], None, None]:
        """Yields examples as (key, example) tuples."""
        for key, sample in annotations.items():
            yield (key, sample)