import json
import logging
import os
import tarfile
from collections import defaultdict

import datasets
|
|
# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """\
Dataset for extracting notations from chess scoresheets, integrating both image and text data.
"""


# BibTeX citation for the dataset (placeholder text from the HF dataset template).
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2024}
}
"""


# License string surfaced through DatasetInfo.
_LICENSE = "Creative Commons Attribution 3.0"
|
|
class ChessImageTextDataset(datasets.GeneratorBasedBuilder):
    """Link chess scoresheet images with their ground-truth move texts.

    Downloads a tarball of scoresheet images and a JSONL file of transcribed
    texts, groups the texts by a shared ID prefix, and pairs each group with
    the image named ``<id>.png``.  Each example yields one image plus a single
    numbered move-list string built from all texts sharing that ID.
    """

    VERSION = datasets.Version("1.0.0")

    # Texts and images are linked by the first characters of each text line,
    # which encode the scoresheet ID that also names the image file
    # (e.g. text "ab123 e4" pairs with "ab123.png").
    _ID_PREFIX_LEN = 5

    def _info(self):
        """Return dataset metadata: features, description, license, citation."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # DatasetInfo.homepage expects a string, not a dict; list both
            # source dataset repositories in a single value.
            homepage=(
                "https://huggingface.co/datasets/Chesscorner/jsonl-chess-dataset, "
                "https://huggingface.co/datasets/Chesscorner/chess-images"
            ),
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image tarball and text JSONL; define the TRAIN split."""
        image_dataset_url = "https://huggingface.co/datasets/Chesscorner/chess-images/resolve/main/flat_images.tar.gz"
        # Download only — extraction is handled manually in
        # _extract_images_from_tar so we can flatten the archive layout.
        image_tar_path = dl_manager.download(image_dataset_url)

        text_dataset_url = "https://huggingface.co/datasets/Chesscorner/jsonl-chess-dataset/resolve/main/train.jsonl/train.jsonl"
        text_filepath = dl_manager.download(text_dataset_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_tar_path": image_tar_path,
                    "text_filepath": text_filepath,
                },
            ),
        ]

    def _generate_examples(self, image_tar_path, text_filepath):
        """Yield (index, example) pairs linking each image to its cleaned texts.

        Args:
            image_tar_path: Path to the downloaded ``.tar.gz`` of scoresheet images.
            text_filepath: Path to the JSONL file with one ``{"text": ...}`` object per line.
        """
        logger = logging.getLogger(__name__)
        idx = 0

        image_mapping = self._extract_images_from_tar(image_tar_path)

        # Group all text lines that share the same scoresheet ID prefix.
        grouped_texts = defaultdict(list)
        with open(text_filepath, encoding="utf-8") as fp:
            for line in fp:
                obj = json.loads(line)
                text = obj["text"]
                text_id = text[: self._ID_PREFIX_LEN]
                grouped_texts[text_id].append(text)

        for text_id, texts in grouped_texts.items():
            image_file = image_mapping.get(f"{text_id}.png")
            if image_file:
                # Strip the ID token from each line, then merge into one
                # numbered move list.
                cleaned_texts = [self._extract_chess_notation(text) for text in texts]
                numbered_text = self._add_numeration(cleaned_texts)
                yield idx, {
                    "image": image_file,
                    "text": numbered_text,
                }
                idx += 1
            else:
                # Texts without a matching image are skipped, not fatal.
                logger.warning("Image not found for ID: %s", text_id)

    def _extract_images_from_tar(self, tar_path):
        """Extract all files from a .tar.gz and return {basename: extracted path}.

        The archive layout is flattened: only the basename of each member is
        kept, so members with duplicate basenames would overwrite each other.
        """
        image_mapping = {}
        extraction_directory = "images_extracted"
        os.makedirs(extraction_directory, exist_ok=True)

        with tarfile.open(tar_path, "r:gz") as tar:
            for member in tar.getmembers():
                if member.isfile():
                    # basename() also guards against path-traversal member names.
                    image_filename = os.path.basename(member.name)
                    extracted_image_path = os.path.join(extraction_directory, image_filename)

                    with tar.extractfile(member) as extracted_file:
                        with open(extracted_image_path, "wb") as out_file:
                            out_file.write(extracted_file.read())

                    image_mapping[image_filename] = extracted_image_path

        return image_mapping

    def _extract_chess_notation(self, text):
        """Drop the leading ID token from *text* and return the stripped remainder.

        If *text* contains no space, the whole string is returned unchanged
        (aside from stripping).
        """
        notation = text.split(" ", 1)[-1]
        return notation.strip()

    def _add_numeration(self, notations):
        """Pair consecutive half-moves and prefix each pair with a move number.

        Example: ["e4", "e5", "Nf3"] -> "1. e4 e5 2. Nf3".
        Returns an empty string for an empty input list.
        """
        numbered_text = []
        counter = 1

        for i in range(0, len(notations), 2):
            # A trailing unpaired half-move yields a one-element pair.
            move_pair = notations[i:i + 2]
            numbered_move = f"{counter}. " + " ".join(move_pair)
            numbered_text.append(numbered_move)
            counter += 1

        return " ".join(numbered_text)
|
|