| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """TODO: Add a description here.""" |
|
|
| import csv |
| import os |
| import itertools |
| import numpy as np |
| from PIL import Image |
| from transformers import AutoTokenizer |
| import datasets |
|
|
| |
| |
| _CITATION = """\ |
| @InProceedings{huggingface:dataset, |
| title = {A great new dataset}, |
| author={huggingface, Inc. |
| }, |
| year={2020} |
| } |
| """ |
|
|
| |
| |
| _DESCRIPTION = """\ |
| This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
| """ |
|
|
| |
| _HOMEPAGE = "" |
|
|
| |
| _LICENSE = "" |
|
|
| |
| |
| |
| _URLS = { |
| "sample": "http://hyperion.bbirke.de/data/docbank/sample.zip", |
| "full": "", |
| } |
|
|
# Schema of one generated example: a token sequence (at most Docbank.CHUNK_SIZE
# long) with per-token layout metadata, plus the page image in two forms.
_FEATURES = datasets.Features(
    {
        # "<file id>_<chunk id>" — see Docbank._generate_examples.
        "id": datasets.Value("string"),
        # Subword token ids; multi-subword words are stored as the unk token id.
        "input_ids": datasets.Sequence(datasets.Value("int64")),
        # Per-token [x0, y0, x1, y1], normalized to a 0-1000 grid by normalize_bbox.
        "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # Per-token [R, G, B] color triples (read from columns 5:8 of the source rows).
        "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        # Per-token font name.
        "fonts": datasets.Sequence(datasets.Value("string")),
        # Page image resized to 224x224, channel-first (see load_image).
        "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
        # Unresized page image.
        "original_image": datasets.features.Image(),
        # Per-token layout-region class.
        "labels": datasets.Sequence(datasets.features.ClassLabel(
            names=['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list', 'paragraph',
                   'reference', 'section', 'table', 'title']
        ))

    }
)
|
|
# File ids known to be broken in the source archive.
# NOTE(review): this list is never consulted anywhere in this script — defunct
# files appear to be skipped via the try/except in _generate_examples instead;
# confirm whether explicit filtering was intended.
_DEFUNCT_FILE_IDS = [
    '126.tar_1706.03360.gz_dispersion_v2_7', '119.tar_1606.07466.gz_20160819Draft_8',
    '167.tar_1412.4821.gz_IDM_TD_Paper_16', '17.tar_1701.07437.gz_muon-beam-dump_final_2',
    '31.tar_1702.04307.gz_held-karp_21', '7.tar_1401.4493.gz_ReversibleNoise_2'
]
|
|
|
|
def load_image(image_path, size=None):
    """Load an image file as a channel-first BGR numpy array.

    Optionally resizes to (size, size) before conversion. Always returns the
    ORIGINAL (width, height) alongside the array, so callers can normalize
    coordinates against the unresized page.
    """
    pil_img = Image.open(image_path).convert("RGB")
    original_dims = pil_img.size  # (w, h) before any resize
    if size is not None:
        pil_img = pil_img.resize((size, size))
    # HWC RGB -> HWC BGR -> CHW
    array = np.asarray(pil_img)[:, :, ::-1].transpose(2, 0, 1)
    return array, original_dims
|
|
|
|
def normalize_bbox(bbox, size):
    """Map pixel-space box coords (ints or numeric strings) onto a 0-1000 grid.

    ``size`` is the page's (width, height); x-coords scale by width,
    y-coords by height.
    """
    extents = (size[0], size[1], size[0], size[1])
    return [int(1000 * int(bbox[i]) / extents[i]) for i in range(4)]
|
|
|
|
def simplify_bbox(bbox):
    """Collapse a flat multi-point box [x, y, x, y, ...] to its enclosing
    axis-aligned [x_min, y_min, x_max, y_max]."""
    x_min = min(bbox[0::2])
    y_min = min(bbox[1::2])
    x_max = max(bbox[2::2])
    y_max = max(bbox[3::2])
    return [x_min, y_min, x_max, y_max]
|
|
|
|
def merge_bbox(bbox_list):
    """Return the smallest box covering every [x0, y0, x1, y1] in ``bbox_list``."""
    lefts, tops, rights, bottoms = zip(*bbox_list)
    return [min(lefts), min(tops), max(rights), max(bottoms)]
|
|
|
|
| |
class Docbank(datasets.GeneratorBasedBuilder):
    """DocBank document-layout dataset builder.

    Each source page contributes one or more examples: per-word token ids,
    normalized bounding boxes, RGB colors, font names and layout labels,
    chunked into sequences of at most CHUNK_SIZE tokens, together with the
    page image (resized and original).
    """

    # Maximum number of tokens emitted per example chunk.
    CHUNK_SIZE = 512
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sample", version=VERSION,
                               description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="full", version=VERSION,
                               description="This part of my dataset covers a second domain"),
    ]

    # BUG FIX: the default must name an existing config; "small" is not declared
    # in BUILDER_CONFIGS, which made loading without an explicit config fail.
    DEFAULT_CONFIG_NAME = "sample"
    # Shared subword tokenizer, loaded once when the class is created.
    TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")

    def _info(self):
        """Return the static dataset metadata (features, description, license, ...)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    @staticmethod
    def _read_file_index(data_dir, csv_name):
        """Read one split-index CSV and resolve its relative paths against data_dir."""
        with open(os.path.join(data_dir, csv_name)) as f:
            return [
                {
                    'id': row['id'],
                    'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
                    'filepath_img': os.path.join(data_dir, row['filepath_img']),
                }
                for row in csv.DictReader(f, skipinitialspace=True)
            ]

    def _split_generators(self, dl_manager):
        """Download/extract the archive for the active config and build the splits."""
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        # The archive ships one index CSV per split; the reading logic was
        # identical three times over, so it lives in _read_file_index now.
        files_train = self._read_file_index(data_dir, "train.csv")
        files_test = self._read_file_index(data_dir, "test.csv")
        files_validation = self._read_file_index(data_dir, "validation.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": files_train,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": files_validation,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": files_test,
                    "split": "test"
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs for every file listed in ``filepath``.

        Each source annotation row holds one word:
        token, x0, y0, x1, y1, R, G, B, font, label.
        Documents longer than CHUNK_SIZE tokens are split into consecutive
        chunks; every chunk carries the full page image.
        """
        key = 0
        for f in filepath:
            f_id = f['id']
            f_fp_txt = f['filepath_txt']
            f_fp_img = f['filepath_img']
            tokens = []
            bboxes = []
            rgbs = []
            fonts = []
            labels = []

            # ``size`` is the ORIGINAL (w, h) — load_image reports it before resizing —
            # so bbox normalization below uses the unresized page dimensions.
            image, size = load_image(f_fp_img, size=224)
            original_image, _ = load_image(f_fp_img)

            try:
                with open(f_fp_txt, newline='', encoding='utf-8') as csvfile:
                    reader = csv.reader(csvfile, delimiter='\t', quotechar=' ')
                    for row in reader:
                        tokenized_input = self.TOKENIZER(
                            row[0],
                            add_special_tokens=False,
                            return_offsets_mapping=False,
                            return_attention_mask=False,
                        )
                        # Words that tokenize to more than one subword are replaced
                        # by the unk token so all per-token lists stay aligned.
                        tokens.append(
                            tokenized_input['input_ids'][0]
                            if len(tokenized_input['input_ids']) == 1
                            else self.TOKENIZER.unk_token_id
                        )
                        bboxes.append(normalize_bbox(row[1:5], size))
                        rgbs.append(row[5:8])
                        fonts.append(row[8])
                        labels.append(row[9])
            except Exception:
                # Deliberate best-effort: malformed/defunct annotation files
                # (see _DEFUNCT_FILE_IDS) are skipped entirely. Narrowed from a
                # bare ``except:`` so KeyboardInterrupt/SystemExit still propagate.
                continue

            # Slices of width CHUNK_SIZE can never exceed CHUNK_SIZE, so no
            # overflow check is needed per chunk.
            for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
                end = index + self.CHUNK_SIZE
                yield key, {
                    "id": f"{f_id}_{chunk_id}",
                    'input_ids': tokens[index:end],
                    "bboxes": bboxes[index:end],
                    "RGBs": rgbs[index:end],
                    "fonts": fonts[index:end],
                    "image": image,
                    "original_image": original_image,
                    "labels": labels[index:end]
                }
                key += 1
|
|
|
|
|
|