import json
from pathlib import Path

import datasets
import numpy as np
from PIL import Image
from transformers import AutoTokenizer

logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{nguyen2023layoutxlm_cv,
  title={LayoutXLM for Key Information Extraction},
  author={Liharding Nguyen},
  year={2023},
}
"""

_DESCRIPTION = """\
CV is a collection of CV (résumé) images. For each image it provides the OCR results:
a list of text segments, each with a bounding box, text, and class label. The goal is
to benchmark "key information extraction" - extracting key information from documents.
"""

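# Load an image as RGB. When `size` is given, resize it and convert it to the raw
# uint8 CHW/BGR array stored in the `image` feature below; otherwise return the PIL
# image itself (used for the `original_image` feature).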
def load_image(image_path, size=None):
    image = Image.open(image_path).convert("RGB")
    w, h = image.size

    if size is not None:
        image = image.resize((size, size))
        image = np.asarray(image)
        image = image[:, :, ::-1]  # flip channels from RGB to BGR
        image = image.transpose(2, 0, 1)  # HWC -> CHW

    return image, (w, h)

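# Scale absolute pixel boxes into the 0-1000 coordinate space used by
# LayoutLM-family models; `size` is the original (width, height) of the page.
# e.g. normalize_bbox([10, 20, 30, 40], (100, 200)) -> [100, 100, 300, 200]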
def normalize_bbox(bbox, size):
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]

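# Collapse a 4-point polygon [x1, y1, x2, y2, x3, y3, x4, y4] into an axis-aligned
# [x0, y0, x1, y1] rectangle; a no-op for boxes already in that form.
# e.g. simplify_bbox([1, 2, 7, 2, 7, 9, 1, 9]) -> [1, 2, 7, 9]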
def simplify_bbox(bbox):
    return [
        min(bbox[0::2]),
        min(bbox[1::2]),
        max(bbox[2::2]),
        max(bbox[3::2]),
    ]

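# Smallest rectangle covering every box in the list.
# e.g. merge_bbox([[0, 0, 10, 10], [5, 5, 20, 15]]) -> [0, 0, 20, 15]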
def merge_bbox(bbox_list):
    x0, y0, x1, y1 = list(zip(*bbox_list))
    return [min(x0), min(y0), max(x1), max(y1)]

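# Convert a Google Drive share link (https://drive.google.com/file/d/<id>/...) into
# a direct-download link; the file id is the 6th '/'-separated component.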
def _get_drive_url(url):
    base_url = 'https://drive.google.com/uc?id='
    split_url = url.split('/')

    return base_url + split_url[5]

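# Direct-download URLs for the dataset archives hosted on Google Drive.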
_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1DINFtwirA4vZFWCMYrYM1dqJbZSVA3z9/"),
    _get_drive_url("https://drive.google.com/file/d/11SRDeRKUr8XacB7tauiGjkw1PXDGFKUx/"),
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the CV dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

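# Builder that pairs each CV image with its OCR annotations: every text segment is
# tokenized with xlm-roberta, its box is aligned to the resulting sub-word tokens,
# and each document is emitted in 512-token chunks.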
class XFUN_CV(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        DatasetConfig(name="CV Extractions", version=datasets.Version("1.0.0"), description="CV dataset"),
    ]

    # Shared tokenizer used to align OCR text with sub-word tokens.
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                "id": datasets.Value("string"),
                "input_ids": datasets.Sequence(datasets.Value("int64")),
                "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                "labels": datasets.Sequence(
                    datasets.ClassLabel(
                        # Label strings must match the annotation files verbatim,
                        # so the 'socical_address_value' spelling is kept as-is.
                        names=[
                            'person_name', 'dob_key', 'dob_value', 'gender_key', 'gender_value',
                            'phonenumber_key', 'phonenumber_value', 'email_key', 'email_value',
                            'address_key', 'address_value', 'socical_address_value',
                            'education', 'education_name', 'education_time',
                            'experience', 'experience_name', 'experience_time',
                            'information', 'Education_detail', 'Experience_detail',
                            'Personal_skill', 'Personal_skill_detail',
                            'Certificate', 'Certificate_name', 'Certificate_detail', 'Certificate_time',
                        ]
                    )
                ),
                "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                "original_image": datasets.features.Image(),
            }),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

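    # download_and_extract returns one local path per URL in _URLS; the first archive
    # is expected to contain the `xfund` folder with the train.txt/test.txt index files.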
    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_files[0]) / 'xfund'

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dest / "train.txt", "dest": dest, "dataset_type": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dest / "test.txt", "dest": dest, "dataset_type": "test"},
            ),
        ]

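    # Each line of the index file is one JSON document:
    # {"file_name": ..., "annotations": [{"text": ..., "box": ..., "label": ...}, ...]}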
    def _generate_examples(self, filepath, dest, dataset_type):
        logger.info("⏳ Generating examples from = %s", filepath)

        # Read the split's index file, one document per line.
        data = []
        with open(filepath, 'r', encoding='utf-8') as f:
            for line in f:
                data.append(line.rstrip('\n\r'))
        for guid, line in enumerate(data):
            docs = json.loads(line)
            image_path = dest / docs['file_name']
            image, size = load_image(image_path, size=224)  # model-ready array + original (w, h)
            original_image, _ = load_image(image_path)  # untouched PIL image
            document = docs["annotations"]
            tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}

            for annotation in document:
                tokenized_inputs = self.tokenizer(
                    annotation["text"],
                    add_special_tokens=False,
                    return_offsets_mapping=True,
                    return_attention_mask=False,
                )

                # Character counters used to align sub-word tokens with the OCR text.
                text_length = 0
                ocr_length = 0
                bbox = []
                for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
                    # In xlm-roberta-base, id 6 is the bare "▁" piece: it has no
                    # visible text, so its box is filled in from a neighbour below.
                    if token_id == 6:
                        bbox.append(None)
                        continue

                    text_length += offset[1] - offset[0]
                    tmp_box = []

                    # Consume OCR text until it covers the tokenized span. Each
                    # annotation carries a single text segment and a single box, so
                    # this effectively appends the segment's box for the first token.
                    while ocr_length < text_length:
                        ocr_word = annotation["text"]
                        ocr_length += len(
                            self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word.strip())
                        )
                        tmp_box.append(simplify_bbox(annotation["box"]))

                    # Later tokens of the same segment reuse the previous token's box.
                    if len(tmp_box) == 0:
                        tmp_box = last_box

                    bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                    last_box = tmp_box

                # Give each "▁" placeholder a degenerate box at the top-left corner
                # of the token that follows it.
                bbox = [
                    [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b
                    for i, b in enumerate(bbox)
                ]

                label = [annotation["label"]] * len(bbox)
                tokenized_inputs.update({"bbox": bbox, "labels": label})

                # Append this segment's tokens, boxes, and labels to the running document.
                for key in tokenized_doc:
                    tokenized_doc[key] = tokenized_doc[key] + tokenized_inputs[key]
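            # Emit the document in 512-token chunks so every example fits within the
            # model's maximum sequence length.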
            chunk_size = 512
            for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)):
                item = {}
                for k in tokenized_doc:
                    item[k] = tokenized_doc[k][index : index + chunk_size]

                item.update({
                    "id": str(guid),
                    "image": image,
                    "original_image": original_image,
                })

                yield f"{dataset_type}_{guid}_{chunk_id}", item
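
# A minimal usage sketch (assumes this script is saved locally as cv_extraction.py;
# the filename is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("cv_extraction.py", "CV Extractions")
#   print(ds["train"][0]["labels"][:10])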