import json
import os
from pathlib import Path

import datasets
import pandas as pd
from PIL import Image

logger = datasets.logging.get_logger(__name__)


def load_image(image_path):
    """Open *image_path*, convert it to RGB and return (image, (width, height))."""
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)


def normalize_bbox(bbox, size):
    """Scale an absolute ``[x0, y0, x1, y1]`` box into the 0-1000 range.

    Args:
        bbox: absolute pixel coordinates ``[x0, y0, x1, y1]``.
        size: image ``(width, height)``.

    Returns:
        The box with every coordinate normalized to ``0..1000``.
    """
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        # BUG FIX: the original used 100 here, shrinking y1 by a factor of 10
        # relative to the other three coordinates.
        int(1000 * bbox[3] / size[1]),
    ]


def _get_drive_url(url):
    """Turn a Google Drive share link into a direct-download ``uc?id=`` URL."""
    base_url = 'https://drive.google.com/uc?id='
    split_url = url.split("/")
    # For https://drive.google.com/file/d/<id>/ the file id is path segment 5.
    return base_url + split_url[5]


_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

_CITATION = """\
@article{liharding-nguyen,
title={CVDS: A Dataset for CV Form Understanding},
author={MISA - employees},
year={2022},
}
"""

_DESCRIPTION = """\
Dataset for key information extraction with cv form understanding
"""


class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for CV Dataset"""

    def __init__(self, **kwargs):
        """BuilderConfig for CV Dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(DatasetConfig, self).__init__(**kwargs)


class CVDS(datasets.GeneratorBasedBuilder):
    """Dataset builder for the CVDS CV-form key-information-extraction dataset."""

    BUILDER_CONFIGS = [
        DatasetConfig(name="CVDS", version=datasets.Version("1.0.0"), description="CV Dataset"),
    ]

    def _info(self):
        """Return the dataset schema: words, normalized bboxes, NER tags, image path."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['person_name', 'dob_key', 'dob_value', 'gender_key',
                                   'gender_value', 'phonenumber_key', 'phonenumber_value',
                                   'email_key', 'email_value', 'address_key', 'address_value',
                                   'socical_address_value', 'education', 'education_name',
                                   'education_time', 'experience', 'experience_name',
                                   'experience_time', 'information', 'undefined']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the train/test splits."""
        download_file = dl_manager.download_and_extract(_URLS)
        dest = Path(download_file[0]) / "data1"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # BUG FIX: key must match the _generate_examples parameter name
                # ("file_path", not "filepath"), otherwise generation raises
                # TypeError: unexpected keyword argument.
                gen_kwargs={"file_path": dest / "train.txt", "dest": dest},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": dest / "test.txt", "dest": dest},
            ),
        ]

    def _generate_examples(self, file_path, dest):
        """Yield ``(guid, example)`` pairs from a JSON-lines annotation file.

        Args:
            file_path: path to the split file (one JSON object per line).
            dest: extracted data directory containing images and class_list.txt.
        """
        # class_list.txt maps integer label ids to label names,
        # whitespace-separated. r"\s+" (raw, one-or-more) is more robust than
        # the original non-raw "\s" single-whitespace regex.
        df = pd.read_csv(dest / "class_list.txt", sep=r"\s+", header=None)
        id2label = dict(zip(df[0].tolist(), df[1].tolist()))
        logger.info("⏳ Generating examples from = %s", file_path)
        with open(file_path, "r", encoding="utf8") as f:
            item_list = [line.rstrip('\n\r') for line in f]
        for guid, fname in enumerate(item_list):
            data = json.loads(fname)
            image_path = dest / data['file_name']
            # Image is opened only to obtain its size for bbox normalization.
            _, size = load_image(image_path)
            # NOTE(review): each annotation "box" appears to be an 8-value
            # polygon; indices (6, 7) and (2, 3) are presumably the top-left
            # and bottom-right corners — confirm against the annotation format.
            # BUG FIX: the original had `i["box"][2].` (period) — a syntax
            # error — instead of a comma.
            bboxes = [
                [i["box"][6], i["box"][7], i["box"][2], i["box"][3]]
                for i in data["annotations"]
            ]
            word = [i['text'] for i in data["annotations"]]
            label = [id2label[i["label"]] for i in data["annotations"]]
            bboxes = [normalize_bbox(box, size) for box in bboxes]
            # Surface any box that still exceeds the 0-1000 normalized range.
            if any(coord > 1000 for box in bboxes for coord in box):
                logger.warning("Out-of-range bbox in %s", image_path)
            yield guid, {
                "id": str(guid),
                "words": word,
                "bboxes": bboxes,
                "ner_tags": label,
                # Cast to str: the feature schema declares Value("string").
                "image_path": str(image_path),
            }