import os
from pathlib import Path
import datasets
from PIL import Image
import pandas as pd
import json
# Module-level logger obtained through the `datasets` library's logging helper.
logger = datasets.logging.get_logger(__name__)
def load_image(image_path):
    """Open an image file, force RGB mode, and return it with its size.

    Returns:
        tuple: ``(PIL.Image.Image, (width, height))``.
    """
    img = Image.open(image_path).convert("RGB")
    return img, img.size
def normalize_bbox(bbox, size):
    """Scale an absolute ``(x0, y0, x1, y1)`` box to the 0-1000 range.

    Args:
        bbox: four pixel coordinates ``[x0, y0, x1, y1]``.
        size: the image ``(width, height)`` used as the scaling reference.

    Returns:
        list[int]: the box rescaled so 1000 corresponds to full width/height.
    """
    width, height = size
    scaled = (
        1000 * bbox[0] / width,
        1000 * bbox[1] / height,
        1000 * bbox[2] / width,
        1000 * bbox[3] / height,
    )
    return [int(value) for value in scaled]
def _get_drive_url(url):
base_url = 'https://drive.google.com/uc?id='
split_url = url.split("/")
return base_url + split_url[5]
# Google Drive archive holding the dataset; downloaded and extracted
# by `CVDS._split_generators`.
_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

# BibTeX citation surfaced in the DatasetInfo.
_CITATION = """\
@article{liharding-nguyen,
title={CVDS: A Dataset for CV Form Understanding},
author={MISA - employees},
year={2022},
}
"""

# Short human-readable description surfaced in the DatasetInfo.
_DESCRIPTION = """\
Dataset for key information extraction with cv form understanding
"""
class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV dataset."""

    def __init__(self, **kwargs):
        """Forward every keyword argument to ``datasets.BuilderConfig``."""
        super().__init__(**kwargs)
class CVDS(datasets.GeneratorBasedBuilder):
    """Dataset builder for CVDS: key-information extraction on CV forms."""

    BUILDER_CONFIGS = [
        DatasetConfig(name="CVDS", version=datasets.Version("1.0.0"), description="CV Dataset"),
    ]

    def _info(self):
        """Return the dataset metadata: features schema, citation, description."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['person_name', 'dob_key', 'dob_value', 'gender_key', 'gender_value', 'phonenumber_key', 'phonenumber_value', 'email_key', 'email_value', 'address_key', 'address_value', 'socical_address_value', 'education', 'education_name', 'education_time', 'experience', 'experience_name', 'experience_time', 'information', 'undefined']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Download/extract the Drive archive and declare train/test splits."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_file[0]) / "data1"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest / "train.txt", "dest": dest}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test.txt", "dest": dest}
            ),
        ]

    def _generate_examples(self, filepath, dest):
        """Yield ``(guid, example)`` pairs from a JSON-lines annotation file.

        Args:
            filepath: the ``train.txt`` / ``test.txt`` file; one JSON object
                per line with ``file_name`` and ``annotations`` keys.
            dest: extracted dataset root, containing images and
                ``class_list.txt``.

        Fixes vs. the original:
          * parameter renamed ``file_path`` -> ``filepath`` to match the
            ``gen_kwargs`` key in ``_split_generators`` (the mismatch raised
            ``TypeError: unexpected keyword argument 'filepath'``);
          * ``i["box"][2]. i["box"][3]`` had a period instead of a comma,
            which was a ``SyntaxError``;
          * ``delimiter="\\s"`` -> raw-string regex ``r"\\s+"`` so runs of
            whitespace in ``class_list.txt`` are handled as one separator.
        """
        # class_list.txt maps label ids (column 0) to label names (column 1).
        df = pd.read_csv(dest / "class_list.txt", delimiter=r"\s+", header=None)
        id2label = dict(zip(df[0].tolist(), df[1].tolist()))
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, "r", encoding="utf8") as f:
            item_list = [line.rstrip("\n\r") for line in f]
        for guid, fname in enumerate(item_list):
            data = json.loads(fname)
            image_path = dest / data["file_name"]
            # Only the image size is needed, to normalize boxes to 0-1000.
            image, size = load_image(image_path)
            # "box" holds an 8-value quadrilateral; indices (6,7) and (2,3)
            # are taken as the two rectangle corners — preserved from the
            # original's intent. TODO confirm corner order against the data.
            bboxes = [[i["box"][6], i["box"][7], i["box"][2], i["box"][3]] for i in data["annotations"]]
            word = [i["text"] for i in data["annotations"]]
            label = [id2label[i["label"]] for i in data["annotations"]]
            bboxes = [normalize_bbox(box, size) for box in bboxes]
            # Debug aid kept from the original: flag images whose normalized
            # coordinates escape the expected 0-1000 range.
            if any(coord > 1000 for box in bboxes for coord in box):
                print(image_path)
            yield guid, {"id": str(guid), "words": word, "bboxes": bboxes, "ner_tags": label, "image_path": image_path}