# coding=utf-8
import json
import os
import datasets
from PIL import Image
import numpy as np
# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)
# BibTeX citation shown on the dataset card (currently an incomplete stub).
_CITATION = """\
title={SLR dataset},
}
"""
# Dataset-card description (placeholder).
_DESCRIPTION = """\
#
"""
def load_image(image_path):
    """Load an image file and return it as a (3, 224, 224) BGR uint8 array.

    Args:
        image_path: path to the image on disk.

    Returns:
        tuple: (pixel array in channel-first BGR layout, original (width, height)).
    """
    with Image.open(image_path) as img:
        rgb = img.convert("RGB")
        original_size = rgb.size  # (width, height) before resizing
        resized = rgb.resize((224, 224))
    pixels = np.asarray(resized)[:, :, ::-1]      # RGB -> BGR channel order
    pixels = np.transpose(pixels, (2, 0, 1))      # HWC -> CHW
    return pixels, original_size
def normalize_bbox(bbox, size):
    """Scale a pixel-space box [x0, y0, x1, y1] to the 0-1000 range.

    Args:
        bbox: box coordinates in pixels.
        size: (width, height) of the source image.

    Returns:
        list[int]: coordinates rescaled so the full image spans 0-1000.
    """
    width, height = size
    divisors = (width, height, width, height)
    return [int(1000 * coord / dim) for coord, dim in zip(bbox, divisors)]
class SLRConfig(datasets.BuilderConfig):
    """BuilderConfig for the SLR dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for SLR.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() — the idiomatic Python 3 form (the file
        # already uses f-strings, so Python 3 is guaranteed).
        super().__init__(**kwargs)
class SLR(datasets.GeneratorBasedBuilder):
    """SLR dataset: loan-document pages with words, bounding boxes and NER tags."""

    BUILDER_CONFIGS = [
        SLRConfig(name="SLR", version=datasets.Version("1.0.0"), description="SLR dataset"),
    ]

    def _info(self):
        """Declare the dataset schema (features, description, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    # Boxes are rescaled to the 0-1000 range by normalize_bbox().
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["DATEISSUED","LOANTERM","PURPOSE","PRODUCT","PROPERTY","LOANAMOUNT","INTERESTRATE","MONTHLYPR","PREPENALTY","BALLOONPAYMENT","ESTMONTHLY","ESTTAXES"]
                        )
                    ),
                    # Channel-first BGR uint8 pixels, resized by load_image().
                    "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="#",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test splits."""
        # NOTE(review): hard-coded Colab path — consider making this a config
        # option or a download URL so the script works outside /content.
        downloaded_file = dl_manager.download_and_extract("/content/SLR/SLR/SLR.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one split directory.

        Expects `filepath` to contain an `annotations/` directory of JSON files
        and an `images/` directory with matching `.png` files.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, fname in enumerate(sorted(os.listdir(ann_dir))):
            words = []
            bboxes = []
            ner_tags = []
            with open(os.path.join(ann_dir, fname), "r", encoding="utf8") as f:
                data = json.load(f)
            # Bug fix: the old `image_path.replace("json", "png")` substituted
            # EVERY occurrence of "json" anywhere in the path (e.g. a directory
            # named "json_data"), not just the file extension. Swap only the
            # extension instead.
            image_path = os.path.join(img_dir, os.path.splitext(fname)[0] + ".png")
            image, size = load_image(image_path)
            # NOTE(review): assumes each annotation file is a list of entries,
            # each holding a "form" list of {label, text, box} items — confirm
            # against the actual annotation schema.
            for state in data:
                for item in state["form"]:
                    ner_tags.append(item["label"])
                    words.append(item["text"])
                    bboxes.append(normalize_bbox(item["box"], size))
            yield guid, {
                "id": str(guid),
                "words": words,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image_path": image_path,
                "image": image,
            }