File size: 4,543 Bytes
8b1a711
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5beb7db
8b1a711
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8110ce0
8b1a711
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# coding=utf-8
import json
import os

import datasets

from PIL import Image
import numpy as np

logger = datasets.logging.get_logger(__name__)


def load_image(image_path):
    """Open the image at *image_path*, force RGB mode, and return it.

    Returns:
        A 2-tuple ``(image, (width, height))`` where *image* is a PIL
        ``Image`` in RGB mode.
    """
    img = Image.open(image_path).convert("RGB")
    width, height = img.size
    return img, (width, height)

def normalize_bbox(bbox, size):
    """Scale a pixel-space box ``[x0, y0, x1, y1]`` into the 0-1000 grid.

    Each x coordinate is divided by the image width ``size[0]`` and each y
    coordinate by the image height ``size[1]``, multiplied by 1000, and
    truncated to an int (the coordinate convention LayoutLM-style models use).
    """
    width, height = size
    divisors = (width, height, width, height)
    return [int(1000 * coord / div) for coord, div in zip(bbox, divisors)]

class TTFormLMMConfig(datasets.BuilderConfig):
    """BuilderConfig for the TTForm dataset."""

    def __init__(self, **kwargs):
        """Create a TTForm builder configuration.

        Args:
          **kwargs: keyword arguments forwarded verbatim to
            ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)

class TTFormLMM(datasets.GeneratorBasedBuilder):
    """TTForm dataset: FUNSD-style token/bbox/NER-tag examples over form images."""

    BUILDER_CONFIGS = [
        TTFormLMMConfig(name="ttform", version=datasets.Version("1.0.0"), description="TTFormLMM dataset"),
    ]

    def _info(self):
        """Declare the feature schema: words, 0-1000 bboxes, BIO NER tags, image path."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-swift_code", "I-swift_code", "B-swift_code_ans", "I-swift_code_ans", "B-clearing_code", "I-clearing_code", "B-clearing_code_ans", "I-clearing_code_ans", "B-bank_name", "I-bank_name", "B-bank_name_ans", "I-bank_name_ans", "B-address_line_1", "I-address_line_1", "B-address_line_1_ans", "I-address_line_1_ans", "B-address_line_2", "I-address_line_2","B-address_line_3", "I-address_line_3", "B-country", "I-country", "B-country_ans", "I-country_ans","B-account", "I-account", "B-account_ans", "I-account_ans", "B-name", "I-name","B-name_ans", "I-name_ans"]
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return train/test SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract("https://drive.google.com/uc?export=download&id=1EdRtnBjDVnaCGQ5P31Mwp1uQTwg_A7_L")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from FUNSD-style JSON annotations.

        *filepath* is expected to contain ``annotations/*.json`` and a
        sibling ``images/`` directory with matching ``.png`` files.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            words = []
            bboxes = []
            ner_tags = []
            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # Swap only the file extension. The previous
            # ``image_path.replace("json", "png")`` rewrote EVERY "json"
            # substring in the path (e.g. a directory literally named "json").
            image_path = os.path.splitext(os.path.join(img_dir, file))[0] + ".png"
            # The image itself is only opened to obtain (width, height) for
            # bbox normalization; the pixels are not stored in the example.
            _, size = load_image(image_path)
            for item in data["form"]:
                words_example, label = item["words"], item["label"]
                words_example = [w for w in words_example if w["text"].strip() != ""]
                if not words_example:
                    continue
                if label == "other":
                    for w in words_example:
                        words.append(w["text"])
                        ner_tags.append("O")
                        bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # BUG FIX: the ClassLabel names declared in _info() are
                    # lowercase (e.g. "B-swift_code"); the old
                    # ``"B-" + label.upper()`` produced tags like
                    # "B-SWIFT_CODE" that the schema can never encode.
                    # Keep the label's casing as written in the annotations.
                    words.append(words_example[0]["text"])
                    ner_tags.append("B-" + label)
                    bboxes.append(normalize_bbox(words_example[0]["box"], size))
                    for w in words_example[1:]:
                        words.append(w["text"])
                        ner_tags.append("I-" + label)
                        bboxes.append(normalize_bbox(w["box"], size))
            yield guid, {"id": str(guid), "words": words, "bboxes": bboxes, "ner_tags": ner_tags, "image_path": image_path}