File size: 5,408 Bytes
19a4e3f
cc4710c
19a4e3f
 
 
 
 
 
 
f902922
cc4710c
19a4e3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc4710c
 
19a4e3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a57a9b7
 
2d358b0
 
19a4e3f
 
 
 
 
 
 
 
 
 
 
 
4a22524
19a4e3f
 
cc4710c
19a4e3f
 
cc4710c
19a4e3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c700483
 
19a4e3f
 
7824bd0
 
3b18fe4
19a4e3f
cc4710c
 
 
19a4e3f
cc4710c
 
 
 
 
19a4e3f
545e4ce
19a4e3f
 
cc4710c
50afa99
6a4a577
545e4ce
 
 
2834e64
545e4ce
cc4710c
 
19a4e3f
 
 
cc4710c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
# -*- coding: utf-8 -*-
"""erukaLabels.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1p0VRh0b-OtHjNNLIcNUPb2BaoiE9Mh7O
"""



# coding=utf-8
import json
import os

import datasets

from PIL import Image
import numpy as np

def load_image(image_path):
    """Open the image at ``image_path`` as RGB.

    Returns:
        A tuple ``(image, (width, height))`` where ``image`` is a PIL
        ``Image`` converted to RGB.
    """
    img = Image.open(image_path).convert("RGB")
    # PIL's ``size`` attribute is already the (width, height) pair.
    return img, img.size

def normalize_bbox(bbox, size):
    """Project two polygon corners into the 0-1000 LayoutLM coordinate space.

    Args:
        bbox: flat polygon coordinate list ``[x0, y0, x1, y1, x2, y2, ...]``;
            only the first corner (indices 0, 1) and the third corner
            (indices 4, 5) are used, i.e. the top-left and bottom-right of an
            axis-aligned quad.
        size: ``(width, height)`` of the page image in pixels.

    Returns:
        ``[left, top, right, bottom]`` scaled to the 0-1000 range.
    """
    width, height = size
    left, top = bbox[0], bbox[1]
    right, bottom = bbox[4], bbox[5]
    return [
        int(1000 * left / width),
        int(1000 * top / height),
        int(1000 * right / width),
        int(1000 * bottom / height),
    ]

# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# BibTeX citation for the FUNSD paper this loading script is derived from.
_CITATION = """\
@article{Jaume2019FUNSDAD,
  title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
  author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
  journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
  year={2019},
  volume={2},
  pages={1-6}
}
"""

# Short dataset description; points at the upstream FUNSD project page.
_DESCRIPTION = """\
https://guillaumejaume.github.io/FUNSD/
"""


class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for FUNSD"""

    def __init__(self, **kwargs):
        """Create a FUNSD builder configuration.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() is equivalent to super(FunsdConfig, self)
        # inside a method body.
        super().__init__(**kwargs)

class Funsd(datasets.GeneratorBasedBuilder):
    """Eruka property-card dataset in FUNSD layout.

    Reads Azure Form Recognizer JSON annotations plus the matching page
    images and yields token / bounding-box / NER-tag triples suitable for
    LayoutLM-style token classification.
    """

    BUILDER_CONFIGS = [
        FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
    ]

    def _info(self):
        """Declare the feature schema exposed by this builder."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    # Row/column grid cells (R{row}C{col}) plus a few named
                    # field classes; "O" marks tokens with no label.
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "R0C0", "R1C0", "R2C0", "R3C0", "R4C0", "R5C0", "R6C0", "R7C0", "R8C0", "R9C0", "R10C0",
                                   "R0C1", "R1C1", "R2C1", "R3C1", "R4C1", "R5C1", "R6C1", "R7C1", "R8C1", "R9C1", "R10C1", 
                                   "R0C2", "R1C2", "R2C2", "R3C2", "R4C2", "R5C2", "R6C2", "R7C2", "R8C2", "R9C2", "R10C2", 
                                   "VALUATIONS", "LAND", "BUILDINGS", "TOTAL"]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="https://guillaumejaume.github.io/FUNSD/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test splits."""
        downloaded_file = dl_manager.download_and_extract("dataset_eruka2.zip")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset_eruka/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset_eruka/testing_data/"}
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Collapse a list of word boxes into one shared enclosing box.

        Args:
            bboxs: list of flat coordinate lists alternating x, y values.

        Returns:
            A list of identical ``[x0, y0, x1, y1]`` boxes (one per input box)
            covering the min/max extent of all coordinates.

        Raises:
            ValueError: if the computed extent is inverted (should not happen
                for well-formed input).
        """
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        # Explicit raise instead of assert: asserts are stripped under -O.
        if x1 < x0 or y1 < y0:
            raise ValueError(f"Inverted line bbox: {(x0, y0, x1, y1)}")
        return [[x0, y0, x1, y1] for _ in range(len(bboxs))]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from one split directory.

        Args:
            filepath: split root containing ``annotations/`` (Azure Form
                Recognizer JSON) and ``images/`` (matching ``.jpg`` pages).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            # macOS Finder metadata may appear in the listing; skip it.
            if file == ".DS_Store":
                continue

            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            logger.debug("Reading annotation file %s", file_path)
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            # The image shares the annotation's basename with a .jpg
            # extension.  splitext swaps only the extension, unlike
            # str.replace("json", "jpg") which would rewrite any "json"
            # substring anywhere in the path.
            image_path = os.path.splitext(os.path.join(img_dir, file))[0] + ".jpg"
            image, size = load_image(image_path)

            words = data["analyzeResult"]["pages"][0]["words"]
            for item in words:
                word = item["content"]
                # Guard against empty OCR tokens so tokens/bboxes/ner_tags
                # stay aligned.
                if not word:
                    continue
                tokens.append(word)
                label = item["confidence"]
                # Unedited Azure words carry a float confidence here; rows
                # that were manually relabeled carry a class-name string
                # instead — TODO confirm against the annotation pipeline.
                if isinstance(label, float):
                    ner_tags.append("O")
                else:
                    ner_tags.append(label.upper())
                cur_line_bboxes = [normalize_bbox(item["polygon"], size)]
                bboxes.extend(self.get_line_bbox(cur_line_bboxes))
            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image": image}