Malisha committed on
Commit
6aee377
·
1 Parent(s): e6d8d61

Upload ttform-layoutlmv3.py

Browse files
Files changed (1) hide show
  1. ttform-layoutlmv3.py +123 -0
ttform-layoutlmv3.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+
3
+ import json
4
+ import os
5
+
6
+ from PIL import Image
7
+
8
+ import datasets
9
+
10
def load_image(image_path):
    """Load an image file from disk and force it into RGB mode.

    Args:
        image_path: filesystem path to the image file.

    Returns:
        A ``(image, (width, height))`` tuple, where ``image`` is the
        PIL image converted to RGB.
    """
    rgb_image = Image.open(image_path).convert("RGB")
    return rgb_image, rgb_image.size
14
+
15
def normalize_bbox(bbox, size):
    """Rescale an absolute pixel box into the 0-1000 space LayoutLM expects.

    Args:
        bbox: ``[x0, y0, x1, y1]`` in absolute pixel coordinates.
        size: ``(width, height)`` of the source image.

    Returns:
        The box with every coordinate rescaled (and truncated to int) into
        the 0-1000 range.
    """
    width, height = size
    # x-coordinates divide by width, y-coordinates by height.
    extents = (width, height, width, height)
    return [int(1000 * coord / extent) for coord, extent in zip(bbox, extents)]
22
+
23
# Module-level logger obtained through the `datasets` logging facility so that
# verbosity follows the library-wide logging configuration.
logger = datasets.logging.get_logger(__name__)
24
+
25
+
26
+
27
class TTFormConfig(datasets.BuilderConfig):
    """BuilderConfig for the TTForm dataset.

    NOTE(review): the original docstring said "BuilderConfig for FUNSD" —
    this script was adapted from the FUNSD loader; the name has been
    corrected. The config adds nothing beyond ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """BuilderConfig for TTForm.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        # Python-3 style super() — equivalent to super(TTFormConfig, self).
        super().__init__(**kwargs)
37
+
38
+
39
class TTForm(datasets.GeneratorBasedBuilder):
    """TTForm dataset: FUNSD-style form-understanding annotations for LayoutLMv3.

    NOTE(review): the original docstring said "Conll2003 dataset" — a
    copy-paste leftover; corrected here.
    """

    BUILDER_CONFIGS = [
        TTFormConfig(name="ttform", version=datasets.Version("1.0.0"), description="TTForm dataset"),
    ]

    def _info(self):
        """Declare the feature schema: token/box/NER-tag sequences plus the page image."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and test splits.

        BUG FIX: the original URL was a Google Drive *viewer* page
        (``.../file/d/<id>/view?usp=sharing``), which downloads an HTML page
        rather than the archive, so ``download_and_extract`` cannot work.
        Use the direct-download endpoint for the same file id instead.
        """
        downloaded_file = dl_manager.download_and_extract(
            "https://drive.google.com/uc?export=download&id=18ytJQIAE4wFtE5fDhnlFW5zcRWI_tJjR"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Merge per-word boxes of one line into a single enclosing box.

        Args:
            bboxs: non-empty list of ``[x0, y0, x1, y1]`` word boxes.

        Returns:
            The enclosing ``[x0, y0, x1, y1]`` box repeated once per input
            word, so every word on the line shares the line-level box.
        """
        # Even indices (0, 2) are x-coordinates; odd indices (1, 3) are y.
        x = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        y = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(x), min(y), max(x), max(y)

        assert x1 >= x0 and y1 >= y0
        bbox = [[x0, y0, x1, y1] for _ in range(len(bboxs))]
        return bbox

    def _generate_examples(self, filepath):
        """Yield one example per annotation file found under *filepath*.

        Expects ``annotations/*.json`` (FUNSD-format ``{"form": [...]}``)
        with a matching ``images/*.png`` for each annotation file.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # BUG FIX: the original used image_path.replace("json", "png"),
            # which rewrites "json" anywhere in the path (e.g. a directory
            # named "json"); swap only the file extension instead.
            image_path = os.path.splitext(os.path.join(img_dir, file))[0] + ".png"
            image, size = load_image(image_path)
            for item in data["form"]:
                cur_line_bboxes = []
                words, label = item["words"], item["label"]
                # Drop whitespace-only words; skip segments that end up empty.
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label == "other":
                    # Every word in an "other" segment is tagged O.
                    for w in words:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # BIO scheme: first word gets B-<LABEL>, the rest I-<LABEL>.
                    tokens.append(words[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                    for w in words[1:]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                # Replace per-word boxes with the shared line-level box.
                cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
                bboxes.extend(cur_line_bboxes)
            yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags,
                         "image": image}