Kunling committed on
Commit
2ee286f
·
1 Parent(s): 519e3a1

Create new file

Browse files
Files changed (1) hide show
  1. layoutlm_resume_data.py +148 -0
layoutlm_resume_data.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+
3
+ import json
4
+ import os
5
+
6
+ from PIL import Image
7
+
8
+ import datasets
9
+
10
def load_image(image_path):
    """Open an image from disk in RGB mode and report its pixel dimensions.

    Args:
        image_path: Filesystem path to the image.

    Returns:
        A tuple of (PIL.Image converted to RGB, (width, height)).
    """
    img = Image.open(image_path).convert("RGB")
    width, height = img.size
    return img, (width, height)
14
+
15
def normalize_bbox(bbox, size):
    """Scale an absolute pixel bounding box into the 0-1000 coordinate space
    expected by LayoutLM-style models.

    Args:
        bbox: (x0, y0, x1, y1) box in pixel coordinates.
        size: (width, height) of the source image.

    Returns:
        List of four ints: x-coordinates divided by width, y-coordinates by
        height, each multiplied by 1000 and truncated to int.
    """
    width, height = size
    divisors = (width, height, width, height)
    return [int(1000 * coord / dim) for coord, dim in zip(bbox, divisors)]
22
+
23
# Module-level logger routed through the datasets library's logging helpers.
logger = datasets.logging.get_logger(__name__)
24
+
25
+
26
+
27
+
28
class ResumeDataConfig(datasets.BuilderConfig):
    """BuilderConfig for the resume NER dataset.

    Args:
        **kwargs: Keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # No extra configuration fields; delegate everything to the base class.
        super().__init__(**kwargs)
37
+
38
+
39
class ResumeData(datasets.GeneratorBasedBuilder):
    """Resume NER dataset stored in FUNSD-style annotation format.

    Each example pairs one resume page image with its word-level tokens,
    per-word (line-merged) bounding boxes normalized to 0-1000, and
    BIOES-style entity tags (NAME, EMAIL, PHONE, ADDRESS, SECTIONHEADER).
    """

    BUILDER_CONFIGS = [
        ResumeDataConfig(
            name="funsd",
            version=datasets.Version("1.0.0"),
            description="Resume Dataset",
        ),
    ]

    def _info(self):
        """Declare the feature schema exposed to `datasets` consumers."""
        return datasets.DatasetInfo(
            description="",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            # Label order must stay fixed: it defines the
                            # integer ids persisted in prepared datasets.
                            names=[
                                "O",
                                "B-ADDRESS",
                                "B-EMAIL",
                                "B-NAME",
                                "B-PHONE",
                                "B-SECTIONHEADER",
                                "E-ADDRESS",
                                "E-EMAIL",
                                "E-NAME",
                                "E-PHONE",
                                "E-SECTIONHEADER",
                                "I-ADDRESS",
                                "I-NAME",
                                "I-PHONE",
                                "I-SECTIONHEADER",
                                "S-ADDRESS",
                                "S-EMAIL",
                                "S-NAME",
                                "S-OTHER",
                                "S-PHONE",
                                "S-SECTIONHEADER",
                            ]
                        )
                    ),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation="",
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then define train/test splits."""
        # BUG FIX: the original URL used "/blob/main/", which returns the
        # Hub's HTML viewer page, so download_and_extract received HTML
        # instead of the zip. "/resolve/main/" serves the raw file.
        downloaded_file = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/Kunling/layoutlm_resume_data/resolve/main/person_resume_funsd_format_v2.zip"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"},
            ),
        ]

    def get_line_bbox(self, bboxs):
        """Merge per-word boxes of one line into a single enclosing box.

        Args:
            bboxs: Non-empty list of [x0, y0, x1, y1] boxes (already
                normalized) belonging to one annotation line.

        Returns:
            A list of the same length where every entry is the bounding box
            that encloses all input boxes, so each word of a line shares the
            line-level box.
        """
        # Even indices are x-coordinates, odd indices are y-coordinates.
        xs = [bboxs[i][j] for i in range(len(bboxs)) for j in range(0, len(bboxs[i]), 2)]
        ys = [bboxs[i][j] for i in range(len(bboxs)) for j in range(1, len(bboxs[i]), 2)]

        x0, y0, x1, y1 = min(xs), min(ys), max(xs), max(ys)

        assert x1 >= x0 and y1 >= y0
        return [[x0, y0, x1, y1] for _ in range(len(bboxs))]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a split directory.

        Expects ``filepath`` to contain an ``annotations/`` folder of FUNSD
        JSON files and an ``images/`` folder with matching ``.png`` pages.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []

            file_path = os.path.join(ann_dir, file)
            with open(file_path, "r", encoding="utf8") as f:
                data = json.load(f)
            # BUG FIX: str.replace("json", "png") rewrote EVERY occurrence of
            # "json" in the path (e.g. a directory or stem containing "json");
            # swap only the file extension instead.
            base, _ = os.path.splitext(file)
            image_path = os.path.join(img_dir, base + ".png")
            image, size = load_image(image_path)
            for item in data["form"]:
                cur_line_bboxes = []
                words, label = item["words"], item["label"]
                # Drop whitespace-only words; skip lines left empty.
                words = [w for w in words if w["text"].strip() != ""]
                if len(words) == 0:
                    continue
                if label == "other":
                    for w in words:
                        tokens.append(w["text"])
                        ner_tags.append("O")
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                else:
                    # First word opens the entity (B-), the rest continue it (I-).
                    tokens.append(words[0]["text"])
                    ner_tags.append("B-" + label.upper())
                    cur_line_bboxes.append(normalize_bbox(words[0]["box"], size))
                    for w in words[1:]:
                        tokens.append(w["text"])
                        ner_tags.append("I-" + label.upper())
                        cur_line_bboxes.append(normalize_bbox(w["box"], size))
                # Replace word boxes with the shared line-level box.
                cur_line_bboxes = self.get_line_bbox(cur_line_bboxes)
                bboxes.extend(cur_line_bboxes)
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "bboxes": bboxes,
                "ner_tags": ner_tags,
                "image": image,
            }