ArneBinder committed on
Commit
b52c6bf
·
1 Parent(s): 0ff7225

Create new file

Browse files
Files changed (1) hide show
  1. xfund.py +152 -0
xfund.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Lint as: python3
2
+ import json
3
+ import logging
4
+ import os
5
+
6
+ import datasets
7
+ from detectron2.data.detection_utils import read_image
8
+ from detectron2.data.transforms import ResizeTransform, TransformList
9
+ from torch import tensor
10
+
11
+
12
def load_image(image_path):
    """Read an image from disk and resize it to a fixed 224x224 tensor.

    Returns:
        A tuple ``(image_tensor, (width, height))`` where ``image_tensor``
        has shape (3, 224, 224) in channel-first order and ``(width,
        height)`` are the original dimensions before resizing.

    NOTE(review): the image is read in BGR channel order and no channel
    swap is performed here, even though downstream metadata records the
    format as "RGB" — confirm against the consuming model's expectations.
    """
    raw = read_image(image_path, format="BGR")
    orig_h, orig_w = raw.shape[0], raw.shape[1]
    resize = TransformList([ResizeTransform(h=orig_h, w=orig_w, new_h=224, new_w=224)])
    # .copy() makes the resized array writeable before wrapping it in a
    # tensor; permute reorders HWC -> CHW.
    chw = tensor(resize.apply_image(raw).copy()).permute(2, 0, 1)
    return chw, (orig_w, orig_h)
22
+
23
+
24
# Base URL of the official XFUND v1.0 release assets on GitHub.
_URL = "https://github.com/doc-analysis/XFUND/releases/download/v1.0/"

# Languages available in XFUND; one builder config is created per language.
_LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"]
logger = logging.getLogger(__name__)
28
+
29
+
30
class XFUNDConfig(datasets.BuilderConfig):
    """Builder configuration for one XFUND language.

    Attributes:
        lang: language code of the primary data (one of ``_LANG``).
        additional_langs: optional "+"-separated string of extra training
            languages, or "all" for every other language.
    """

    def __init__(self, lang, additional_langs=None, **kwargs):
        """Create a config.

        Args:
            lang: string, language for the input text.
            additional_langs: optional "+"-separated extra train languages.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.lang = lang
        self.additional_langs = additional_langs
42
+
43
+
44
# Entity label set used for the per-segment classification feature.
_LABELS = ["header", "question", "answer", "other"]
45
+
46
+
47
def _get_box_feature():
    """Return the feature spec for a bounding box: a sequence of int64."""
    coordinate_dtype = datasets.Value("int64")
    return datasets.Sequence(coordinate_dtype)
49
+
50
+
51
class XFUND(datasets.GeneratorBasedBuilder):
    """XFUND dataset.

    Loader for the multilingual XFUND form-understanding benchmark. Each
    example is one document: its segments (box/text/label/word boxes/
    entity linking), the image metadata, and the page image resized to
    a 3x224x224 uint8 array.
    """

    # One config per supported language, named "xfund.<lang>".
    BUILDER_CONFIGS = [XFUNDConfig(name=f"xfund.{lang}", lang=lang) for lang in _LANG]

    def _info(self):
        """Return the DatasetInfo describing the feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "uid": datasets.Value("string"),
                    "document": datasets.Sequence(
                        {
                            "id": datasets.Value("int64"),
                            "box": _get_box_feature(),
                            "text": datasets.Value("string"),
                            "label": datasets.ClassLabel(names=_LABELS),
                            "words": datasets.Sequence(
                                {
                                    "box": _get_box_feature(),
                                    "text": datasets.Value("string"),
                                }
                            ),
                            # each entry is a list of linked segment ids
                            "linking": datasets.Sequence(
                                datasets.Sequence(datasets.Value("int64"))
                            ),
                        }
                    ),
                    "img_meta": {
                        "fname": datasets.Value("string"),
                        "width": datasets.Value("int64"),
                        "height": datasets.Value("int64"),
                        "format": datasets.Value("string"),
                    },
                    # has to be at the root level, crashes otherwise
                    "img_data": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                },
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads the annotation JSON and image archive for the primary
        language; when ``additional_langs`` is set ("+"-separated, or
        "all" for every other language in _LANG), the corresponding
        training data is appended to the train split only.
        """
        urls_to_download = {
            "train": [
                f"{_URL}{self.config.lang}.train.json",
                f"{_URL}{self.config.lang}.train.zip",
            ],
            "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
            # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        # each entry is a pair: [annotation json path, extracted image dir]
        train_files_for_many_langs = [downloaded_files["train"]]
        val_files_for_many_langs = [downloaded_files["val"]]
        # test_files_for_many_langs = [downloaded_files["test"]]
        if self.config.additional_langs:
            additional_langs = self.config.additional_langs.split("+")
            if "all" in additional_langs:
                additional_langs = [lang for lang in _LANG if lang != self.config.lang]
            for lang in additional_langs:
                urls_to_download = {
                    "train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]
                }
                additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
                train_files_for_many_langs.append(additional_downloaded_files["train"])

        logger.info(
            f"Training on {self.config.lang} with additional langs({self.config.additional_langs})"
        )
        logger.info(f"Evaluating on {self.config.lang}")
        logger.info(f"Testing on {self.config.lang}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
            ),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            filepaths: list of ``[annotation_json_path, image_dir]`` pairs,
                one per language included in the split.

        Raises:
            ValueError: if a loaded image's size does not match the
                width/height recorded in its annotations.
        """
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            # filepath[0] is the annotation JSON, filepath[1] the image dir
            with open(filepath[0], encoding="utf-8") as f:
                data = json.load(f)

            for doc in data["documents"]:
                # print(json.dumps(doc, indent=2))
                fpath = os.path.join(filepath[1], doc["img"]["fname"])
                image, size = load_image(fpath)
                # sanity-check against the size recorded in the annotations
                expected_size = tuple([doc["img"]["width"], doc["img"]["height"]])
                if size != expected_size:
                    raise ValueError(
                        f"image has unexpected size: {size}. expected: {expected_size}"
                    )

                # rename "img" -> "img_meta" to match the feature schema
                doc["img_meta"] = doc.pop("img")
                # NOTE(review): load_image reads BGR and performs no channel
                # swap, yet the format is recorded as "RGB" — confirm.
                doc["img_meta"]["format"] = "RGB"
                doc["img_data"] = image

                yield doc["id"], doc