savhascelik committed on
Commit
dd5ea36
·
1 Parent(s): 2e45173

Create certificate-test.py

Browse files
Files changed (1) hide show
  1. certificate-test.py +125 -0
certificate-test.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ import json
3
+ import os
4
+ from pathlib import Path
5
+ import datasets
6
+ from PIL import Image
7
+ # import torch
8
+ # from detectron2.data.transforms import ResizeTransform, TransformList
9
+ logger = datasets.logging.get_logger(__name__)
10
+ _CITATION = """\
11
+ @article{2019,
12
+ title={ICDAR2019 Competition on Scanned Receipt OCR and Information Extraction},
13
+ url={http://dx.doi.org/10.1109/ICDAR.2019.00244},
14
+ DOI={10.1109/icdar.2019.00244},
15
+ journal={2019 International Conference on Document Analysis and Recognition (ICDAR)},
16
+ publisher={IEEE},
17
+ author={Huang, Zheng and Chen, Kai and He, Jianhua and Bai, Xiang and Karatzas, Dimosthenis and Lu, Shijian and Jawahar, C. V.},
18
+ year={2019},
19
+ month={Sep}
20
+ }
21
+ """
22
+ _DESCRIPTION = """\
23
+ https://arxiv.org/abs/2103.10213
24
+ """
25
+
26
+
27
def load_image(image_path):
    """Open an image file and return it with its dimensions.

    Args:
        image_path: path to an image readable by PIL.

    Returns:
        A tuple ``(image, (width, height))`` where ``image`` is the open
        ``PIL.Image`` object.
    """
    img = Image.open(image_path)
    width, height = img.size
    return img, (width, height)
31
+
32
+
33
def normalize_bbox(bbox, size):
    """Scale an absolute pixel box to the 0-1000 coordinate space.

    Args:
        bbox: ``[x0, y0, x1, y1]`` in absolute pixels.
        size: ``(width, height)`` of the source image.

    Returns:
        The box as a list of ints, each axis scaled to 0-1000 (truncated).
    """
    width, height = size
    divisors = (width, height, width, height)
    return [int(1000 * coord / dim) for coord, dim in zip(bbox, divisors)]
40
+
41
+
42
+ def _get_drive_url(url):
43
+ base_url = 'https://drive.google.com/uc?id='
44
+ split_url = url.split('/')
45
+ return base_url + split_url[5]
46
+
47
+
48
# Direct-download URL(s) for the dataset archive hosted on Google Drive.
_URLS = [
    _get_drive_url(
        "https://drive.google.com/file/d/1Dk0C_jGptIy3Ad6Cb--AIs9WeP9qS61m/view?usp=sharing"
    ),
]
52
+
53
+
54
class SroieConfig(datasets.BuilderConfig):
    """BuilderConfig for the SROIE-style certificate dataset."""

    def __init__(self, **kwargs):
        """Initialize the config.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
63
+
64
+
65
class Sroie(datasets.GeneratorBasedBuilder):
    """Certificate key-information dataset builder (SROIE-style layout).

    Expects the downloaded archive to contain ``training_data`` and
    ``testing_data`` directories, each holding an ``annotations``
    sub-directory of per-image JSON files and an ``images`` sub-directory.
    """

    BUILDER_CONFIGS = [
        SroieConfig(
            name="sroie",
            version=datasets.Version("1.0.0"),
            description="SROIE dataset",
        ),
    ]

    def _info(self):
        """Return dataset metadata: features, citation and homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['I-UNIVERSITYNAME','B-UNIVERSITYNAME','I-UNIVERSITYLOGO','B-UNIVERSITYLOGO','I-CERTIFICATECOURSE','B-CERTIFICATECOURSE','I-DATE','B-DATE','I-DAY','B-DAY','I-MONTH','B-MONTH','I-PARTICIPANTNAME','B-PARTICIPANTNAME','I-SEASON','B-SEASON','I-YEAR','B-YEAR','I-YEARINWORDS','B-YEARINWORDS']
                        )
                    ),
                    # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="https://arxiv.org/abs/2103.10213",
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then return train/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_files[0])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dest / "training_data"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": dest / "testing_data"},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split directory.

        Each image ``<name>.<ext>`` under ``images/`` must have a matching
        ``<name>.json`` under ``annotations/`` providing ``words``, ``bbox``
        (absolute pixel boxes) and ``labels`` (NER tag strings).
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "annotations")
        img_dir = os.path.join(filepath, "images")
        # sorted() keeps example ids deterministic across runs/platforms.
        for guid, fname in enumerate(sorted(os.listdir(img_dir))):
            name, _ext = os.path.splitext(fname)
            ann_path = os.path.join(ann_dir, name + ".json")
            with open(ann_path, "r", encoding="utf8") as f:
                data = json.load(f)
            image_path = os.path.join(img_dir, fname)

            # Open the image only to read its dimensions; the context manager
            # releases the file handle immediately (calling load_image() here
            # would leak one open handle per image in the split).
            with Image.open(image_path) as image:
                size = image.size

            boxes = [normalize_bbox(box, size) for box in data["bbox"]]

            yield guid, {
                "id": str(guid),
                "words": data["words"],
                "bboxes": boxes,
                "ner_tags": data["labels"],
                "image_path": image_path,
            }