nvm472001 committed on
Commit
100bb86
·
1 Parent(s): 99916c0

Delete cvdataset-layoutlmv3.py

Browse files
Files changed (1) hide show
  1. cvdataset-layoutlmv3.py +0 -130
cvdataset-layoutlmv3.py DELETED
@@ -1,130 +0,0 @@
1
- import os
2
- from pathlib import Path
3
- import datasets
4
- from PIL import Image
5
- import pandas as pd
6
- import json
7
-
8
- logger = datasets.logging.get_logger(__name__)
9
-
10
def load_image(image_path):
    """Open the file at *image_path* as an RGB PIL image.

    Returns a tuple ``(image, (width, height))``.
    """
    img = Image.open(image_path).convert("RGB")
    width, height = img.size
    return img, (width, height)
16
-
17
def normalize_bbox(bbox, size):
    """Scale a pixel-space box ``[x0, y0, x1, y1]`` into the 0-1000
    coordinate range used by LayoutLM-style models.

    ``size`` is the image's ``(width, height)``; x-coordinates are
    divided by width, y-coordinates by height, then truncated to int.
    """
    width, height = size
    scaled = (
        1000 * bbox[0] / width,
        1000 * bbox[1] / height,
        1000 * bbox[2] / width,
        1000 * bbox[3] / height,
    )
    return [int(coord) for coord in scaled]
24
-
25
- def _get_drive_url(url):
26
- base_url = 'https://drive.google.com/uc?id='
27
- split_url = url.split("/")
28
-
29
- return base_url + split_url[5]
30
-
31
# Direct-download URL(s) for the dataset archive hosted on Google Drive.
_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

# BibTeX citation string exposed through DatasetInfo.citation.
_CITATION = """\
@article{liharding-nguyen,
title={CVDS: A Dataset for CV Form Understanding},
author={MISA - employees},
year={2022},
}
"""

# Short human-readable summary exposed through DatasetInfo.description.
_DESCRIPTION = """\
Dataset for key information extraction with cv form understanding
"""
46
-
47
class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV dataset.

    All keyword arguments are forwarded unchanged to
    ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        super(DatasetConfig, self).__init__(**kwargs)
55
-
56
class CVDS(datasets.GeneratorBasedBuilder):
    """Dataset builder for CVDS, a CV (resume) form-understanding dataset.

    Each example carries the OCR words, their 0-1000-normalized bounding
    boxes, per-word NER class labels, and the path of the page image.
    """

    BUILDER_CONFIGS = [
        DatasetConfig(name="CVDS", version=datasets.Version("1.0.0"), description="CV Dataset"),
    ]

    def _info(self):
        """Describe the feature schema, citation, and description."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['person_name', 'dob_key', 'dob_value', 'gender_key', 'gender_value', 'phonenumber_key', 'phonenumber_value', 'email_key', 'email_value', 'address_key', 'address_value', 'socical_address_value', 'education', 'education_name', 'education_time', 'experience', 'experience_name', 'experience_time', 'information', 'undefined']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage=""
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return train/test split generators."""
        downloaded_file = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_file[0]) / 'data1'

        # gen_kwargs keys must match the parameter names of
        # _generate_examples, since `datasets` forwards them as kwargs.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest / "train.txt", "dest": dest}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test.txt", "dest": dest}
            ),
        ]

    def _generate_examples(self, filepath, dest):
        """Yield (guid, example) pairs from the JSON-lines index at *filepath*.

        *dest* is the extracted data directory; it must contain
        ``class_list.txt`` (whitespace-separated ``id label`` rows) plus
        the page images referenced by each record's ``file_name``.
        """
        # NOTE(review): the fix below renames the parameter from
        # `file_path` to `filepath` so it matches the gen_kwargs keys in
        # _split_generators — with the old name, `datasets` raised a
        # TypeError when invoking this generator.
        # r"\s" keeps the original single-whitespace regex separator but
        # avoids the invalid-escape SyntaxWarning of "\s".
        df = pd.read_csv(dest / "class_list.txt", delimiter=r"\s", header=None)
        id2label = dict(zip(df[0].tolist(), df[1].tolist()))

        logger.info("⏳ Generating examples from = %s", filepath)

        item_list = []
        with open(filepath, "r", encoding="utf8") as f:
            for line in f:
                item_list.append(line.rstrip('\n\r'))

        for guid, fname in enumerate(item_list):
            data = json.loads(fname)

            image_path = dest / data['file_name']
            # Image is opened only to obtain (width, height) for bbox
            # normalization; the pixels themselves are not stored.
            image, size = load_image(image_path)

            # BUG FIX: the original had `i["box"][2]. i["box"][3]`
            # (a period instead of a comma), a syntax error.
            # Annotations store an 8-value polygon; indices 6,7 and 2,3
            # are taken as the box corners — TODO confirm corner order
            # against the annotation format.
            bboxes = [[i["box"][6], i["box"][7], i["box"][2], i["box"][3]] for i in data["annotations"]]
            word = [i['text'] for i in data["annotations"]]
            label = [id2label[i["label"]] for i in data["annotations"]]

            bboxes = [normalize_bbox(box, size) for box in bboxes]

            # Diagnostic: report images whose normalized coords exceed
            # the 0-1000 range (indicates a bad annotation or size).
            if any(coord > 1000 for box in bboxes for coord in box):
                print(image_path)

            yield guid, {"id": str(guid), "words": word, "bboxes": bboxes, "ner_tags": label, "image_path": image_path}