Initial commit

docbank.py CHANGED (+38 -29)
@@ -16,6 +16,8 @@
 
 import csv
 import os
+from glob import glob
+
 import numpy as np
 from PIL import Image
 from transformers import LayoutXLMTokenizerFast
@@ -49,7 +51,10 @@ _LICENSE = ""
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
     "sample": "http://hyperion.bbirke.de/data/docbank/sample_resized.zip",
-    "
+    "data": [
+        'http://hyperion.bbirke.de/data/geocite/train.zip',
+        'http://hyperion.bbirke.de/data/geocite/test.zip',
+    ],
 }
 
 _FEATURES = datasets.Features(
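One thing to note about the list-valued "data" config: `dl_manager.download_and_extract` mirrors the structure of its argument, so a list of URLs comes back as a list of extracted directories, while a single URL string comes back as one path. A minimal, hypothetical helper sketching how the two shapes could be normalized (the helper name and the train-before-test ordering assumption are mine, not part of the commit):

from typing import List, Union

def as_split_dirs(extracted: Union[str, List[str]]) -> dict:
    # Hypothetical helper, not in the commit: normalize the output of
    # dl_manager.download_and_extract() for both configs.
    if isinstance(extracted, str):
        # "sample" config: one archive, splits nested under a single root
        return {"train": extracted + "/train", "test": extracted + "/test"}
    # "data" config: one extraction root per URL, in _URLS["data"] order
    train_dir, test_dir = extracted
    return {"train": train_dir, "test": test_dir}

As written, the `_split_generators` change below concatenates `data_dir` with `'/train/...'`, which assumes a single string root; with the list-valued "data" config, a mapping like the one above would be needed first.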
@@ -165,40 +170,44 @@ class Docbank(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         data_dir = dl_manager.download_and_extract(urls)
-        with open(os.path.join(data_dir, "train.csv")) as f:
-            files_train = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
-                            'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
-                           csv.DictReader(f, skipinitialspace=True)]
-        with open(os.path.join(data_dir, "test.csv")) as f:
-            files_test = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
-                            'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
-                           csv.DictReader(f, skipinitialspace=True)]
-        with open(os.path.join(data_dir, "validation.csv")) as f:
-            files_validation = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
-                            'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
-                           csv.DictReader(f, skipinitialspace=True)]
+        train_txts = glob(data_dir + '/train/txt/*.csv')
+        train_data = [(txt, data_dir + '/train/img/' + os.path.basename(txt)[:-4] + '.jpg') for txt in train_txts]
+        test_txts = glob(data_dir + '/test/txt/*.csv')
+        test_data = [(txt, data_dir + '/test/img/' + os.path.basename(txt)[:-4] + '.jpg') for txt in test_txts]
+        # with open(os.path.join(data_dir, "train.csv")) as f:
+        #     files_train = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+        #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+        #                    csv.DictReader(f, skipinitialspace=True)]
+        # with open(os.path.join(data_dir, "test.csv")) as f:
+        #     files_test = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+        #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+        #                    csv.DictReader(f, skipinitialspace=True)]
+        # with open(os.path.join(data_dir, "validation.csv")) as f:
+        #     files_validation = [{'id': row['id'], 'filepath_txt': os.path.join(data_dir, row['filepath_txt']),
+        #                     'filepath_img': os.path.join(data_dir, row['filepath_img'])} for row in
+        #                    csv.DictReader(f, skipinitialspace=True)]
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": files_train,
+                    "filepath": train_data,
                     "split": "train",
                 },
             ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": files_validation,
-                    "split": "validation",
-                },
-            ),
+            # datasets.SplitGenerator(
+            #     name=datasets.Split.VALIDATION,
+            #     # These kwargs will be passed to _generate_examples
+            #     gen_kwargs={
+            #         "filepath": files_validation,
+            #         "split": "validation",
+            #     },
+            # ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": files_test,
+                    "filepath": test_data,
                     "split": "test"
                 },
             ),
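The new pairing logic in `_split_generators` matches every token CSV under `txt/` with the same-named JPEG under `img/`. A self-contained sketch of the same idea (the function name is illustrative; the directory layout is taken from the diff):

import os
from glob import glob

def pair_txt_with_img(root: str, split: str):
    # Pair <root>/<split>/txt/*.csv with <root>/<split>/img/<stem>.jpg,
    # mirroring the glob + basename logic used in the loading script.
    pairs = []
    for txt in sorted(glob(os.path.join(root, split, "txt", "*.csv"))):
        stem = os.path.splitext(os.path.basename(txt))[0]
        pairs.append((txt, os.path.join(root, split, "img", stem + ".jpg")))
    return pairs

# e.g.: train_data = pair_txt_with_img(data_dir, "train")

`os.path.splitext` is a slightly safer way to drop the `.csv` suffix than slicing `[:-4]`, and `sorted` makes example order (and hence keys) deterministic across filesystems.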
@@ -210,11 +219,11 @@ class Docbank(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # print(filepath)
         key = 0
-        for f in filepath:
+        for f_fp_txt, f_fp_img in filepath:
             #print(f)
-            f_id = f['id']
-            f_fp_txt = f['filepath_txt']
-            f_fp_img = f['filepath_img']
+            f_id = key
+            #f_fp_txt = f['filepath_txt']
+            #f_fp_img = f['filepath_img']
             tokens = []
             bboxes = []
             # rgbs = []
@@ -227,14 +236,14 @@ class Docbank(datasets.GeneratorBasedBuilder):
 
             try:
                 with open(f_fp_txt, newline='', encoding='utf-8') as csvfile:
-                    reader = csv.reader(csvfile, delimiter='\t')
+                    reader = csv.reader(csvfile, delimiter=',')
                     for row in reader:
                         # normalized_bbox = normalize_bbox(row[1:5], size)
                         normalized_bbox = [int(x) for x in row[1:5]]
                         tokens.append(row[0])
                         bboxes.append(normalized_bbox)
                         #print(f'Before: {row[9]}')
-                        labels.append(row[9])
+                        labels.append(row[5])
                         #print(f'After: {row[9]}')
                         # tokenized_input = self.TOKENIZER(
                         # row[0],
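In `_generate_examples`, the delimiter switches from tab (DocBank's ten-column `token x0 y0 x1 y1 R G B font label` layout, where the label sits at index 9) to comma, with the label now read from index 5. That implies a `token,x0,y0,x1,y1,label,...` row shape for the new files. A small round-trip check with a made-up row (token and label values are invented):

import csv
import io

# Invented example row in the assumed comma-separated layout:
# token, x0, y0, x1, y1, label
sample_row = "Berlin,100,200,180,220,location\n"

for row in csv.reader(io.StringIO(sample_row), delimiter=','):
    token = row[0]
    bbox = [int(x) for x in row[1:5]]  # same slice the loader uses
    label = row[5]                     # was row[9] in the tab-separated files
    print(token, bbox, label)          # Berlin [100, 200, 180, 220] location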