updated constraints
docbank.py (+19 -29)
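This commit narrows the DocBank loading script to the features that are actually consumed downstream: the `RGBs`, `fonts`, and resized 224×224 `image` fields are commented out of the feature schema and the example generator, tokenization is now capped at 512 tokens (`max_length=512, truncation=True`), and the unused `itertools` import, an unreachable overflow check, and a temporary 500-example cap are removed.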
```diff
@@ -16,7 +16,6 @@
 
 import csv
 import os
-import itertools
 import numpy as np
 from PIL import Image
 from transformers import AutoTokenizer
```
```diff
@@ -58,9 +57,9 @@ _FEATURES = datasets.Features(
         "id": datasets.Value("string"),
         "input_ids": datasets.Sequence(datasets.Value("int64")),
         "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
-        "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
-        "fonts": datasets.Sequence(datasets.Value("string")),
-        "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
+        # "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+        # "fonts": datasets.Sequence(datasets.Value("string")),
+        # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
         "original_image": datasets.features.Image(),
         "labels": datasets.Sequence(datasets.features.ClassLabel(
             names=['abstract', 'author', 'caption', 'date', 'equation', 'figure', 'footer', 'list', 'paragraph',
```
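For reference, a sketch of the trimmed schema after this change. The tail of the `names` list is cut off in the diff view, so it is left elided here rather than guessed at:

```python
import datasets

# Sketch of the post-commit _FEATURES schema; only the ClassLabel names
# visible in the diff are listed -- the rest of the list is truncated there.
_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "input_ids": datasets.Sequence(datasets.Value("int64")),
        "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
        "original_image": datasets.features.Image(),
        "labels": datasets.Sequence(
            datasets.features.ClassLabel(
                names=[
                    "abstract", "author", "caption", "date", "equation",
                    "figure", "footer", "list", "paragraph",
                    # ... remaining label names truncated in the diff view
                ]
            )
        ),
    }
)
```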
```diff
@@ -78,8 +77,8 @@ def load_image(image_path, size=None):
         # resize image
         image = image.resize((size, size))
     image = np.asarray(image)
-    image = image[:, :, ::-1]
-    image = image.transpose(2, 0, 1)
+    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
+    image = image.transpose(2, 0, 1)  # move channels to first dimension
     return image, (w, h)
 
 
```
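Only part of `load_image` appears in the hunk above. A minimal self-contained reconstruction, assuming the function opens the file with PIL and only resizes when `size` is given (the `Image.open` call, the `convert("RGB")`, the `w, h = image.size` line, and the `if size is not None:` guard are assumptions, not shown in the diff):

```python
import numpy as np
from PIL import Image


def load_image(image_path, size=None):
    # Assumed opening logic; only the lines from the resize onward are in the diff.
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    if size is not None:
        # resize image
        image = image.resize((size, size))
    image = np.asarray(image)
    image = image[:, :, ::-1]         # flip color channels from RGB to BGR
    image = image.transpose(2, 0, 1)  # move channels to first dimension (HWC -> CHW)
    return image, (w, h)
```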
```diff
@@ -131,7 +130,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
             description="This part of my dataset covers a second domain"),
     ]
 
-    DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    # DEFAULT_CONFIG_NAME = "small"  # It's not mandatory to have a default configuration. Just use one if it make sense.
     TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")
 
     def _info(self):
```
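With `DEFAULT_CONFIG_NAME` commented out, the builder no longer has a default configuration, so callers have to name one explicitly when several configs exist. A hypothetical call (the script path here is illustrative; newer versions of `datasets` may also require `trust_remote_code=True` for script-based loaders):

```python
import datasets

# "small" is one of the configs defined in the builder; with no default
# configured, omitting the name raises an error when several configs exist.
dataset = datasets.load_dataset("./docbank.py", "small")
```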
```diff
@@ -205,20 +204,20 @@
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        #print(filepath)
+        # print(filepath)
         key = 0
         for f in filepath:
-            #print(f)
+            # print(f)
             f_id = f['id']
             f_fp_txt = f['filepath_txt']
             f_fp_img = f['filepath_img']
             tokens = []
             bboxes = []
-            rgbs = []
-            fonts = []
+            # rgbs = []
+            # fonts = []
             labels = []
 
-            image, size = load_image(f_fp_img, size=224)
+            # image, size = load_image(f_fp_img, size=224)
             original_image, _ = load_image(f_fp_img)
 
             try:
```
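This hunk mirrors the schema change above: the `rgbs` and `fonts` accumulators and the 224×224 resized `image` are commented out, so each generated example now carries only token ids, bounding boxes, labels, and the full-size `original_image`.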
```diff
@@ -232,42 +231,33 @@
                         add_special_tokens=False,
                         return_offsets_mapping=False,
                         return_attention_mask=False,
+                        max_length=512, truncation=True
                     )
                     for tkn in tokenized_input['input_ids']:
                         tokens.append(tkn)
                         bboxes.append(normalized_bbox)
-                        rgbs.append(row[5:8])
-                        fonts.append(row[8])
+                        # rgbs.append(row[5:8])
+                        # fonts.append(row[8])
                         labels.append(row[9])
 
             except:
                 continue
 
             for chunk_id, index in enumerate(range(0, len(tokens), self.CHUNK_SIZE)):
-
                 split_tokens = tokens[index:index + self.CHUNK_SIZE]
                 split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
-                split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
-                split_fonts = fonts[index:index + self.CHUNK_SIZE]
+                # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
+                # split_fonts = fonts[index:index + self.CHUNK_SIZE]
                 split_labels = labels[index:index + self.CHUNK_SIZE]
 
-                if len(split_tokens) > self.CHUNK_SIZE:
-                    print('Err')
-                    print(key)
-                    print(f_id)
-                    print(split_tokens)
-
-
                 yield key, {
                     "id": f"{f_id}_{chunk_id}",
                     'input_ids': split_tokens,
                     "bbox": split_bboxes,
-                    "RGBs": split_rgbs,
-                    "fonts": split_fonts,
-                    "image": image,
+                    # "RGBs": split_rgbs,
+                    # "fonts": split_fonts,
+                    # "image": image,
                     "original_image": original_image,
                     "labels": split_labels
                 }
                 key += 1
-            if key >= 500:
-                break
```
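The `max_length=512, truncation=True` pair is the "constraints" update in the commit title: xlm-roberta-base handles at most 512 tokens per sequence, so longer word-piece sequences are now clipped at tokenization time. A minimal sketch of the constrained call (the input string is a made-up stand-in for a word from a DocBank annotation row):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")

# Stand-in input; the real loader feeds one whitespace-delimited token
# per row of the DocBank .txt annotation files.
tokenized_input = tokenizer(
    "paragraph",
    add_special_tokens=False,
    return_offsets_mapping=False,
    return_attention_mask=False,
    max_length=512, truncation=True,  # the new constraint
)
print(tokenized_input["input_ids"])  # a short list of word-piece ids
```

The deleted overflow check was dead code in any case: a slice `tokens[index:index + self.CHUNK_SIZE]` can never be longer than `self.CHUNK_SIZE`, so the `print('Err')` branch was unreachable. Dropping `if key >= 500: break` also removes what appears to have been a debugging cap on the number of generated examples.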