Upload docbank.py
docbank.py  CHANGED  (+10 -14)
@@ -57,7 +57,7 @@ _FEATURES = datasets.Features(
     {
         "id": datasets.Value("string"),
         "input_ids": datasets.Sequence(datasets.Value("int64")),
-        "
+        "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         "RGBs": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
         "fonts": datasets.Sequence(datasets.Value("string")),
         "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
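The hunk above adds a per-token "bbox" field next to "RGBs": a sequence of [x0, y0, x1, y1] integer boxes, one per token. For illustration, a record conforming to the new _FEATURES would look roughly like this; all values below are made up, only the field names and types come from the diff:

import numpy as np

example = {
    "id": "0.tar_0000.00000.gz_sample_0",             # hypothetical file/chunk id
    "input_ids": [101, 2023, 2003],                   # token ids from the tokenizer
    "bbox": [[61, 305, 77, 319],                      # one [x0, y0, x1, y1] box
             [80, 305, 102, 319],                     # per token, as int64
             [105, 305, 131, 319]],
    "RGBs": [[0, 0, 0], [0, 0, 0], [255, 0, 0]],      # per-token text color
    "fonts": ["Times", "Times", "Times"],             # per-token font name
    "image": np.zeros((3, 224, 224), dtype=np.uint8), # channel-first page image
}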
@@ -70,12 +70,6 @@ _FEATURES = datasets.Features(
     }
 )
 
-_DEFUNCT_FILE_IDS = [
-    '126.tar_1706.03360.gz_dispersion_v2_7', '119.tar_1606.07466.gz_20160819Draft_8',
-    '167.tar_1412.4821.gz_IDM_TD_Paper_16', '17.tar_1701.07437.gz_muon-beam-dump_final_2',
-    '31.tar_1702.04307.gz_held-karp_21', '7.tar_1401.4493.gz_ReversibleNoise_2'
-]
-
 
 def load_image(image_path, size=None):
     image = Image.open(image_path).convert("RGB")
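Only the first two lines of load_image are visible in the diff context. A plausible completion, sketched under two assumptions: the function must produce the (3, 224, 224) uint8 array declared in _FEATURES, and returning the original page size would explain the `size` argument later passed to normalize_bbox. Neither is confirmed by the commit:

from PIL import Image
import numpy as np

def load_image(image_path, size=None):
    image = Image.open(image_path).convert("RGB")
    w, h = image.size                          # original page size, kept for bbox scaling
    if size is not None:
        image = image.resize(size, Image.BILINEAR)
    pixels = np.asarray(image, dtype=np.uint8).transpose(2, 0, 1)  # HWC -> CHW
    return pixels, (w, h)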
@@ -231,18 +225,20 @@ class Docbank(datasets.GeneratorBasedBuilder):
             with open(f_fp_txt, newline='', encoding='utf-8') as csvfile:
                 reader = csv.reader(csvfile, delimiter='\t', quotechar=' ')
                 for row in reader:
-
+                    normalized_bbox = normalize_bbox(row[1:5], size)
                     tokenized_input = self.TOKENIZER(
                         row[0],
                         add_special_tokens=False,
                         return_offsets_mapping=False,
                         return_attention_mask=False,
                     )
-
-
-
-
-
+                    for tkn in tokenized_input['input_ids']:
+                        tokens.append(tkn)
+                        bboxes.append(normalized_bbox)
+                        rgbs.append(row[5:8])
+                        fonts.append(row[8])
+                        labels.append(row[9])
+
         except:
             continue
 
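The indexing in this loop implies the DocBank tab-separated row layout: the token text (row[0]), its box coordinates (row[1:5]), RGB color (row[5:8]), font name (row[8]) and label (row[9]). normalize_bbox itself is not part of this diff; given the call normalize_bbox(row[1:5], size) and the int64 "bbox" feature, a common LayoutLM-style definition, an assumption rather than something the commit confirms, scales pixel coordinates to a 0-1000 integer grid:

def normalize_bbox(bbox, size):
    width, height = size                      # original page size in pixels
    x0, y0, x1, y1 = (int(v) for v in bbox)   # csv rows arrive as strings
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]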
@@ -264,7 +260,7 @@ class Docbank(datasets.GeneratorBasedBuilder):
             yield key, {
                 "id": f"{f_id}_{chunk_id}",
                 'input_ids': split_tokens,
-                "
+                "bbox": split_bboxes,
                 "RGBs": split_rgbs,
                 "fonts": split_fonts,
                 "image": image,
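With split_bboxes wired into the yielded chunk, every example carries exactly one box per input id. A quick sanity check after regenerating the dataset might look like the following; the local script path and split name are placeholders:

from datasets import load_dataset

# May need trust_remote_code=True depending on the datasets version.
ds = load_dataset("./docbank.py", split="train")
sample = ds[0]
assert len(sample["bbox"]) == len(sample["input_ids"])  # one box per token
print(sample["bbox"][:3])  # boxes in the normalized integer space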