Changed input IDs to tokens
Browse files
- ref_seg_ger.py +10 -15
ref_seg_ger.py
CHANGED
|
@@ -18,7 +18,7 @@ from glob import glob
|
|
| 18 |
import os
|
| 19 |
import numpy as np
|
| 20 |
from PIL import Image
|
| 21 |
-
from
|
| 22 |
import datasets
|
| 23 |
from itertools import chain
|
| 24 |
import pandas as pd
|
|
@@ -136,7 +136,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
|
|
| 136 |
# ]
|
| 137 |
|
| 138 |
# DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
|
| 139 |
-
TOKENIZER =
|
| 140 |
|
| 141 |
def _info(self):
|
| 142 |
# TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
|
|
@@ -214,30 +214,25 @@ class RefSeg(datasets.GeneratorBasedBuilder):
|
|
| 214 |
labels = []
|
| 215 |
for i, row in df.iterrows():
|
| 216 |
|
| 217 |
-
|
| 218 |
-
row['token'],
|
| 219 |
-
add_special_tokens=False,
|
| 220 |
-
return_offsets_mapping=False,
|
| 221 |
-
return_attention_mask=False,
|
| 222 |
-
)
|
| 223 |
#print(tokenized_input)
|
| 224 |
-
if len(
|
| 225 |
if row['tag'] == 'B':
|
| 226 |
-
input_ids.append(
|
| 227 |
labels.append(row['tag'] + '-' + row['label'])
|
| 228 |
-
for input_id in
|
| 229 |
input_ids.append(input_id)
|
| 230 |
labels.append('I-' + row['label'])
|
| 231 |
elif row['tag'] == 'I':
|
| 232 |
-
for input_id in
|
| 233 |
input_ids.append(input_id)
|
| 234 |
labels.append('I-' + row['label'])
|
| 235 |
else:
|
| 236 |
-
for input_id in
|
| 237 |
input_ids.append(input_id)
|
| 238 |
labels.append('O')
|
| 239 |
-
elif len(
|
| 240 |
-
input_ids.append(
|
| 241 |
if row['tag'] == 'O':
|
| 242 |
labels.append(row['tag'])
|
| 243 |
else:
|
|
|
|
| 18 |
import os
|
| 19 |
import numpy as np
|
| 20 |
from PIL import Image
|
| 21 |
+
from tokenizers.pre_tokenizers import Whitespace
|
| 22 |
import datasets
|
| 23 |
from itertools import chain
|
| 24 |
import pandas as pd
|
|
|
|
| 136 |
# ]
|
| 137 |
|
| 138 |
# DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
|
| 139 |
+
TOKENIZER = Whitespace()
|
| 140 |
|
| 141 |
def _info(self):
|
| 142 |
# TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
|
|
|
|
| 214 |
labels = []
|
| 215 |
for i, row in df.iterrows():
|
| 216 |
|
| 217 |
+
tokens = self.TOKENIZER.pre_tokenize_str(row['token'])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 218 |
#print(tokenized_input)
|
| 219 |
+
if len(tokens) > 1:
|
| 220 |
if row['tag'] == 'B':
|
| 221 |
+
input_ids.append(tokens[0])
|
| 222 |
labels.append(row['tag'] + '-' + row['label'])
|
| 223 |
+
for input_id in tokens[1:]:
|
| 224 |
input_ids.append(input_id)
|
| 225 |
labels.append('I-' + row['label'])
|
| 226 |
elif row['tag'] == 'I':
|
| 227 |
+
for input_id in tokens:
|
| 228 |
input_ids.append(input_id)
|
| 229 |
labels.append('I-' + row['label'])
|
| 230 |
else:
|
| 231 |
+
for input_id in tokens:
|
| 232 |
input_ids.append(input_id)
|
| 233 |
labels.append('O')
|
| 234 |
+
elif len(tokens) == 1:
|
| 235 |
+
input_ids.append(tokens[0])
|
| 236 |
if row['tag'] == 'O':
|
| 237 |
labels.append(row['tag'])
|
| 238 |
else:
|