MrPotato committed on
Commit
d7e3da1
·
1 Parent(s): 7943e1a

changed input ids to tokens

Browse files
Files changed (1) hide show
  1. ref_seg_ger.py +10 -15
ref_seg_ger.py CHANGED
@@ -18,7 +18,7 @@ from glob import glob
18
  import os
19
  import numpy as np
20
  from PIL import Image
21
- from transformers import AutoTokenizer
22
  import datasets
23
  from itertools import chain
24
  import pandas as pd
@@ -136,7 +136,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
136
  # ]
137
 
138
  # DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
139
- TOKENIZER = AutoTokenizer.from_pretrained("xlm-roberta-base")
140
 
141
  def _info(self):
142
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -214,30 +214,25 @@ class RefSeg(datasets.GeneratorBasedBuilder):
214
  labels = []
215
  for i, row in df.iterrows():
216
 
217
- tokenized_input = self.TOKENIZER(
218
- row['token'],
219
- add_special_tokens=False,
220
- return_offsets_mapping=False,
221
- return_attention_mask=False,
222
- )
223
  #print(tokenized_input)
224
- if len(tokenized_input['input_ids']) > 1:
225
  if row['tag'] == 'B':
226
- input_ids.append(tokenized_input['input_ids'][0])
227
  labels.append(row['tag'] + '-' + row['label'])
228
- for input_id in tokenized_input['input_ids'][1:]:
229
  input_ids.append(input_id)
230
  labels.append('I-' + row['label'])
231
  elif row['tag'] == 'I':
232
- for input_id in tokenized_input['input_ids']:
233
  input_ids.append(input_id)
234
  labels.append('I-' + row['label'])
235
  else:
236
- for input_id in tokenized_input['input_ids']:
237
  input_ids.append(input_id)
238
  labels.append('O')
239
- elif len(tokenized_input['input_ids']) == 1:
240
- input_ids.append(tokenized_input['input_ids'][0])
241
  if row['tag'] == 'O':
242
  labels.append(row['tag'])
243
  else:
 
18
  import os
19
  import numpy as np
20
  from PIL import Image
21
+ from tokenizers.pre_tokenizers import Whitespace
22
  import datasets
23
  from itertools import chain
24
  import pandas as pd
 
136
  # ]
137
 
138
  # DEFAULT_CONFIG_NAME = "small" # It's not mandatory to have a default configuration. Just use one if it make sense.
139
+ TOKENIZER = Whitespace()
140
 
141
  def _info(self):
142
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
 
214
  labels = []
215
  for i, row in df.iterrows():
216
 
217
+ tokens = self.TOKENIZER.pre_tokenize_str(row['token'])
 
 
 
 
 
218
  #print(tokenized_input)
219
+ if len(tokens) > 1:
220
  if row['tag'] == 'B':
221
+ input_ids.append(tokens[0])
222
  labels.append(row['tag'] + '-' + row['label'])
223
+ for input_id in tokens[1:]:
224
  input_ids.append(input_id)
225
  labels.append('I-' + row['label'])
226
  elif row['tag'] == 'I':
227
+ for input_id in tokens:
228
  input_ids.append(input_id)
229
  labels.append('I-' + row['label'])
230
  else:
231
+ for input_id in tokens:
232
  input_ids.append(input_id)
233
  labels.append('O')
234
+ elif len(tokens) == 1:
235
+ input_ids.append(tokens[0])
236
  if row['tag'] == 'O':
237
  labels.append(row['tag'])
238
  else: