fixed bug

ref_seg_ger.py (+7 −7) CHANGED

@@ -202,10 +202,10 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        print(filepath)
-        print(split)
+        #print(filepath)
+        #print(split)
         paths = glob(filepath + '/' + split + '/*.csv')
-        print(paths)
+        #print(paths)
         key = 0
         for f in paths:
             df = pd.read_csv(f)
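
For context, `_generate_examples` discovers one CSV per document under `<data_dir>/<split>/` and reads each with pandas. A minimal sketch of that discovery step, assuming exactly the on-disk layout implied by the glob pattern above (the helper name `find_split_csvs` is hypothetical):

    import os
    from glob import glob

    def find_split_csvs(filepath, split):
        # Portable spelling of filepath + '/' + split + '/*.csv'; sorted so
        # the example order (and thus the generated keys) is deterministic
        # across filesystems.
        return sorted(glob(os.path.join(filepath, split, '*.csv')))
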
@@ -219,7 +219,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                 return_offsets_mapping=False,
                 return_attention_mask=False,
             )
-            print(tokenized_input)
+            #print(tokenized_input)
             if len(tokenized_input['input_ids']) > 1:
                 if row['tag'] == 'B':
                     input_ids.append(tokenized_input['input_ids'][0])
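
The branch above handles the case where the tokenizer splits one tagged word into several sub-tokens, appending the first sub-token id when the row is tagged 'B'. The hunk does not show how the remaining sub-tokens are labeled; a common convention for BIO-tagged data (an assumption here, not confirmed by the diff) is that the first piece keeps 'B' and the continuations become 'I':

    def propagate_tag(sub_token_ids, tag):
        # Hypothetical helper: spread one word-level BIO tag over the word's
        # sub-tokens ('B' only on the first piece, 'I' on the rest; 'I' and
        # 'O' tags are simply repeated).
        if tag == 'B':
            return ['B'] + ['I'] * (len(sub_token_ids) - 1)
        return [tag] * len(sub_token_ids)
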
@@ -244,13 +244,13 @@ class RefSeg(datasets.GeneratorBasedBuilder):
                 else:
                     continue

-            print('got all')
+            #print('got all')
             for chunk_id, index in enumerate(range(0, len(input_ids), self.CHUNK_SIZE)):
-                split_ids = input_ids[index:index + self.CHUNK_SIZE]
+                split_ids = input_ids[index:max(len(input_ids), index + self.CHUNK_SIZE)]
                 #split_bboxes = bboxes[index:index + self.CHUNK_SIZE]
                 # split_rgbs = rgbs[index:index + self.CHUNK_SIZE]
                 # split_fonts = fonts[index:index + self.CHUNK_SIZE]
-                split_labels =
+                split_labels = input_ids[index:max(len(input_ids), index + self.CHUNK_SIZE)]

                 yield key, {
                     "id": f"{os.path.basename(f)}_{chunk_id}",
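
A note on the new slice bound, since it is the substantive change in this commit: Python list slices clamp an out-of-range upper bound, so `input_ids[index:index + self.CHUNK_SIZE]` already returns a short final chunk without error. Using `max(len(input_ids), index + self.CHUNK_SIZE)` instead widens every non-final chunk to run all the way to the end of the list; the usual defensive clamp is `min(...)`, which behaves identically to the plain form. A small self-contained demonstration:

    # Compare the three upper-bound spellings on a 10-element list with
    # CHUNK_SIZE = 4: the plain and min() forms both yield chunks of
    # 4, 4, 2 items; the max() form yields 10, 6, 2 items.
    seq = list(range(10))
    CHUNK_SIZE = 4
    for index in range(0, len(seq), CHUNK_SIZE):
        plain = seq[index:index + CHUNK_SIZE]
        clamped = seq[index:min(len(seq), index + CHUNK_SIZE)]
        widened = seq[index:max(len(seq), index + CHUNK_SIZE)]
        assert plain == clamped
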