changed tokenization
ref_seg_ger.py (changed: +1 −0)
```diff
@@ -280,6 +280,7 @@ class RefSeg(datasets.GeneratorBasedBuilder):
     split_ids = np.array_split(clean_input_ids, n_chunks)
     split_labels = np.array_split(clean_labels, n_chunks)
     split_refs = np.array_split(clean_refs, n_chunks)
+    print(clean_input_ids)
     for chunk_ids, chunk_labels, chunk_refs in zip(clean_input_ids, clean_labels, clean_refs):
     # for chunk_id, index in enumerate(range(0, len(clean_input_ids), self.CHUNK_SIZE)):
     # split_ids = clean_input_ids[index:max(len(clean_input_ids), index + self.CHUNK_SIZE)]
```
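For context, a minimal runnable sketch of the `np.array_split` chunking pattern this hunk sits in. The `CHUNK_SIZE` value, the `n_chunks` derivation, and the stand-in arrays are illustrative assumptions, not values taken from `ref_seg_ger.py`. Note also that the hunk's loop zips the unsplit `clean_*` arrays rather than the `split_*` chunks, so it appears to walk element by element instead of chunk by chunk:

```python
import numpy as np

CHUNK_SIZE = 512  # assumed chunk length, standing in for self.CHUNK_SIZE

# Stand-in token IDs, labels, and reference tags of equal length.
clean_input_ids = np.arange(1300)
clean_labels = np.zeros_like(clean_input_ids)
clean_refs = np.zeros_like(clean_input_ids)

# Number of chunks needed so that no chunk exceeds CHUNK_SIZE elements.
n_chunks = max(1, int(np.ceil(len(clean_input_ids) / CHUNK_SIZE)))

# array_split keeps all three sequences aligned: same n_chunks, same boundaries.
split_ids = np.array_split(clean_input_ids, n_chunks)
split_labels = np.array_split(clean_labels, n_chunks)
split_refs = np.array_split(clean_refs, n_chunks)

# Iterate over the *split* arrays, chunk by chunk.
for chunk_ids, chunk_labels, chunk_refs in zip(split_ids, split_labels, split_refs):
    print(len(chunk_ids), len(chunk_labels), len(chunk_refs))
```

Unlike `np.split`, `np.array_split` accepts a section count that does not divide the array length evenly, which is presumably why it replaced the commented-out manual slicing by `self.CHUNK_SIZE`.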