fixed bug
ref_seg_ger.py (+2 -1)

@@ -203,13 +203,14 @@ class RefSeg(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # print(filepath)
-        paths = glob(filepath + '
+        paths = glob(filepath + '/*.csv')
         key = 0
         for f in paths:
             df = pd.read_csv(f)
             input_ids = []
             labels = []
             for i, row in df.iterrows():
+
                 tokenized_input = self.TOKENIZER(
                     row['token'],
                     add_special_tokens=False,
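For context, the fixed glob collects every CSV under `filepath`, and the surrounding `_generate_examples` loop reads each file with pandas and tokenizes it row by row. Below is a minimal, self-contained sketch of that pattern after the fix; the `label` column, the accumulation into `input_ids`/`labels`, and the final yield are assumptions added for illustration and are not part of the diff.

from glob import glob

import pandas as pd


def generate_examples(filepath, tokenizer):
    # Collect every per-document annotation CSV under `filepath`
    # (the glob pattern introduced by the fix).
    paths = glob(filepath + '/*.csv')
    key = 0
    for f in paths:
        df = pd.read_csv(f)
        input_ids = []
        labels = []
        for _, row in df.iterrows():
            # Tokenize one token cell without adding special tokens,
            # mirroring the call visible in the diff.
            tokenized_input = tokenizer(
                row['token'],
                add_special_tokens=False,
            )
            # Assumed continuation: repeat the row label for every sub-token produced.
            input_ids.extend(tokenized_input['input_ids'])
            labels.extend([row['label']] * len(tokenized_input['input_ids']))
        # Yield one (key, example) pair per CSV, as the GeneratorBasedBuilder template requires.
        yield key, {'input_ids': input_ids, 'labels': labels}
        key += 1

In the actual builder this logic lives in RefSeg._generate_examples, with self.TOKENIZER playing the role of the tokenizer argument shown here.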