Datasets: Update convert_czner.py
Browse files — convert_czner.py (+1 / −1)
convert_czner.py
CHANGED
|
@@ -33,7 +33,7 @@ def whitespace_tokenize_with_offsets(text):
|
|
| 33 |
return tokens, start_tok_offsets, end_tok_offsets
|
| 34 |
|
| 35 |
|
| 36 |
-
def proc_dataset(dataset, max_text_length=
|
| 37 |
r = []
|
| 38 |
for doc in dataset:
|
| 39 |
text = doc["text"]
|
|
|
|
| 33 |
return tokens, start_tok_offsets, end_tok_offsets
|
| 34 |
|
| 35 |
|
| 36 |
+
def proc_dataset(dataset, max_text_length=200):
|
| 37 |
r = []
|
| 38 |
for doc in dataset:
|
| 39 |
text = doc["text"]
|