fix tokenizer load issue when rerunning the example

Add `tokenizer.save_pretrained(MODEL)` right under the model's `save_pretrained` call, so the tokenizer can be reloaded from the same local directory on subsequent runs.
README.md
CHANGED
```diff
@@ -62,6 +62,8 @@ labels = [row[1] for row in csvreader if len(row) > 1]
 model = AutoModelForSequenceClassification.from_pretrained(MODEL)
 model.save_pretrained(MODEL)
 
+tokenizer.save_pretrained(MODEL)
+
 text = "Good night 😊"
 text = preprocess(text)
 encoded_input = tokenizer(text, return_tensors='pt')
```