Update README.md
Browse files
README.md
CHANGED
|
@@ -13,11 +13,10 @@ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
|
|
| 13 |
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
|
| 14 |
|
| 15 |
text = "Wishing you all a very good morning"
|
| 16 |
-
|
| 17 |
tokenized = tokenizer([text], return_tensors='np') # Convert input text to numerical format first
|
| 18 |
out = model.generate(**tokenized, max_length=128) # Performs translation
|
| 19 |
|
| 20 |
-
#
|
| 21 |
with tokenizer.as_target_tokenizer():
|
| 22 |
translated_text = tokenizer.decode(out[0], skip_special_tokens=True)
|
| 23 |
print(translated_text) # Prints the Hindi translation of the input text
|
|
|
|
| 13 |
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
|
| 14 |
|
| 15 |
text = "Wishing you all a very good morning"
|
|
|
|
| 16 |
tokenized = tokenizer([text], return_tensors='np') # Convert input text to numerical format first
|
| 17 |
out = model.generate(**tokenized, max_length=128) # Performs translation
|
| 18 |
|
| 19 |
+
# Get translated text
|
| 20 |
with tokenizer.as_target_tokenizer():
|
| 21 |
translated_text = tokenizer.decode(out[0], skip_special_tokens=True)
|
| 22 |
print(translated_text) # Prints the Hindi translation of the input text
|