Minor Changes.
README.md CHANGED
```diff
@@ -19,13 +19,13 @@ tokenizer = AutoTokenizer.from_pretrained("offiongbassey/efik-mt")
 model = AutoModelForSeq2SeqLM.from_pretrained("offiongbassey/efik-mt")

 # English → Efik
-text = "
+text = "My child is very sick and I need to take him to the hospital for treatment."
 inputs = tokenizer(f"eng_Latn {text}", return_tensors="pt")
 outputs = model.generate(**inputs, max_length=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))

 # Efik → English
-text = "
+text = "Okon ama adaha utom tọñọ usenubọk."
 inputs = tokenizer(f"ibo_Latn {text}", return_tensors="pt")
 outputs = model.generate(**inputs, max_length=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
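For context, the README example after this change should run roughly as below. This is a minimal sketch, not the verbatim README: the hunk starts at line 19, below the import statements, so the `transformers` imports here are assumed. Note that the README prefixes the Efik source sentence with the `ibo_Latn` tag; the diff itself does not say why.

```python
# Sketch of the updated README usage; imports are assumed, since the
# diff hunk begins below them.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("offiongbassey/efik-mt")
model = AutoModelForSeq2SeqLM.from_pretrained("offiongbassey/efik-mt")

# English → Efik: the source text is prefixed with its language tag.
text = "My child is very sick and I need to take him to the hospital for treatment."
inputs = tokenizer(f"eng_Latn {text}", return_tensors="pt")
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Efik → English: the README uses the ibo_Latn tag on the Efik side.
text = "Okon ama adaha utom tọñọ usenubọk."
inputs = tokenizer(f"ibo_Latn {text}", return_tensors="pt")
outputs = model.generate(**inputs, max_length=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```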