|
|
"""Translate a hard-coded English sentence to French with a pretrained T5 model.

Loads ``t5-small`` and its tokenizer from the Hugging Face hub, builds the
task-prefixed prompt T5 expects ("translate English to French: ..."), runs
beam-search generation, and prints the decoded result.
"""

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Checkpoint name on the Hugging Face hub; t5-small is the lightest T5 variant.
model_name = "t5-small"

model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Inference only: disable dropout etc. for deterministic, correct outputs.
model.eval()

text = "Translate this text to French."

# T5 is a multi-task model; the task is selected via a text prefix.
# Calling the tokenizer (rather than .encode) also returns the attention
# mask, which generate() needs to distinguish real tokens from padding.
inputs = tokenizer("translate English to French: " + text, return_tensors="pt")

# no_grad: skip autograd bookkeeping — we never backpropagate here.
with torch.no_grad():
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=40,          # cap on generated sequence length (in tokens)
        num_beams=4,            # beam search width
        early_stopping=True,    # stop beams once all have produced EOS
    )

# outputs has shape (batch=1, seq_len); decode the single sequence,
# dropping special tokens like <pad> and </s>.
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

print("Generated Text:", generated_text)
|
|
|