|
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline |
|
|
|
|
|
|
|
|
# Hugging Face Hub checkpoint id for the seq2seq model backing the tutor.
# t5-small is the smallest T5 variant (~60M params) — fast, CPU-friendly.
model_name = "t5-small"


# Tokenizer matched to the checkpoint; converts text to/from token ids.
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Encoder-decoder (seq2seq) model weights, downloaded/cached on first run.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


# End-to-end text2text pipeline: tokenize -> generate -> decode.
# Used by tutor_ai() below; built once at import time so each call is cheap.
tutor_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
|
|
|
def tutor_ai(question):
    """Generate an answer to *question* using the module-level t5-small pipeline.

    The question is wrapped in an ``explain:`` prefix before generation.
    NOTE(review): t5-small was not pre-trained with an ``explain:`` task
    prefix — output quality should be verified for this use case.

    Returns the decoded text of the first (and only) generated sequence.
    """
    prompt = f"explain: {question}"
    # Cap total output length at 200 tokens; request a single candidate.
    generations = tutor_pipeline(prompt, max_length=200, num_return_sequences=1)
    first = generations[0]
    return first['generated_text']
|
|
|
|
|
|
|
|
# Demo: ask the tutor a couple of sample math questions and print each Q/A pair.
for question in (
    "What is the Pythagorean theorem?",
    "How do you solve a quadratic equation?",
):
    answer = tutor_ai(question)
    print(f"Q: {question}\nA: {answer}")
|
|
|