import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Load the tokenizer and model, cached so Streamlit reruns don't reload the weights
MODEL_NAME = "google/flan-t5-base"

@st.cache_resource
def load_model():
    return T5Tokenizer.from_pretrained(MODEL_NAME), T5ForConditionalGeneration.from_pretrained(MODEL_NAME)

tokenizer, model = load_model()
# Streamlit app UI
st.title("AI English Tutor")
st.write("Ask me a question or give me a sentence, and I will help you.")
# Sidebar for user to control model generation parameters
st.sidebar.title("Model Parameters")
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1) # Default 1.0
top_p = st.sidebar.slider("Top-p (Nucleus Sampling)", 0.0, 1.0, 0.9, 0.05) # Default 0.9
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 1) # Default 50
do_sample = st.sidebar.checkbox("Enable Random Sampling", value=True) # Enable sampling
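# Note: temperature, top-p, and top-k only take effect when sampling is enabled;
# with do_sample=False, generate() falls back to greedy decoding and ignores them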
# Input field for the student
student_question = st.text_input("Ask your question!")
# Generate and display response using the Hugging Face model
if student_question:
    # Phrase the prompt to ask for a complete, simple explanation
    prompt = f"Please explain the answer to this question in simple terms: '{student_question}'"

    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)

    # Generate a response with the user-selected decoding parameters
    outputs = model.generate(
        inputs["input_ids"],
        max_length=150,           # Upper bound on response length (in tokens)
        min_length=50,            # Encourage longer, fuller answers
        temperature=temperature,  # Controls randomness of sampling
        top_p=top_p,              # Nucleus sampling threshold
        top_k=top_k,              # Top-k sampling cutoff
        do_sample=do_sample       # Toggle sampling vs. greedy decoding
    )

    # Decode the generated tokens and display the answer
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    st.write("Tutor's Answer:", response_text)