# Source: Hugging Face Space by dantedgp — commit df3aba6 ("Changed tokenizer"), 786 bytes.
import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load the fine-tuned question-generation model and its tokenizer once at
# module import time so every Streamlit rerun reuses the same instances.
# NOTE(review): the model repo and tokenizer repo differ — presumably the
# fine-tune kept the base google-t5/t5-small vocabulary; confirm upstream.
MODEL_NAME = "dantedgp/question-generator"
TOKENIZER_NAME = "google-t5/t5-small"
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_NAME)
def generate_response(txt: str, max_new_tokens: int = 100) -> str:
    """Generate a question from the given passage of text.

    Prefixes the input with the ``ask: `` task prompt expected by the
    fine-tuned T5 model, then decodes greedily.

    Args:
        txt: Source passage to generate a question about.
        max_new_tokens: Upper bound on generated tokens. Defaults to 100,
            matching the original hard-coded value.

    Returns:
        The generated question as a plain string.
    """
    input_text = f"ask: {txt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    # do_sample=False → greedy decoding, so output is deterministic.
    outputs = model.generate(
        input_ids, max_new_tokens=max_new_tokens, do_sample=False
    )
    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print('question: ', question)  # console log for Space debugging
    return question
st.set_page_config(page_title='AI Quiz Generator')
st.title('AI Quiz Generator')
# Text input + submit. BUG FIX: the original collected the text but never
# called generate_response, so `result` stayed empty and st.info never ran.
result = []
with st.form('quiz_form'):
    txt_input = st.text_area('Enter your text', '', height=200)
    submitted = st.form_submit_button('Generate question')
    if submitted and txt_input.strip():
        with st.spinner('Generating question...'):
            result.append(generate_response(txt_input))
if len(result):
    st.info(result)