import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Path to the locally retrained seq2seq model directory (update this path).
MODEL_PATH = "./aba-retrained-final"
# Load the tokenizer and seq2seq model from the local checkpoint.
# NOTE(review): this runs at import time — presumably acceptable for a demo
# script, but it blocks startup until the weights are loaded.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_PATH)
def ask_aba(question):
    """Generate an answer for an ABA-related question using the local model.

    The question is wrapped in the "question: ..." prompt format the
    retrained model expects, run through ``model.generate``, and decoded
    back to plain text.

    Args:
        question: The user's question as a string.

    Returns:
        The model's decoded answer with special tokens stripped.
    """
    # Prompt prefix matches the format used during retraining — keep in sync.
    inputs = tokenizer(f"question: {question}", return_tensors="pt")
    # Cap total sequence length to keep answers concise and generation fast.
    outputs = model.generate(**inputs, max_length=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio interface: a question box, an answer box, and example prompts.
with gr.Blocks() as demo:
    gr.Markdown("# ABA Therapy Assistant")
    with gr.Row():
        question = gr.Textbox(label="Ask about ABA")
        output = gr.Textbox(label="Answer")
    # Clickable example questions that populate the question box.
    gr.Examples(
        examples=["What is positive reinforcement?", "How to reduce tantrums?"],
        inputs=question,
    )
    # Pressing Enter in the question box runs the model and fills the answer.
    question.submit(ask_aba, inputs=question, outputs=output)

demo.launch()