import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the Flan-T5-XL model and tokenizer once at startup.
# NOTE: this is a ~3B-parameter model; expect a large download and
# significant RAM/VRAM use.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-xl")


def respond(message, chat_history):
    """Generate an assistant reply for *message* and append the turn.

    Args:
        message: The user's latest input string.
        chat_history: List of (user, assistant) message tuples maintained
            by the Gradio Chatbot component.

    Returns:
        A 2-tuple of ("", updated chat_history); the empty string clears
        the input textbox after submission.
    """
    # Format the message for the model.
    prompt = f"User: {message}\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt")
    # Without max_new_tokens, generate() falls back to a ~20-token default
    # cap and truncates most replies.
    outputs = model.generate(**inputs, max_new_tokens=256)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Update the chat history shown in the UI.
    chat_history.append((message, response))
    return "", chat_history


# gr.Chatbot is a display component, not an app: it has no launch() method
# and no handle_messages parameter, so the original
# `gr.Chatbot().launch(handle_messages=...)` crashes. Wire the callback up
# inside a gr.Blocks app instead.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    # On Enter: respond(message, history) -> (cleared textbox, new history).
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])

if __name__ == "__main__":
    demo.launch()