# Hugging Face Spaces page metadata (captured from scrape): status "Sleeping"
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the FLAN-T5 tokenizer and model once at import time;
# both objects are reused for every chat request.
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base")
def chat_with_flan(input_text):
    """Generate a single FLAN-T5 reply for the given user message.

    Args:
        input_text: The user's message as a plain string.

    Returns:
        The model's decoded response with special tokens stripped.
    """
    # Turn the prompt into a tensor of token IDs for the model.
    token_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Generate one candidate sequence, capped at 200 tokens total.
    generated = model.generate(token_ids, max_length=200, num_return_sequences=1)
    # Decode the first (only) sequence, dropping special tokens like </s> and <pad>.
    return tokenizer.decode(generated[0], skip_special_tokens=True)
# Wire the chatbot function into a simple single-textbox Gradio UI.
interface = gr.Interface(
    fn=chat_with_flan,
    inputs=gr.Textbox(label="Chat with FLAN-T5"),
    outputs=gr.Textbox(label="FLAN-T5's Response"),
    title="FLAN-T5 Chatbot",
    description="This is a simple chatbot powered by the FLAN-T5 model.",
)

# Start the web server for the app.
interface.launch()