import gradio as gr
import torch  # NOTE(review): not referenced below — presumably pulled in so transformers picks a torch backend; confirm before removing
from transformers import pipeline

# Text-generation pipeline; loading GPT-Neo 2.7B downloads ~10 GB of weights
# on first run and is done once at import time so every request reuses it.
generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B')
def get_msg(prompt):
    """Generate a model continuation for *prompt* and return it stripped.

    Uses the module-level ``generator`` pipeline with sampling enabled
    (temperature 0.9) and a 50-token overall length cap.

    NOTE(review): ``generated_text`` from a text-generation pipeline
    includes the original prompt as a prefix — callers see prompt+reply,
    not the reply alone; confirm that is intended.
    """
    res = generator(prompt, max_length=50, do_sample=True, temperature=0.9)
    # Pipeline returns a list of candidate dicts; take the first (and only,
    # since num_return_sequences defaults to 1).
    message = res[0]['generated_text']
    return message.strip()
def chatbot(input, history=None):
    """Gradio handler: generate a reply and append the turn to the history.

    Parameters
    ----------
    input : str
        The user's message.
    history : list[tuple[str, str]] | None
        Conversation so far as (user, bot) pairs; Gradio's ``state``
        component passes this back in on each call.

    Returns
    -------
    tuple
        ``(history, history)`` — one copy feeds the chatbot display,
        the other the state component.
    """
    # Fix: the original used a mutable default (history=[]), which is shared
    # across calls and would leak conversation turns between sessions.
    if history is None:
        history = []
    output = get_msg(input)
    history.append((input, output))
    return history, history
# Wire the handler to a text input plus a hidden state, rendering replies in
# a chatbot widget. The queue serializes requests (one worker, queue depth 1)
# because the 2.7B model is slow and not safely concurrent on one GPU/CPU.
# NOTE(review): ``concurrency_count`` was removed from ``queue()`` in
# Gradio 4.x (replaced by per-event ``concurrency_limit``) — confirm the
# pinned gradio version is 3.x before upgrading.
gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
).queue(concurrency_count=1, max_size=1).launch()