# Hugging Face Space: DialoGPT-based chatbot served with Gradio.
# (Non-code page header "Spaces: Sleeping" from the web scrape removed.)
# NOTE: AutoModelWithLMHead is deprecated (removed in transformers v5);
# AutoModelForCausalLM is the documented replacement for GPT-2-family
# checkpoints such as DialoGPT and exposes the same .generate() API.
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# DialoGPT-small tokenizer paired with a fine-tuned DialoGPT checkpoint.
# padding_side='right' kept from the original; generation below is
# single-sequence so padding side does not affect the output.
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small', padding_side='right')
model = AutoModelForCausalLM.from_pretrained('tomkr000/scottbotai')
def chat(message, history=None):
    """Generate one bot reply to *message* and append the turn to *history*.

    Args:
        message: the user's input text.
        history: list of (user, bot) tuples accumulated so far. Defaults to
            None rather than a mutable ``[]`` (the original's shared-default
            bug); Gradio's ``"state"`` input also passes None on the first
            call, which the guard below handles.

    Returns:
        (history, history) — the same updated list twice, feeding both the
        'chatbot' display output and the 'state' output in the Interface.
    """
    if history is None:
        history = []
    # Encode only the current message (previous turns are not fed back to
    # the model — kept as in the original implementation).
    inputs = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    reply_ids = model.generate(
        inputs,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8,
    )
    # Decode only the newly generated tokens (slice off the prompt).
    response = tokenizer.decode(reply_ids[:, inputs.shape[1]:][0], skip_special_tokens=True)
    history.append((message, response))
    return history, history
# Wire the chat function into a Gradio UI: free-text input plus hidden
# session state in; chat transcript plus updated state out. launch()
# starts the web server (blocking in a Space).
demo = gr.Interface(
    fn=chat,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    allow_flagging="never",
).launch()