"""Minimal Gradio chat UI around microsoft/DialoGPT-medium.

Fixes over the original one-liner script:
- `chatbot()` no longer uses a mutable default argument for `history`.
- The raw token-id history (what the model needs) is kept in a `gr.State`,
  while the `gr.Chatbot` widget holds only the (user, bot) display pairs.
  The original fed the widget's string pairs into `torch.tensor(...)`,
  which crashes on the second turn.
- The click handler's outputs are wired once each instead of listing the
  Chatbot component twice.
"""

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr

# Load the conversational model once at startup (downloads on first run).
MODEL_NAME = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def chatbot(input_text, history=None):
    """Generate one reply, conditioned on the token-id history.

    Args:
        input_text: the user's new message (plain text).
        history: flat list of token ids from all prior turns, or None/empty
            for a fresh conversation.

    Returns:
        (response_text, new_history_ids) where new_history_ids is the flat
        token-id list covering the whole conversation including this reply.
    """
    # Tokenize the new user turn, terminated by EOS as DialoGPT expects.
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt")

    # Prepend prior turns so the model sees the full conversation.
    if history:
        prior = torch.tensor([history], dtype=torch.long)
        bot_input_ids = torch.cat([prior, inputs], dim=-1)
    else:
        bot_input_ids = inputs

    # Generate; pad with EOS since DialoGPT has no dedicated pad token.
    output_ids = model.generate(
        bot_input_ids,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens (everything past the prompt).
    response = tokenizer.decode(
        output_ids[0, bot_input_ids.shape[-1]:], skip_special_tokens=True
    )
    return response, output_ids[0].tolist()


# Define the interface.
with gr.Blocks() as demo:
    chatbot_widget = gr.Chatbot()
    # Token-id history lives in State — it is NOT the same thing as the
    # (user, bot) string pairs the Chatbot widget displays.
    token_state = gr.State([])
    user_input = gr.Textbox(label="Type your message here")
    submit_button = gr.Button("Send")

    def respond(message, chat_pairs, token_history):
        """Handle one chat turn: generate a reply and update both histories."""
        response, token_history = chatbot(message, token_history)
        # Build a new list rather than mutating the component value in place.
        chat_pairs = (chat_pairs or []) + [(message, response)]
        return chat_pairs, token_history

    submit_button.click(
        respond,
        inputs=[user_input, chatbot_widget, token_state],
        outputs=[chatbot_widget, token_state],
    )

# Launch the app.
demo.launch()