import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Configure device (CPU/GPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the model and tokenizer once, caching them across reruns
@st.cache_resource
def load_model():
    model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium").to(device)
    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")
    return model, tokenizer

model, tokenizer = load_model()

# Function to generate a chatbot response
def get_response(conversation_history, user_input):
    # DialoGPT expects dialogue turns separated by the EOS token
    input_text = tokenizer.eos_token.join(conversation_history + [user_input]) + tokenizer.eos_token
    inputs = tokenizer(input_text, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=75,          # cap the reply length, not the whole sequence
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,             # required for top_k/top_p/temperature to take effect
            top_k=40,
            top_p=0.8,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token of its own
        )
    # Decode only the newly generated tokens, not the prompt
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

# Streamlit app interface
st.title("Chatbot with Hugging Face Model")
st.write("### Chat with the chatbot powered by DialoGPT. Type your message below!")

# Initialize conversation history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat history
for message in st.session_state.messages:
    st.markdown(f"**{message['role']}:** {message['content']}")

# User input; st.chat_input clears itself after each submission, which avoids the
# duplicate-widget error caused by re-creating a text_input with the same key
user_input = st.chat_input("You:")

# Generate a response when the user submits a message
if user_input:
    # Show the new user message immediately
    st.markdown(f"**User:** {user_input}")
    # Use the last few turns (both roles) as context for the model
    history = [msg["content"] for msg in st.session_state.messages[-4:]]
    # Add user input to the conversation history
    st.session_state.messages.append({"role": "User", "content": user_input})
    # Generate and display the chatbot response
    response = get_response(history, user_input)
    st.markdown(f"**Chatbot:** {response}")
    st.session_state.messages.append({"role": "Chatbot", "content": response})
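
# To try the app locally (assuming this script is saved as app.py; the package
# names below are the standard PyPI distributions, not anything defined here):
#
#   pip install streamlit transformers torch
#   streamlit run app.py
#
# Streamlit serves the app at http://localhost:8501 by default; the first run
# downloads the DialoGPT-medium weights from the Hugging Face Hub.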