import streamlit as st
import random
import time

from transformers import GPT2LMHeadModel, GPT2Tokenizer

st.title("Simple chat with Hugging Face")

# Initialize chat history (persists across Streamlit reruns)
if "messages" not in st.session_state:
    st.session_state.messages = []


@st.cache_resource
def load_model():
    """Load the GPT-2 model and tokenizer once per server process.

    Streamlit re-executes this script on every user interaction; without
    caching, the model and tokenizer would be re-instantiated on every
    rerun, which is prohibitively slow.
    """
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    return model, tokenizer


model, tokenizer = load_model()

# Replay the conversation so far on each rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Display and record the user message
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Encode the new user input and append the end-of-sequence token
    inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")

    # Generate a response. max_new_tokens bounds the *reply* length;
    # max_length would count the prompt tokens too, so a long prompt
    # could leave no room for a reply at all.
    outputs = model.generate(
        inputs,
        max_new_tokens=50,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # generate() returns the prompt tokens followed by the new tokens;
    # decode only the new ones so the bot doesn't echo the user's
    # message back as part of its reply.
    response = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )

    # Display and record the reply. The role must be "assistant" (or
    # "ai"): st.chat_message raises on arbitrary names such as "bot",
    # both here and when the history loop above replays stored roles.
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})