File size: 1,468 Bytes
368bfbc
209398d
 
b4eed25
368bfbc
d13a97d
368bfbc
209398d
 
 
368bfbc
d13a97d
 
 
 
209398d
 
 
 
 
 
 
 
 
 
 
d13a97d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import streamlit as st
import random
import time
from transformers import GPT2LMHeadModel, GPT2Tokenizer

st.title("Simple chat with Hugging Face")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []


@st.cache_resource
def _load_model_and_tokenizer():
    """Load GPT-2 and its tokenizer once per server process.

    Streamlit reruns the whole script on every user interaction;
    st.cache_resource keeps the (expensive) model load from repeating
    on each rerun.
    """
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    return model, tokenizer


model, tokenizer = _load_model_and_tokenizer()

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Encode the new user input and append the end-of-sentence token
    inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")

    # Generate a response. max_new_tokens bounds only the reply length;
    # the original max_length=50 counted the prompt tokens too, silently
    # truncating (or eliminating) replies to longer prompts.
    outputs = model.generate(
        inputs,
        max_new_tokens=50,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # generate() returns prompt + continuation; decode only the newly
    # generated tokens so the user's prompt isn't echoed back as part
    # of the bot's reply.
    response = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    )

    # Display the response in the chat
    with st.chat_message("bot"):
        st.markdown(response)
    # Add bot message to chat history
    st.session_state.messages.append({"role": "bot", "content": response})