import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer

st.title("Simple chat with Hugging Face")

# Initialize the chat history (st.session_state persists across script reruns)
if "messages" not in st.session_state: |
|
|
st.session_state.messages = [] |
|
|
|
|
|
|
|
|
model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
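# Optionally, the two loading lines above could be wrapped in a function
# decorated with @st.cache_resource so Streamlit does not reload GPT-2 on
# every rerun; a minimal sketch:
#
#     @st.cache_resource
#     def load_model():
#         return GPT2LMHeadModel.from_pretrained("gpt2"), GPT2Tokenizer.from_pretrained("gpt2")
#
#     model, tokenizer = load_model()

# Replay the chat history on every rerun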
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What is up?"):
    # Display the user message and add it to the chat history
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Tokenize the prompt and generate a continuation with GPT-2
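    # pad_token_id is set to the EOS token id so generate() does not warn
    # about open-ended generation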
    inputs = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
    outputs = model.generate(inputs, max_new_tokens=50, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens so the reply does not echo the prompt
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

    # Display the model's reply and add it to the chat history
with st.chat_message("bot"): |
|
|
st.markdown(response) |
|
|
|
|
|
st.session_state.messages.append({"role": "bot", "content": response}) |
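# To try the app locally (assuming this file is saved as app.py and the
# streamlit, transformers, and torch packages are installed):
#
#     streamlit run app.py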