import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer once; st.cache_resource keeps them in memory
# across Streamlit reruns instead of reloading the 20B model on every interaction
model_name = "openai/gpt-oss-20b"  # Replace with the actual GPT-OSS 20B model ID

@st.cache_resource
def load_model():
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForCausalLM.from_pretrained(model_name)
    return tok, mdl

tokenizer, model = load_model()
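# Note: a 20B-parameter model needs tens of GB of memory. On constrained
# hardware you may want reduced precision and automatic device placement,
# e.g. from_pretrained(model_name, torch_dtype="auto", device_map="auto")
# (device_map requires the `accelerate` package).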
# Set up the Streamlit app interface
st.title("GPT-OSS 20B Chatbot")
st.markdown("### Chat with GPT-OSS 20B. Ask your question below!")
# Create a conversation box
if 'history' not in st.session_state:
    st.session_state.history = []
# Function to display the conversation
def display_conversation():
    for message in st.session_state.history:
        if message['role'] == 'user':
            st.markdown(f"**You**: {message['text']}")
        else:
            st.markdown(f"**GPT-OSS 20B**: {message['text']}")
# Handle user input
user_input = st.text_input("Enter your prompt:")
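# Note: newer Streamlit versions also offer st.chat_input and st.chat_message,
# which are purpose-built for chat UIs.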
if user_input and user_input != st.session_state.get("last_prompt", ""):
    # Remember the prompt so a Streamlit rerun doesn't process it twice
    st.session_state.last_prompt = user_input
    # Store user input in the session history
    st.session_state.history.append({"role": "user", "text": user_input})
    # Tokenize the user input and generate a response
    inputs = tokenizer(user_input, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=500)
    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
    # Store the model's response in the history
    st.session_state.history.append({"role": "gpt", "text": response})
# Display updated conversation
display_conversation()
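# To try the app locally (assuming this file is saved as app.py and the
# streamlit, transformers, and torch packages are installed):
#   streamlit run app.py
#
# Note: this sketch only feeds the latest prompt to the model. For true
# multi-turn chat, you could format st.session_state.history with
# tokenizer.apply_chat_template(...) before generating (this assumes the
# model ships a chat template).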