|
|
import streamlit as st |
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
|
|
|
# Hugging Face Hub id of the 20B-parameter GPT-OSS checkpoint.
model_name = "openai/gpt-oss-20b"


@st.cache_resource
def _load_model_and_tokenizer(name):
    """Load and cache the tokenizer and model for *name*.

    Streamlit re-executes the whole script on every widget interaction;
    without @st.cache_resource the 20B model would be reloaded from disk
    (or re-downloaded) on every keystroke-submit, which is prohibitively
    slow. The cache keeps a single shared instance across reruns/sessions.
    """
    tok = AutoTokenizer.from_pretrained(name)
    mdl = AutoModelForCausalLM.from_pretrained(name)
    return tok, mdl


# Module-level globals preserved so downstream code keeps using
# `tokenizer` and `model` unchanged.
tokenizer, model = _load_model_and_tokenizer(model_name)
|
|
|
|
|
|
|
|
# --- Page chrome -----------------------------------------------------------
st.title("GPT-OSS 20B Chatbot")
st.markdown("### Chat with GPT-OSS 20B. Ask your question below!")

# --- Conversation state ----------------------------------------------------
# Session state survives Streamlit's script reruns, so the chat transcript
# accumulates across user interactions. Initialize it exactly once.
if "history" not in st.session_state:
    st.session_state["history"] = []
|
|
|
|
|
|
|
|
def display_conversation():
    """Render the accumulated chat transcript as one markdown line per turn."""
    for entry in st.session_state.history:
        # Label the line by speaker; any non-'user' role is the model's turn.
        speaker = "You" if entry["role"] == "user" else "GPT-OSS 20B"
        st.markdown(f"**{speaker}**: {entry['text']}")
|
|
|
|
|
|
|
|
# Prompt box; empty string (falsy) until the user submits something.
user_input = st.text_input("Enter your prompt:")

if user_input:
    # Record the user's turn before generating the reply.
    st.session_state.history.append({"role": "user", "text": user_input})

    # NOTE(review): only the latest prompt is sent to the model — prior
    # turns in st.session_state.history are not included in the context.
    # Confirm whether multi-turn context is intended.
    inputs = tokenizer(user_input, return_tensors="pt")

    # max_new_tokens bounds the reply itself; the original max_length=500
    # counted the prompt tokens against the budget, so long prompts could
    # truncate (or entirely squeeze out) the generated answer.
    outputs = model.generate(**inputs, max_new_tokens=500)

    # generate() returns prompt + completion tokens. Slice off the prompt
    # so the displayed response does not echo the user's input back.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    st.session_state.history.append({"role": "gpt", "text": response})

# Re-render the full transcript (including the turn just added, if any).
display_conversation()
|
|
|