File size: 1,430 Bytes
37fa3f6
 
f9a9244
37fa3f6
 
f9a9244
37fa3f6
f9a9244
37fa3f6
 
f9a9244
 
feb1536
f9a9244
37fa3f6
 
f9a9244
 
 
 
 
65d3e29
 
 
 
37fa3f6
f9a9244
 
37fa3f6
 
f9a9244
37fa3f6
 
 
f9a9244
37fa3f6
 
 
 
f9a9244
37fa3f6
 
feb1536
37fa3f6
 
 
 
 
 
 
 
f9a9244
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import streamlit as st
from openai import OpenAI
 
def clear_chat():
    """Reset the conversation history kept in Streamlit's session state."""
    st.session_state["messages"] = []
 
st.title("Inference as a Service Playground")

with st.sidebar:
    st.button("Start New Chat", on_click=clear_chat)

    # Connection settings for the OpenAI-compatible inference endpoint.
    api_key = st.text_input("API Key", type="password")
    base_url = st.text_input("Base URL")
    model_name = st.text_input("Model Id")

# The OpenAI client requires a non-empty key even against servers that
# ignore it, so substitute a placeholder when the field is left blank.
if not api_key:
    api_key = "EMPTY"

# NOTE(review): base_url is empty until the user fills in the sidebar;
# the client is still constructed here and would only fail on first
# request — confirm this is the intended UX.
client = OpenAI(api_key=api_key, base_url=base_url)
 
 
# Create the conversation history on the very first script run.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay every stored turn so the transcript survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

prompt = st.chat_input("What is up?")
if prompt:
    # Persist and render the user's new message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the assistant's reply into the chat area as it arrives.
    with st.chat_message("assistant"):
        history = [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]
        stream = client.chat.completions.create(
            model=model_name,
            messages=history,
            max_tokens=512,
            stream=True,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})