diff --git "a/src/streamlit_app.py" "b/src/streamlit_app.py" --- "a/src/streamlit_app.py" +++ "b/src/streamlit_app.py" @@ -1,81 +1,848 @@ -# Import necessary libraries - -import streamlit as st -import os -from openai import OpenAI -import json - -working_dir = os.path.dirname(os.path.abspath(__file__)) -endpoint_data = json.load(open(f"{working_dir}/model_info.json")) - -def clear_chat(): - st.session_state.messages = [] - -def get_api_key(): - # Retrieve API key from environment variable or prompt user - return os.getenv("OPENAI_API_KEY") or st.text_input("Enter your API Key", type="password") - -st.title("AIaaS on Intel® Gaudi® Demo") - -# Extract endpoint and model names from JSON data -endpoint = endpoint_data['endpoint'] -model_names = endpoint_data['models'] - - -with st.sidebar: - modelname = st.selectbox("Select a LLM model (Running on Intel® Gaudi®) ", model_names) - st.write(f"You selected: {modelname}") - st.button("Start New Chat", on_click=clear_chat) - - # Add a text input for the API key - api_key = get_api_key() - if api_key: - st.session_state.api_key = api_key - -# Check if the API key is provided -if "api_key" not in st.session_state or not st.session_state.api_key: - st.error("Please enter your API Key in the sidebar.") -else: - try: - api_key = st.session_state.api_key - base_url = endpoint - client = OpenAI(api_key=api_key, base_url=base_url) - - print(f"Selected Model --> {modelname}") - st.write(f"**Model Info:** `{modelname}`") - - if "messages" not in st.session_state: - st.session_state.messages = [] - - for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - if prompt := st.chat_input("What is up?"): - st.session_state.messages.append({"role": "user", "content": prompt}) - with st.chat_message("user"): - st.markdown(prompt) - - with st.chat_message("assistant"): - try: - stream = client.chat.completions.create( - model=modelname, - messages=[ - {"role": m["role"], 
"content": m["content"]} - for m in st.session_state.messages - ], - max_tokens=1024, - temperature=0, - stream=True, - ) - - response = st.write_stream(stream) - except Exception as e: - st.error(f"An error occurred while generating the response: {e}") - response = "An error occurred while generating the response." - - st.session_state.messages.append({"role": "assistant", "content": response}) - except KeyError as e: - st.error(f"Key error: {e}") - except Exception as e: - st.error(f"An unexpected error occurred: {e}") + + + + + + + + + + + + + + + + + + + + + + + + + + + + src/streamlit_app.py · Amironox/Test3 at main + + + + + + + +
+ +
Hugging Face's logo + +
+ +
+
+ +
+
+ + + +
+ + + +
+ +

+ Spaces: +
+
+ + + + +
+ + +Amironox + +
/
+ +
Test3 +
+
+
+ + + + + + + +
+
+ Running +
+
+
+ + + + + + + +
+ + + +
+ + + + + +

+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + +
+
Test3 + / + src + + /streamlit_app.py +
+
+ +
+
Amironox's picture + +
Update src/streamlit_app.py
+ a99f6a6 + verified +
+
+ + raw +
+ history + + blame + + contribute + + delete + + +
+ +
+
+ +
+ +
+ 2.84 kB
+ +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
# Import necessary libraries
+
+
+
import streamlit as st
+
import os
+
from openai import OpenAI
+
import json
+
+
+
# Resolve paths relative to this source file so the app works no matter
# what the current working directory is when Streamlit launches it.
working_dir = os.path.dirname(os.path.abspath(__file__))

# Load the endpoint/model configuration. Use a context manager so the
# file handle is closed deterministically (the original
# `json.load(open(...))` left the handle open until GC).
with open(f"{working_dir}/model_info.json", encoding="utf-8") as config_file:
    endpoint_data = json.load(config_file)
+
+
+
def clear_chat():
    """Reset the stored conversation so the next rerun starts a fresh chat."""
    st.session_state["messages"] = []
+
+
+
def get_api_key():
    """Return the OpenAI-compatible API key.

    Prefers the ``OPENAI_API_KEY`` environment variable; when it is unset
    or empty, renders a password-style text input and returns whatever the
    user has typed (an empty string until they do).
    """
    env_key = os.getenv("OPENAI_API_KEY")
    if env_key:
        return env_key
    return st.text_input("Enter your API Key", type="password")
+
+
+
st.title("AIaaS on Intel® Gaudi® Demo")

# Extract the inference endpoint URL and the list of served model names
# from the configuration loaded at startup.
endpoint = endpoint_data['endpoint']
model_names = endpoint_data['models']


with st.sidebar:
    # Model picker plus a button that wipes the chat history via clear_chat.
    modelname = st.selectbox("Select a LLM model (Running on Intel® Gaudi®) ", model_names)
    st.write(f"You selected: {modelname}")
    st.button("Start New Chat", on_click=clear_chat)

    # Add a text input for the API key (env var takes precedence inside
    # get_api_key); persist it so later reruns keep the credential.
    api_key = get_api_key()
    if api_key:
        st.session_state.api_key = api_key

# Check if the API key is provided before constructing the client.
if "api_key" not in st.session_state or not st.session_state.api_key:
    st.error("Please enter your API Key in the sidebar.")
else:
    try:
        api_key = st.session_state.api_key
        base_url = endpoint
        # OpenAI-compatible client pointed at the configured Gaudi endpoint.
        client = OpenAI(api_key=api_key, base_url=base_url)

        # NOTE: removed a leftover `print(f"Selected Model --> ...")` debug
        # statement that wrote to stdout on every Streamlit rerun.
        st.write(f"**Model Info:** `{modelname}`")

        # Initialise the chat history on the first run of the session.
        if "messages" not in st.session_state:
            st.session_state.messages = []

        # Replay the conversation so far — Streamlit re-executes the whole
        # script on every interaction, so the transcript must be redrawn.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        if prompt := st.chat_input("What is up?"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            with st.chat_message("assistant"):
                try:
                    # Stream the completion so tokens render incrementally;
                    # the full history is sent for conversational context.
                    stream = client.chat.completions.create(
                        model=modelname,
                        messages=[
                            {"role": m["role"], "content": m["content"]}
                            for m in st.session_state.messages
                        ],
                        max_tokens=1024,
                        temperature=0,
                        stream=True,
                    )

                    response = st.write_stream(stream)
                except Exception as e:
                    # Surface the failure in the UI and store a placeholder
                    # so the transcript stays well-formed.
                    st.error(f"An error occurred while generating the response: {e}")
                    response = "An error occurred while generating the response."

            st.session_state.messages.append({"role": "assistant", "content": response})
    except KeyError as e:
        # Raised if the config dict is missing an expected key.
        st.error(f"Key error: {e}")
    except Exception as e:
        st.error(f"An unexpected error occurred: {e}")
+
+
+
+ +
+ + + + + + +