# chatLLM — src/app.py
# Uploaded by iurbinah; renamed from src/streamlit_app.py (commit ab8a895, verified).
# app.py
import os
import json
import streamlit as st
import requests
from requests.exceptions import RequestException
# Chat proxy endpoint (a local FastAPI-style backend that relays to OpenRouter);
# override with the BACKEND_URL environment variable in deployment.
BACKEND_URL = os.getenv("BACKEND_URL", "http://127.0.0.1:8000/chat")
# Seed system prompt used until the user edits it in Settings.
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."
# Some example OpenRouter model IDs; adjust as needed.
# The first entry is the fallback/default; "custom" is a sentinel that reveals
# a free-form model-id text input in the Settings expander.
PRESET_MODELS = [
"openai/gpt-4o-mini",
"openai/gpt-4o",
"meta-llama/llama-3.1-8b-instruct:free",
"anthropic/claude-3.5-sonnet",
"google/gemini-1.5-flash",
"mistralai/mistral-medium",
"cohere/command-r-plus",
"custom", # sentinel for custom entry
]
st.set_page_config(page_title="OpenRouter Chat", page_icon="💬", layout="centered")

# Seed per-session defaults exactly once; Streamlit preserves session_state
# across reruns, and setdefault leaves existing values untouched.
_SESSION_DEFAULTS = {
    "messages": [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}],
    "api_key": "",  # kept only in browser memory
    "model_choice": PRESET_MODELS[0],
    "custom_model": "",
    "temperature": 0.7,
    "max_tokens": 0,  # 0 means "let the backend decide"
}
for _key, _value in _SESSION_DEFAULTS.items():
    st.session_state.setdefault(_key, _value)
st.title("💬 OpenRouter Chat")

with st.expander("Settings", expanded=False):
    # API key lives only in this browser session's state and is forwarded with
    # each request; it is never persisted server-side.
    st.caption("API key is stored only in your browser session and sent with each request.")
    api_key = st.text_input("OpenRouter API Key", type="password", value=st.session_state.api_key)
    st.session_state.api_key = api_key

    # System prompt editor, prefilled with the current system message (if any).
    current_system = next((m for m in st.session_state.messages if m["role"] == "system"), None)
    sys_prompt = st.text_area(
        "System Message",
        value=current_system["content"] if current_system else DEFAULT_SYSTEM_PROMPT,
        height=120,
    )

    # Model selector; choosing the "custom" sentinel reveals a free-form id input.
    model_choice = st.selectbox(
        "Model (preset)",
        PRESET_MODELS,
        index=PRESET_MODELS.index(st.session_state.model_choice)
        if st.session_state.model_choice in PRESET_MODELS
        else 0,
    )
    st.session_state.model_choice = model_choice
    custom_model = ""
    if model_choice == "custom":
        custom_model = st.text_input("Custom model id (OpenRouter id)", value=st.session_state.custom_model)
        st.session_state.custom_model = custom_model

    # Sampling params (only committed to session_state on "Apply settings").
    temperature = st.slider("Temperature", 0.0, 1.5, st.session_state.temperature, 0.1)
    max_tokens = st.number_input("Max tokens (0 = auto)", min_value=0, value=st.session_state.max_tokens)

    col1, col2 = st.columns(2)
    with col1:
        apply_clicked = st.button("Apply settings")
    with col2:
        reset_chat = st.button("Reset conversation")

    if apply_clicked:
        # Update system message at index 0, or insert one if missing.
        if st.session_state.messages and st.session_state.messages[0]["role"] == "system":
            st.session_state.messages[0]["content"] = sys_prompt
        else:
            st.session_state.messages.insert(0, {"role": "system", "content": sys_prompt})
        st.session_state.temperature = float(temperature)
        st.session_state.max_tokens = int(max_tokens)
        st.success("Settings applied.")

    if reset_chat:
        st.session_state.messages = [{"role": "system", "content": sys_prompt}]
        # st.experimental_rerun() was removed in recent Streamlit releases;
        # prefer st.rerun() and fall back for older versions.
        if hasattr(st, "rerun"):
            st.rerun()
        else:
            st.experimental_rerun()
# Resolve model to use
def get_selected_model():
    """Return the OpenRouter model id chosen in Settings.

    For the "custom" preset this is the user-supplied id stripped of
    whitespace, which may be an empty string — callers treat a falsy result
    as "no model selected" and show an error. (Previously an empty custom id
    silently fell back to the first preset, which made that caller check
    unreachable and sent requests to a model the user never picked.)
    """
    if st.session_state.model_choice == "custom":
        return st.session_state.custom_model.strip()
    return st.session_state.model_choice
# Replay prior turns; the system message is configuration, not conversation,
# so it is skipped here.
for msg in st.session_state.messages:
    role = msg["role"]
    if role in ("user", "assistant"):
        with st.chat_message(role):
            st.markdown(msg["content"])

prompt = st.chat_input("Type your message")
def stream_chat(messages, model, temperature, max_tokens, api_key):
    """Yield assistant text chunks streamed from the backend.

    POSTs the conversation to BACKEND_URL and parses the SSE-style reply:
    each "data: " line carries a JSON object whose "content" field is
    yielded; a bare "done" payload ends the stream. Connection failures are
    surfaced as a final error-text chunk rather than raised.
    """
    body = {
        "messages": messages,
        "model": model,
        "temperature": temperature,
    }
    # max_tokens of 0 (or None) means "let the backend decide" — omit it.
    if max_tokens and max_tokens > 0:
        body["max_tokens"] = int(max_tokens)

    request_headers = {}
    # Pass API key via header; never persisted server-side.
    if api_key:
        request_headers["X-OpenRouter-Api-Key"] = api_key
    # Optional attribution header
    request_headers["HTTP-Referer"] = "http://localhost:8501"

    try:
        with requests.post(BACKEND_URL, json=body, headers=request_headers, stream=True, timeout=300) as resp:
            resp.raise_for_status()
            for raw_line in resp.iter_lines(decode_unicode=True):
                # Skip keep-alive blanks and anything that is not a data frame.
                if not raw_line or not raw_line.startswith("data: "):
                    continue
                payload = raw_line[len("data: "):]
                if payload.strip() == "done":
                    break
                try:
                    event = json.loads(payload)
                except json.JSONDecodeError:
                    continue
                if "content" in event:
                    yield event["content"]
    except RequestException as exc:
        yield f"\n[Connection error: {exc}]"
if prompt:
    model_id = get_selected_model()  # resolve once; used for the check and the request
    if not model_id:
        st.error("Please select or enter a model.")
    else:
        # Record AND render the user's message now: the history loop above has
        # already run this pass, so without an explicit bubble the message
        # would only appear after the next rerun.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            ai_area = st.empty()
            streamed = ""
            # Progressively rewrite the placeholder as chunks arrive.
            for chunk in stream_chat(
                st.session_state.messages,
                model=model_id,
                temperature=st.session_state.temperature,
                max_tokens=st.session_state.max_tokens,
                api_key=st.session_state.api_key,
            ):
                streamed += chunk
                ai_area.markdown(streamed)
            st.session_state.messages.append({"role": "assistant", "content": streamed})
# Sidebar quick actions
with st.sidebar:
    if st.button("Clear chat"):
        # Preserve the current system prompt; drop all user/assistant turns.
        sys_msg = next((m for m in st.session_state.messages if m["role"] == "system"), None)
        content = sys_msg["content"] if sys_msg else DEFAULT_SYSTEM_PROMPT
        st.session_state.messages = [{"role": "system", "content": content}]
        # st.experimental_rerun() was removed in recent Streamlit releases;
        # prefer st.rerun() and fall back for older versions.
        if hasattr(st, "rerun"):
            st.rerun()
        else:
            st.experimental_rerun()