| |
| import os |
| import json |
| import streamlit as st |
| import requests |
| from requests.exceptions import RequestException |
|
|
# Backend endpoint this frontend streams chat completions from; overridable via env.
BACKEND_URL = os.getenv("BACKEND_URL", "http://127.0.0.1:8000/chat")
# System prompt seeded into every fresh conversation.
DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."


# Model ids offered in the settings dropdown; picking "custom" reveals a
# free-text field for any other id.
# NOTE(review): ids are assumed to be valid OpenRouter model identifiers —
# verify against the OpenRouter model catalog.
PRESET_MODELS = [
    "openai/gpt-4o-mini",
    "openai/gpt-4o",
    "meta-llama/llama-3.1-8b-instruct:free",
    "anthropic/claude-3.5-sonnet",
    "google/gemini-1.5-flash",
    "mistralai/mistral-medium",
    "cohere/command-r-plus",
    "custom",
]
|
|
st.set_page_config(page_title="OpenRouter Chat", page_icon="💬", layout="centered")


# Seed per-session defaults exactly once; subsequent reruns keep whatever the
# user has since changed.  The dict is rebuilt on every rerun, so the mutable
# messages list is never shared between sessions.
_SESSION_DEFAULTS = {
    "messages": [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}],
    "api_key": "",
    "model_choice": PRESET_MODELS[0],
    "custom_model": "",
    "temperature": 0.7,
    "max_tokens": 0,
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default


st.title("💬 OpenRouter Chat")
|
|
with st.expander("Settings", expanded=False):
    st.caption("API key is stored only in your browser session and sent with each request.")
    api_key = st.text_input("OpenRouter API Key", type="password", value=st.session_state.api_key)
    st.session_state.api_key = api_key

    # Pre-fill the editor from the conversation's current system message, if any.
    current_system = next((m for m in st.session_state.messages if m["role"] == "system"), None)
    sys_prompt = st.text_area("System Message", value=current_system["content"] if current_system else DEFAULT_SYSTEM_PROMPT, height=120)

    # Preset selector; "custom" reveals a free-text model-id field.
    model_choice = st.selectbox("Model (preset)", PRESET_MODELS, index=PRESET_MODELS.index(st.session_state.model_choice) if st.session_state.model_choice in PRESET_MODELS else 0)
    st.session_state.model_choice = model_choice
    custom_model = ""
    if model_choice == "custom":
        custom_model = st.text_input("Custom model id (OpenRouter id)", value=st.session_state.custom_model)
        st.session_state.custom_model = custom_model

    temperature = st.slider("Temperature", 0.0, 1.5, st.session_state.temperature, 0.1)
    max_tokens = st.number_input("Max tokens (0 = auto)", min_value=0, value=st.session_state.max_tokens)

    col1, col2 = st.columns(2)
    with col1:
        apply_clicked = st.button("Apply settings")
    with col2:
        reset_chat = st.button("Reset conversation")

    if apply_clicked:
        # Update (or insert) the leading system message in place so the rest
        # of the conversation history survives a settings change.
        if st.session_state.messages and st.session_state.messages[0]["role"] == "system":
            st.session_state.messages[0]["content"] = sys_prompt
        else:
            st.session_state.messages.insert(0, {"role": "system", "content": sys_prompt})
        st.session_state.temperature = float(temperature)
        st.session_state.max_tokens = int(max_tokens)
        st.success("Settings applied.")
    if reset_chat:
        st.session_state.messages = [{"role": "system", "content": sys_prompt}]
        # Fix: st.experimental_rerun() was deprecated and removed in modern
        # Streamlit releases; prefer st.rerun() and fall back for old versions.
        (getattr(st, "rerun", None) or st.experimental_rerun)()
|
|
| |
def get_selected_model():
    """Return the effective OpenRouter model id from session settings.

    Falls back to the first preset when "custom" is selected but the
    custom-model field is blank.
    """
    choice = st.session_state.model_choice
    if choice != "custom":
        return choice
    typed = st.session_state.custom_model.strip()
    return typed if typed else PRESET_MODELS[0]
|
|
| |
# Replay the conversation so far; the system message is intentionally hidden.
for msg in st.session_state.messages:
    role = msg["role"]
    if role in ("user", "assistant"):
        with st.chat_message(role):
            st.markdown(msg["content"])


prompt = st.chat_input("Type your message")
|
|
def stream_chat(messages, model, temperature, max_tokens, api_key):
    """Stream assistant text fragments from the backend's SSE-style endpoint.

    Yields each decoded ``content`` chunk as it arrives.  On any transport
    failure the generator yields a single bracketed error string instead of
    raising, so the caller can render it inline.
    """
    payload = {
        "messages": messages,
        "model": model,
        "temperature": temperature,
    }
    # 0 (or falsy) means "let the backend decide".
    if max_tokens and max_tokens > 0:
        payload["max_tokens"] = int(max_tokens)

    headers = {}
    if api_key:
        headers["X-OpenRouter-Api-Key"] = api_key
    headers["HTTP-Referer"] = "http://localhost:8501"

    try:
        with requests.post(BACKEND_URL, json=payload, headers=headers, stream=True, timeout=300) as resp:
            resp.raise_for_status()
            for raw in resp.iter_lines(decode_unicode=True):
                # Skip keep-alives and anything that isn't a data frame.
                if not raw or not raw.startswith("data: "):
                    continue
                body = raw[len("data: "):]
                # NOTE(review): this backend signals end-of-stream with a bare
                # "done" sentinel (not the OpenAI-style "[DONE]") — confirm
                # against the backend implementation.
                if body.strip() == "done":
                    break
                try:
                    event = json.loads(body)
                except json.JSONDecodeError:
                    continue
                if "content" in event:
                    yield event["content"]
    except RequestException as e:
        yield f"\n[Connection error: {e}]"
|
|
if prompt:
    if not get_selected_model():
        st.error("Please select or enter a model.")
    else:
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Fix: the history loop above already ran before this append, so
        # without an explicit echo the user's message would not appear until
        # the next rerun.  Render it now.
        with st.chat_message("user"):
            st.markdown(prompt)
        # Stream the assistant reply into a placeholder, updating as chunks arrive.
        with st.chat_message("assistant"):
            ai_area = st.empty()
            streamed = ""
            for chunk in stream_chat(
                st.session_state.messages,
                model=get_selected_model(),
                temperature=st.session_state.temperature,
                max_tokens=st.session_state.max_tokens,
                api_key=st.session_state.api_key,
            ):
                streamed += chunk
                ai_area.markdown(streamed)
        # Persist the full reply (connection-error text included, best-effort).
        st.session_state.messages.append({"role": "assistant", "content": streamed})
|
|
| |
with st.sidebar:
    if st.button("Clear chat"):
        # Preserve the current system prompt while dropping the rest of history.
        sys_msg = next((m for m in st.session_state.messages if m["role"] == "system"), None)
        content = sys_msg["content"] if sys_msg else DEFAULT_SYSTEM_PROMPT
        st.session_state.messages = [{"role": "system", "content": content}]
        # Fix: st.experimental_rerun() was deprecated and removed in modern
        # Streamlit releases; prefer st.rerun() and fall back for old versions.
        (getattr(st, "rerun", None) or st.experimental_rerun)()