# NOTE: Hugging Face Spaces page residue ("Spaces: / Paused / Paused") removed —
# it was scrape leftover from extraction, not part of the program.
import streamlit as st
import requests
import json
from PIL import Image
import io
import base64
import pandas as pd
import zipfile
import PyPDF2

# --- Page Config ---
st.set_page_config(page_title="OpenRouter + Gemini AI Chat", layout="wide", initial_sidebar_state="expanded")

# Base URL for every OpenRouter REST call made below.
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
# --- Title ---
# NOTE(review): emoji below were restored from extraction mojibake ("π€", "π‘") —
# confirm the intended glyphs.
st.title("🤖 OpenRouter + Gemini AI Chat Interface")
st.markdown("""
**Chat with Free-Tier OpenRouter models and optionally upload files (Text, PDF, ZIP, Images) for context.**
💡 Responses are copyable with a single click.
""")

# --- Session State ---
# messages: full chat transcript as a list of {"role": ..., "content": ...} dicts.
if "messages" not in st.session_state:
    st.session_state.messages = []
# uploaded_content: processed attachment dict returned by process_file(), or None.
if "uploaded_content" not in st.session_state:
    st.session_state.uploaded_content = None
| # --- Utilities --- | |
def encode_image(image):
    """Serialize a PIL image to a base64-encoded JPEG string."""
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    raw = buffer.getvalue()
    return base64.b64encode(raw).decode("utf-8")
def process_file(uploaded_file):
    """Classify and extract an uploaded file into a {"type", "content"} dict.

    Returns one of:
      {"type": "image", "content": PIL.Image} for jpg/jpeg/png,
      {"type": "text",  "content": str}       for text-like, tabular, PDF and ZIP files,
      {"type": "error", "content": str}       on failure or an unsupported extension.
    """
    file_type = uploaded_file.name.split('.')[-1].lower()
    # Extensions treated as readable text; also used to pick members out of ZIPs.
    # NOTE(review): '.xlsx' is binary — raw-decoding it from a ZIP yields junk; kept
    # for behavior compatibility.
    text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
    if file_type in ("jpg", "jpeg", "png"):
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
    # Plain-text formats. csv/xlsx are deliberately excluded here: pandas renders
    # them below. (Same membership set as the original's list arithmetic.)
    if file_type in ("txt", "py", "html", "js", "css", "json", "xml", "sql"):
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
    if file_type in ("csv", "xlsx"):
        try:
            df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed reading table: {e}"}
    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # extract_text() can return None for image-only pages; coalesce to "".
            return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
        except Exception as e:
            return {"type": "error", "content": f"PDF Error: {e}"}
    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                content = "ZIP Contents:\n"
                # BUGFIX: `content` started as a non-empty header, so the original
                # `content or "ZIP contains no readable text files."` fallback could
                # never trigger. Track matches explicitly instead.
                found = False
                for f in z.infolist():
                    if not f.is_dir() and f.filename.lower().endswith(text_exts):
                        found = True
                        # NOTE(review): member prefix emoji restored from mojibake ("π").
                        content += f"\n📄 {f.filename}:\n"
                        content += z.read(f.filename).decode("utf-8", errors="ignore")
                return {"type": "text", "content": content if found else "ZIP contains no readable text files."}
        except Exception as e:
            return {"type": "error", "content": f"ZIP Error: {e}"}
    return {"type": "error", "content": "Unsupported file type."}
def fetch_model_contexts(api_key):
    """Return {model_id: context_length} from the OpenRouter /models endpoint.

    Returns an empty dict when no API key is given, on a non-200 response,
    or on any request failure (which is surfaced as a sidebar warning).
    """
    if not api_key:
        return {}
    try:
        response = requests.get(
            f"{OPENROUTER_API_BASE}/models",
            headers={"Authorization": f"Bearer {api_key}"},
            timeout=10,
        )
        if response.status_code != 200:
            return {}
        # 4096 is a conservative default when the API omits context_length.
        return {
            model.get("id"): model.get("context_length", 4096)
            for model in response.json().get("data", [])
        }
    except Exception as e:
        # BUGFIX: warning-icon emoji restored from mojibake ("β οΈ").
        st.warning(f"⚠️ Failed to fetch model info: {e}")
        return {}
def call_openrouter(model, messages, temp, max_tok, key):
    """POST a chat-completion request to OpenRouter and return the reply text.

    Raises Exception with a descriptive message on HTTP errors or a
    malformed response body (callers catch bare Exception).
    """
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
        "Referer": "https://aicodecraft.io",
        "X-Title": "OpenRouter-Free-Interface",
    }
    payload = {
        "model": model,
        "messages": messages,
        "temperature": temp,
        "max_tokens": max_tok,
    }
    # BUGFIX: added a timeout so a stalled request cannot hang the UI forever.
    # `json=payload` serializes the body in one step (was data=json.dumps(...)).
    res = requests.post(
        f"{OPENROUTER_API_BASE}/chat/completions",
        headers=headers,
        json=payload,
        timeout=120,
    )
    if res.status_code == 200:
        try:
            return res.json()["choices"][0]["message"]["content"]
        except (KeyError, IndexError, ValueError):
            raise Exception("Invalid API response")
    # Error path: prefer the API's structured error message, fall back to raw text.
    try:
        err = res.json()
        msg = err.get("error", {}).get("message", res.text)
    except ValueError:  # BUGFIX: was a bare `except:` (swallowed SystemExit etc.)
        msg = res.text
    raise Exception(f"API Error {res.status_code}: {msg}")
# --- Sidebar ---
# NOTE(review): indentation was lost in extraction; everything down to the Reset
# button is assumed to live inside the sidebar context — confirm against the
# original layout. Emoji restored from mojibake ("βοΈ", "π").
with st.sidebar:
    st.header("⚙️ API Settings")
    api_key = st.text_input("OpenRouter API Key", type="password")
    FREE_MODEL_LIST = [
        "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
        "deepseek/deepseek-chat-v3",
        "google/gemma-2-9b-it",
        "mistralai/mistral-7b-instruct-v0.2",
        "qwen/qwen2-72b-instruct",
        "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
    ]
    model = st.selectbox("Select a model", FREE_MODEL_LIST, index=0)

    # Cap the max-token slider at the selected model's advertised context window.
    model_contexts = fetch_model_contexts(api_key)
    default_ctx = model_contexts.get(model, 4096)
    max_tokens = st.slider(f"Max Tokens (max {default_ctx})", 1, min(default_ctx, 32000), min(512, default_ctx))
    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)

    if st.button("🔄 Reset Chat"):
        st.session_state.messages = []
        st.session_state.uploaded_content = None
        st.success("Chat and attachment cleared!")
# --- File Upload ---
# NOTE(review): assumed to sit at top level (matching the other "# ---" section
# markers), not inside the sidebar — confirm placement. Emoji restored from
# mojibake ("π", "β").
uploaded_file = st.file_uploader(
    "Upload File (optional)",
    type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"],
)
# Only process a fresh upload; the processed attachment survives Streamlit reruns
# until explicitly removed.
if uploaded_file and st.session_state.uploaded_content is None:
    st.session_state.uploaded_content = process_file(uploaded_file)

if st.session_state.uploaded_content:
    processed = st.session_state.uploaded_content
    st.subheader("📎 Current Attachment:")
    if processed["type"] == "image":
        st.image(processed["content"], width=300)
    elif processed["type"] == "text":
        st.text_area("File Preview", processed["content"], height=150)
    elif processed["type"] == "error":
        st.error(processed["content"])
    if st.button("❌ Remove Attachment"):
        st.session_state.uploaded_content = None
        # BUGFIX: st.experimental_rerun() was removed in recent Streamlit releases;
        # prefer st.rerun() when available, fall back for old versions.
        (st.rerun if hasattr(st, "rerun") else st.experimental_rerun)()
# --- Chat History ---
# Re-render the entire transcript on every Streamlit rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
# --- Chat Input ---
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("Please enter your OpenRouter API Key in the sidebar.")
        st.stop()

    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Fresh per-request copy of the transcript: attachment text is appended to
    # the copy only, so it is never persisted into the stored history.
    messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]

    # Attach the current file (if any) to the outgoing copy of the last user message.
    if st.session_state.uploaded_content:
        content = st.session_state.uploaded_content
        if content["type"] == "image":
            base64_img = encode_image(content["content"])
            messages[-1]["content"] = [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}},
            ]
        elif content["type"] == "text":
            messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"

    # Generate response
    with st.chat_message("assistant"):
        with st.spinner(f"Asking {model}..."):
            try:
                reply = call_openrouter(model, messages, temperature, max_tokens, api_key)
                # BUGFIX: the original embedded reply.replace('`','\\`') directly in
                # an f-string; a backslash inside an f-string expression is a
                # SyntaxError before Python 3.12. Escape outside the f-string, and
                # also neutralize backslashes and "${" so the reply cannot break
                # out of the JS template literal in the copy button.
                js_safe = (
                    reply.replace("\\", "\\\\")
                    .replace("`", "\\`")
                    .replace("${", "\\${")
                )
                # NOTE(review): button/error emoji restored from mojibake ("π", "β").
                st.markdown(f"""
                <div style="position: relative;">
                    <button onclick="navigator.clipboard.writeText(`{js_safe}`)"
                            style="position:absolute; right:0; top:0;">📋 Copy</button>
                    <div style="padding-right:50px;">{reply}</div>
                </div>
                """, unsafe_allow_html=True)
                st.session_state.messages.append({"role": "assistant", "content": reply})
            except Exception as e:
                st.error(str(e))
                st.session_state.messages.append({"role": "assistant", "content": f"❌ {str(e)}"})