# sendox-sync / app.py
# SendoX - Sync: IDE-style AI coding assistant (Gradio app for HF Spaces).
import os
import sys
# Fix encoding issues before anything else: force UTF-8 text I/O so the
# emoji-heavy UI strings below don't crash on narrow default consoles.
os.environ["PYTHONIOENCODING"] = "utf-8"
for _stream in (sys.stdout, sys.stderr):
    # Streams may have been replaced (tests, some hosts) by objects without
    # reconfigure(), and .encoding can be None or differently cased ("UTF-8").
    if _stream is not None and hasattr(_stream, "reconfigure"):
        if (_stream.encoding or "").lower() != "utf-8":
            _stream.reconfigure(encoding="utf-8")
import gradio as gr
from huggingface_hub import InferenceClient, HfApi
# ─────────────────────────────────────────────
# CONSTANTS
# ─────────────────────────────────────────────
# Browser-tab / page title passed to gr.Blocks(title=...).
APP_TITLE = "SendoX - Sync"
# Default system message prepended to every chat-completion request; the
# sidebar textbox is pre-filled with it and may override it per session.
DEFAULT_SYSTEM_PROMPT = (
    "You are SendoX, an advanced AI coding assistant. "
    "You help developers write, debug, and understand code. "
    "Be concise, precise, and professional."
)
# Model pre-selected in the sidebar dropdown before/without login.
DEFAULT_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
# Curated model ids always offered in the dropdown; get_user_models()
# prepends the logged-in user's own Hub models to this list.
FALLBACK_MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "HuggingFaceH4/zephyr-7b-beta",
    "meta-llama/Llama-3.2-3B-Instruct",
    "Qwen/Qwen2.5-7B-Instruct",
    "google/gemma-2-2b-it",
]
# ─────────────────────────────────────────────
# HELPERS
# ─────────────────────────────────────────────
def get_user_models(profile: gr.OAuthProfile | None) -> list[str]:
    """Return model ids for the sidebar dropdown.

    Starts from the curated FALLBACK_MODELS list. When a user is logged in,
    their own Hub models are prepended (deduplicated against the curated
    list). Any Hub/network failure is logged and silently falls back to the
    curated list, so login is never blocked by a Hub hiccup.
    """
    models = list(FALLBACK_MODELS)
    if profile is None:
        return models
    try:
        api = HfApi()
        # `author=` is the documented HfApi.list_models parameter for
        # filtering by author; an "author:<name>" string inside `filter`
        # is a tag filter and does not select by author.
        user_models = api.list_models(author=profile.username, limit=30)
        user_model_ids = [m.modelId for m in user_models if m.modelId]
        if user_model_ids:
            # User models first, then curated ones that aren't duplicates.
            return user_model_ids + [m for m in models if m not in user_model_ids]
    except Exception as e:  # best-effort: degrade to the curated list
        print(f"[SendoX] Could not fetch user models: {e}", flush=True)
    return models
def chat_with_model(
    message: str,
    history: list,
    system_prompt: str,
    model_id: str,
    temperature: float,
    profile: gr.OAuthProfile | None,
):
    """Stream a reply from `model_id` via HF Inference, yielding the
    growing response text so gr.ChatInterface renders it token by token.

    Refuses to run when `profile` is None (user not logged in) and yields
    an empty string for blank input.
    """
    if profile is None:
        yield "⚠️ You must be logged in to use SendoX - Sync."
        return
    if not message.strip():
        yield ""
        return

    # Flatten the (user, assistant) tuple history into chat-completion
    # messages, starting with the system prompt (fallback to the default).
    messages = [
        {"role": "system", "content": system_prompt or DEFAULT_SYSTEM_PROMPT}
    ]
    for past_user, past_bot in history:
        if past_user:
            messages.append({"role": "user", "content": past_user})
        if past_bot:
            messages.append({"role": "assistant", "content": past_bot})
    messages.append({"role": "user", "content": message})

    client = InferenceClient(model_id)
    accumulated = ""
    try:
        stream = client.chat_completion(
            messages=messages,
            # Clamp temperature away from 0, which some backends reject.
            temperature=max(0.01, float(temperature)),
            max_tokens=2048,
            stream=True,
        )
        for chunk in stream:
            accumulated += chunk.choices[0].delta.content or ""
            yield accumulated
    except Exception as e:
        yield (
            f"❌ Error contacting model `{model_id}`:\n\n```\n{e}\n```\n\n"
            "Try selecting a different model from the sidebar."
        )
# ─────────────────────────────────────────────
# CSS — Dark IDE Theme
# ─────────────────────────────────────────────
# Injected via gr.Blocks(css=...). Selectors target Gradio's generated DOM
# plus the elem_id containers created below (login-gate, workspace, sidebar,
# main-panel, user-badge). The string is runtime data — do not reformat.
CUSTOM_CSS = """
/* ── Base ── */
:root {
--bg-deep: #0d0f14;
--bg-panel: #141720;
--bg-sidebar: #10131a;
--border: #2a2f3d;
--accent: #00e5ff;
--accent2: #7c5cfc;
--text-pri: #e8eaf0;
--text-sec: #7a8099;
--danger: #ff4d6d;
--success: #00e676;
--radius: 10px;
--font-mono: 'JetBrains Mono', 'Fira Code', monospace;
--font-ui: 'Syne', 'Segoe UI', sans-serif;
}
body, .gradio-container {
background: var(--bg-deep) !important;
color: var(--text-pri) !important;
font-family: var(--font-ui) !important;
}
/* ── Login Gate ── */
#login-gate {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
min-height: 100vh;
background: radial-gradient(ellipse at 60% 40%, #1a1040 0%, var(--bg-deep) 70%);
}
#login-gate .logo-title {
font-size: 3rem;
font-weight: 800;
letter-spacing: -1px;
background: linear-gradient(135deg, var(--accent), var(--accent2));
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
margin-bottom: 0.5rem;
}
#login-gate .logo-sub {
color: var(--text-sec);
font-size: 1.05rem;
margin-bottom: 2.5rem;
letter-spacing: 2px;
text-transform: uppercase;
}
/* ── IDE Workspace ── */
#workspace {
display: flex;
height: 100vh;
overflow: hidden;
}
#sidebar {
width: 280px;
min-width: 240px;
background: var(--bg-sidebar);
border-right: 1px solid var(--border);
padding: 1.5rem 1rem;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: 1.25rem;
}
#sidebar .sidebar-header {
font-size: 1.3rem;
font-weight: 700;
letter-spacing: -0.5px;
background: linear-gradient(90deg, var(--accent), var(--accent2));
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
padding-bottom: 0.75rem;
border-bottom: 1px solid var(--border);
}
#main-panel {
flex: 1;
display: flex;
flex-direction: column;
overflow: hidden;
}
/* ── Gradio overrides ── */
.gr-button-primary, button.primary {
background: linear-gradient(135deg, var(--accent2), var(--accent)) !important;
border: none !important;
color: #fff !important;
font-weight: 600 !important;
border-radius: var(--radius) !important;
}
.gr-textbox textarea, .gr-textbox input {
background: var(--bg-panel) !important;
border: 1px solid var(--border) !important;
color: var(--text-pri) !important;
font-family: var(--font-mono) !important;
border-radius: var(--radius) !important;
}
.gr-dropdown select, .gr-dropdown input {
background: var(--bg-panel) !important;
border: 1px solid var(--border) !important;
color: var(--text-pri) !important;
}
.gr-slider input[type=range] {
accent-color: var(--accent2);
}
.gr-chatbot {
background: var(--bg-panel) !important;
border: 1px solid var(--border) !important;
border-radius: var(--radius) !important;
font-family: var(--font-mono) !important;
}
.message.user { background: #1e2230 !important; }
.message.bot { background: #161b27 !important; }
label { color: var(--text-sec) !important; font-size: 0.82rem !important; text-transform: uppercase; letter-spacing: 1px; }
/* ── User badge ── */
#user-badge {
font-size: 0.8rem;
color: var(--success);
padding: 0.3rem 0.6rem;
border: 1px solid var(--success);
border-radius: 20px;
display: inline-block;
}
"""
# ─────────────────────────────────────────────
# BUILD UI
# ─────────────────────────────────────────────
# Two mutually-exclusive views live in one Blocks app: a full-screen login
# gate (visible by default) and the IDE workspace (hidden). Visibility is
# toggled by handle_auth_on_load() on every page load.
# NOTE(review): several runtime strings below contain mojibake (e.g. "β€”"
# where "—" is intended) — presumably a double-encoding in the source file;
# left byte-identical here, should be fixed at the file's encoding level.
with gr.Blocks(
    title=APP_TITLE,
    css=CUSTOM_CSS,
    theme=gr.themes.Base(
        primary_hue="cyan",
        secondary_hue="violet",
        neutral_hue="slate",
        font=[gr.themes.GoogleFont("Syne"), "sans-serif"],
        font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "monospace"],
    ),
) as demo:
    # ── Hidden state ──
    # Holds the logged-in user's gr.OAuthProfile (None when anonymous);
    # forwarded to chat_with_model so it can refuse unauthenticated calls.
    oauth_profile_state = gr.State(None)
    # ══════════════════════════════════════════
    # LOGIN GATE — shown until authenticated
    # ══════════════════════════════════════════
    with gr.Column(visible=True, elem_id="login-gate") as login_gate:
        gr.HTML("""
<div class="logo-title">SendoX β€” Sync</div>
<div class="logo-sub">AI Developer Workspace</div>
<p style="color:#7a8099;max-width:400px;text-align:center;margin-bottom:2rem;">
A professional IDE-style AI coding assistant powered by Hugging Face Inference.
Sign in with your HF account to continue.
</p>
""")
        login_btn = gr.LoginButton(size="lg")
    # ══════════════════════════════════════════
    # IDE WORKSPACE — shown after auth
    # ══════════════════════════════════════════
    with gr.Row(visible=False, elem_id="workspace") as workspace:
        # ── Sidebar: model/prompt/temperature controls ──
        with gr.Column(scale=0, min_width=260, elem_id="sidebar"):
            gr.HTML('<div class="sidebar-header">⚑ SendoX</div>')
            # Replaced with a "logged in as <user>" badge on successful auth.
            user_badge = gr.HTML('<span id="user-badge">Not logged in</span>')
            # Choices are re-populated with the user's own models on login.
            model_selector = gr.Dropdown(
                label="Model",
                choices=FALLBACK_MODELS,
                value=DEFAULT_MODEL,
                interactive=True,
            )
            system_prompt_box = gr.Textbox(
                label="System Prompt",
                value=DEFAULT_SYSTEM_PROMPT,
                lines=5,
                placeholder="Define the AI's behaviour...",
            )
            temperature_slider = gr.Slider(
                label="Temperature",
                minimum=0.01,
                maximum=1.5,
                value=0.7,
                step=0.05,
            )
            gr.HTML("<hr style='border-color:#2a2f3d;margin:0.5rem 0;'>")
            logout_btn = gr.LogoutButton(size="sm")
        # ── Main Chat Panel ──
        with gr.Column(scale=1, elem_id="main-panel"):
            chat_interface = gr.ChatInterface(
                fn=chat_with_model,
                # Appended after (message, history) when Gradio calls fn.
                additional_inputs=[
                    system_prompt_box,
                    model_selector,
                    temperature_slider,
                    oauth_profile_state,
                ],
                chatbot=gr.Chatbot(
                    label="SendoX Chat",
                    height=600,
                    show_copy_button=True,
                    placeholder="<div style='text-align:center;color:#7a8099;padding:3rem;'>Start a conversation with SendoX...</div>",
                ),
                textbox=gr.Textbox(
                    placeholder="Ask anything β€” code, debug, explain...",
                    container=False,
                ),
                submit_btn="Send β†—",
                retry_btn="β†Ί Retry",
                undo_btn="↩ Undo",
                clear_btn="βœ• Clear",
            )
    # ══════════════════════════════════════════
    # AUTH LOGIC — the robust lock-fix
    # ══════════════════════════════════════════
    def handle_auth_on_load(request: gr.Request, profile: gr.OAuthProfile | None):
        """
        Called on every page load (including after OAuth redirect).
        If profile is not None → user is authenticated → show workspace.

        Returns updates for, in order: login_gate visibility, workspace
        visibility, model_selector choices/value, user_badge HTML, and
        the oauth_profile_state value.
        """
        if profile is not None:
            username = profile.username or "User"
            # Fetch user models for the dropdown (falls back to curated list).
            user_models = get_user_models(profile)
            badge_html = f'<span id="user-badge">βœ“ {username}</span>'
            return (
                gr.update(visible=False),  # hide login_gate
                gr.update(visible=True),  # show workspace
                gr.update(choices=user_models, value=user_models[0]),  # model_selector
                badge_html,  # user_badge
                profile,  # oauth_profile_state
            )
        else:
            # Anonymous: reset everything to the logged-out defaults.
            return (
                gr.update(visible=True),  # show login_gate
                gr.update(visible=False),  # hide workspace
                gr.update(choices=FALLBACK_MODELS, value=DEFAULT_MODEL),
                '<span id="user-badge">Not logged in</span>',
                None,
            )
    # demo.load fires on every page load — this is the key fix.
    # NOTE(review): inputs=None relies on Gradio injecting the gr.Request and
    # gr.OAuthProfile parameters from the session — confirm against the
    # installed gradio version's OAuth docs.
    demo.load(
        fn=handle_auth_on_load,
        inputs=None,
        outputs=[login_gate, workspace, model_selector, user_badge, oauth_profile_state],
    )
# ─────────────────────────────────────────────
# LAUNCH
# ─────────────────────────────────────────────
if __name__ == "__main__":
    # show_error=True surfaces server-side tracebacks in the browser UI.
    demo.launch(show_error=True)