# MaindAI — app.py (Hugging Face Space)
import os
import gradio as gr
import torch
from openai import OpenAI
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
# ----------------- CHAT (HF Router) -----------------
# Hugging Face access token, expected to be set as a Space secret; may be
# None locally, in which case router calls will fail with an auth error.
HF_TOKEN = os.getenv("HF_TOKEN")
# OpenAI-compatible client pointed at the Hugging Face Inference Router.
client = OpenAI(
base_url="https://router.huggingface.co/v1",
api_key=HF_TOKEN,
)
# Model id with router provider suffix after the colon (hf-inference backend).
CHAT_MODEL = "HuggingFaceTB/SmolLM3-3B:hf-inference"
# System prompt (Turkish): "You are MAIND AI. Answer briefly, clearly, helpfully."
SYSTEM = "Sen MAIND AI'sin. Kısa, net ve yardımcı cevap ver."
def llm_reply(user_text: str) -> str:
    """Send one user turn to the router-hosted chat model and return its reply.

    Wraps *user_text* with the fixed system prompt, requests a single
    completion, and returns the stripped reply text ("" if the model
    produced no content).
    """
    conversation = [
        {"role": "system", "content": SYSTEM},
        {"role": "user", "content": user_text},
    ]
    completion = client.chat.completions.create(
        model=CHAT_MODEL,
        messages=conversation,
        temperature=0.7,
        max_tokens=300,
    )
    text = completion.choices[0].message.content
    return text.strip() if text else ""
def chat_fn(message, history):
    """Gradio event handler: run one chat turn.

    Appends the user message and the assistant reply (messages-format dicts)
    to *history*, mutating the passed-in list when one is given. Any failure
    from the LLM call is surfaced as the assistant reply instead of crashing
    the UI. Returns (updated history, "") — the empty string clears the
    input textbox.
    """
    log = history or []
    log.append({"role": "user", "content": message})
    try:
        answer = llm_reply(message)
    except Exception as err:  # best-effort: show the error in the chat itself
        answer = f"⚠️ Chat hata: {err}"
    log.append({"role": "assistant", "content": answer})
    return log, ""
# ----------------- IMAGE (LOCAL in Space, CPU) -----------------
# Small distilled Stable Diffusion checkpoint — light enough for a CPU Space.
IMG_MODEL = "segmind/tiny-sd"
# Lazily-built pipeline cache; stays None until get_pipe() is first called.
img_pipe = None
def get_pipe():
    """Return the process-wide Stable Diffusion pipeline, building it on first use.

    Loads IMG_MODEL in float32, swaps in an Euler-Ancestral scheduler,
    enables attention slicing to lower peak RAM, and pins the pipeline to
    CPU. The result is cached in the module-level ``img_pipe`` so subsequent
    calls are free.
    """
    global img_pipe
    if img_pipe is not None:
        return img_pipe

    loaded = StableDiffusionPipeline.from_pretrained(
        IMG_MODEL,
        torch_dtype=torch.float32,
    )
    loaded.scheduler = EulerAncestralDiscreteScheduler.from_config(loaded.scheduler.config)
    loaded.enable_attention_slicing()  # trades a little speed for lower RAM
    img_pipe = loaded.to("cpu")
    return img_pipe
def gen_image(prompt: str, steps: int, size: int):
    """Generate one square image from *prompt* on CPU.

    Args:
        prompt: text prompt; must be non-empty after stripping.
        steps: number of diffusion steps (cast to int).
        size: image height and width in pixels (cast to int).

    Returns:
        The first generated PIL image.

    Raises:
        gr.Error: when the prompt is missing or blank.
    """
    if not (prompt and prompt.strip()):
        raise gr.Error("Prompt boş.")
    pipeline = get_pipe()
    # inference_mode: no autograd bookkeeping during generation
    with torch.inference_mode():
        result = pipeline(
            prompt=prompt,
            num_inference_steps=int(steps),
            guidance_scale=6.5,
            height=int(size),
            width=int(size),
        )
    return result.images[0]
# ----------------- UI (blue neon theme) -----------------
# Custom CSS injected into the Gradio page: dark navy background with a
# cyan radial glow, translucent rounded panels, and light-blue accents.
CSS = """
:root{--bg:#050b1a;--panel:#0a1636;--stroke:rgba(56,189,248,.25);--txt:#e6f1ff;--muted:#93c5fd;}
body,.gradio-container{background:radial-gradient(1200px 800px at 20% 0%, rgba(56,189,248,.18), transparent 55%),var(--bg)!important;color:var(--txt)!important;}
#wrap{max-width:1100px;margin:0 auto;}
#topbar{display:flex;align-items:center;gap:14px;padding:14px 16px;background:rgba(10,22,54,.75);border:1px solid var(--stroke);border-radius:16px;}
#card{margin-top:14px;padding:14px;background:rgba(10,22,54,.55);border:1px solid var(--stroke);border-radius:18px;}
button{border-radius:14px!important;}
textarea,input{background:rgba(5,11,26,.55)!important;border:1px solid rgba(56,189,248,.22)!important;color:var(--txt)!important;}
"""
# Two-panel layout: chat on the left, image generation on the right.
with gr.Blocks(css=CSS, title="MaindAI") as demo:
    with gr.Column(elem_id="wrap"):
        # Top bar: logo + title (labels are in Turkish, kept as-is).
        with gr.Row(elem_id="topbar"):
            gr.Image("logo.png", show_label=False, height=58, width=58, container=False)
            gr.Markdown("## 💙 MAIND AI\n<small style='color:#93c5fd'>Chat + Görsel (HF Space CPU)</small>")
        with gr.Row(elem_id="card"):
            # LEFT: chat panel.
            with gr.Column(scale=6):
                gr.Markdown("### 💬 Chat")
                # NOTE(review): chat_fn appends messages-format dicts; recent
                # Gradio versions may need Chatbot(type="messages") — verify.
                chatbot = gr.Chatbot(value=[], height=420)
                msg = gr.Textbox(placeholder="Bir şey sor…", show_label=False)
                send = gr.Button("Gönder")
                # Button click and Enter both submit; chat_fn returns
                # (updated history, "") so the textbox is cleared after send.
                send.click(chat_fn, [msg, chatbot], [chatbot, msg])
                msg.submit(chat_fn, [msg, chatbot], [chatbot, msg])
            # RIGHT: image-generation panel.
            with gr.Column(scale=5):
                gr.Markdown("### 🎨 Görsel Üret")
                img_prompt = gr.Textbox(placeholder="Örn: neon mavi cyberpunk şehir, yağmur, gece", show_label=False)
                with gr.Row():
                    steps = gr.Slider(2, 12, value=6, step=1, label="Steps (hız)")
                    size = gr.Slider(256, 512, value=384, step=64, label="Boyut")
                img_btn = gr.Button("Görsel Oluştur")
                img_out = gr.Image(height=420)
                img_btn.click(gen_image, [img_prompt, steps, size], img_out)

demo.launch()