import os
import gradio as gr
import torch
from openai import OpenAI
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
# ----------------- CHAT (HF Router) -----------------
# HF_TOKEN is read from the environment (set as a Space secret); if unset,
# api_key will be None and requests will fail to authenticate.
HF_TOKEN = os.getenv("HF_TOKEN")
# OpenAI-compatible client pointed at Hugging Face's inference router.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=HF_TOKEN,
)
# Chat model id; the ":hf-inference" suffix presumably selects the provider
# on the router — TODO confirm against HF router docs.
CHAT_MODEL = "HuggingFaceTB/SmolLM3-3B:hf-inference"
# System prompt (Turkish): "You are MAIND AI. Answer briefly, clearly and helpfully."
SYSTEM = "Sen MAIND AI'sin. Kısa, net ve yardımcı cevap ver."
def llm_reply(user_text: str) -> str:
    """Send one user message through the HF router chat endpoint.

    Args:
        user_text: The raw user message.

    Returns:
        The model's reply text, stripped; empty string if the model
        returned no content.
    """
    messages = [
        {"role": "system", "content": SYSTEM},
        {"role": "user", "content": user_text},
    ]
    completion = client.chat.completions.create(
        model=CHAT_MODEL,
        messages=messages,
        temperature=0.7,
        max_tokens=300,
    )
    text = completion.choices[0].message.content
    return text.strip() if text else ""
def chat_fn(message, history):
    """Gradio event handler: append the user turn and the assistant reply.

    Args:
        message: Raw text from the input textbox.
        history: Chat history as a list of {"role", "content"} dicts, or None.

    Returns:
        Tuple of (updated history, "") — the empty string clears the textbox.
    """
    # Copy so we never mutate the list Gradio handed us (state aliasing).
    history = list(history or [])
    # Ignore blank submissions instead of sending an empty prompt to the LLM.
    if not message or not message.strip():
        return history, ""
    history.append({"role": "user", "content": message})
    try:
        reply = llm_reply(message)
    except Exception as e:
        # Surface the failure in-chat rather than crashing the UI.
        reply = f"⚠️ Chat hata: {e}"
    history.append({"role": "assistant", "content": reply})
    return history, ""
# ----------------- IMAGE (LOCAL in Space, CPU) -----------------
IMG_MODEL = "segmind/tiny-sd" # small checkpoint, suitable for a CPU Space
img_pipe = None  # lazily-initialized pipeline singleton (populated by get_pipe)
def get_pipe():
    """Return the shared Stable Diffusion pipeline, building it on first call.

    The pipeline is cached in the module-level ``img_pipe`` singleton so the
    (slow) model load happens only once per process.
    """
    global img_pipe
    if img_pipe is not None:
        return img_pipe
    loaded = StableDiffusionPipeline.from_pretrained(
        IMG_MODEL,
        torch_dtype=torch.float32,
    )
    loaded.scheduler = EulerAncestralDiscreteScheduler.from_config(loaded.scheduler.config)
    loaded.enable_attention_slicing()  # reduces peak RAM usage
    img_pipe = loaded.to("cpu")
    return img_pipe
def gen_image(prompt: str, steps: int, size: int):
    """Generate a single square image from `prompt` on CPU.

    Args:
        prompt: Text prompt; must be non-blank.
        steps: Number of inference steps (coerced to int).
        size: Image side length in pixels, used for both height and width.

    Returns:
        The first generated PIL image.

    Raises:
        gr.Error: If the prompt is empty or whitespace-only.
    """
    if not (prompt and prompt.strip()):
        raise gr.Error("Prompt boş.")
    pipeline = get_pipe()
    side = int(size)
    with torch.inference_mode():
        result = pipeline(
            prompt=prompt,
            num_inference_steps=int(steps),
            guidance_scale=6.5,
            height=side,
            width=side,
        )
    return result.images[0]
# ----------------- UI (blue neon) -----------------
# Custom CSS theme: dark navy background with cyan accents.
# NOTE: this string is served to the browser as-is; do not reformat.
CSS = """
:root{--bg:#050b1a;--panel:#0a1636;--stroke:rgba(56,189,248,.25);--txt:#e6f1ff;--muted:#93c5fd;}
body,.gradio-container{background:radial-gradient(1200px 800px at 20% 0%, rgba(56,189,248,.18), transparent 55%),var(--bg)!important;color:var(--txt)!important;}
#wrap{max-width:1100px;margin:0 auto;}
#topbar{display:flex;align-items:center;gap:14px;padding:14px 16px;background:rgba(10,22,54,.75);border:1px solid var(--stroke);border-radius:16px;}
#card{margin-top:14px;padding:14px;background:rgba(10,22,54,.55);border:1px solid var(--stroke);border-radius:18px;}
button{border-radius:14px!important;}
textarea,input{background:rgba(5,11,26,.55)!important;border:1px solid rgba(56,189,248,.22)!important;color:var(--txt)!important;}
"""
# Two-column layout: chat on the left, image generation on the right.
with gr.Blocks(css=CSS, title="MaindAI") as demo:
    with gr.Column(elem_id="wrap"):
        # Header bar: logo + title (expects logo.png alongside this file).
        with gr.Row(elem_id="topbar"):
            gr.Image("logo.png", show_label=False, height=58, width=58, container=False)
            gr.Markdown("## 💙 MAIND AI\n<small style='color:#93c5fd'>Chat + Görsel (HF Space CPU)</small>")
        with gr.Row(elem_id="card"):
            # LEFT: chat panel
            with gr.Column(scale=6):
                gr.Markdown("### 💬 Chat")
                chatbot = gr.Chatbot(value=[], height=420)
                msg = gr.Textbox(placeholder="Bir şey sor…", show_label=False)
                send = gr.Button("Gönder")
                # Both the button and Enter-in-textbox trigger the same handler;
                # chat_fn returns (history, "") so the textbox is cleared.
                send.click(chat_fn, [msg, chatbot], [chatbot, msg])
                msg.submit(chat_fn, [msg, chatbot], [chatbot, msg])
            # RIGHT: image-generation panel
            with gr.Column(scale=5):
                gr.Markdown("### 🎨 Görsel Üret")
                img_prompt = gr.Textbox(placeholder="Örn: neon mavi cyberpunk şehir, yağmur, gece", show_label=False)
                with gr.Row():
                    steps = gr.Slider(2, 12, value=6, step=1, label="Steps (hız)")
                    size = gr.Slider(256, 512, value=384, step=64, label="Boyut")
                img_btn = gr.Button("Görsel Oluştur")
                img_out = gr.Image(height=420)
                img_btn.click(gen_image, [img_prompt, steps, size], img_out)
demo.launch()