Teotonix committed on
Commit
b15336a
·
verified ·
1 Parent(s): 4625971

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -23
app.py CHANGED
@@ -2,45 +2,44 @@ import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
 
5
- # Secret olarak eklediğin Token'ı al
6
  token = os.getenv("HF_TOKEN")
7
  client = InferenceClient(token=token)
8
 
9
  def chat_fn(message, history):
10
  try:
11
- # Chat arayüzünü bozmamak için basit bir prompt yapısı kuruyoruz
12
- prompt = f"User: {message}\nAssistant:"
13
-
14
- # En kararlı çalışan Llama-3 modelini doğrudan metin üretme ile çağırıyoruz
15
- response = client.text_generation(
16
- model="meta-llama/Meta-Llama-3-8B-Instruct",
17
- prompt=prompt,
18
- max_new_tokens=500,
19
- stream=False # Hata riskini azaltmak için stream'i kapattık
20
- )
21
- return response
 
 
22
  except Exception as e:
23
- return f"Hata: {str(e)}"
24
 
25
  def image_fn(prompt):
26
  if not prompt: return None
27
  try:
28
- # Görsel oluşturma
29
  return client.text_to_image(prompt, model="stabilityai/sdxl-turbo")
30
  except Exception as e:
 
31
  return None
32
 
33
- # Arayüz Ayarları
34
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
35
- gr.Markdown("<h1 style='text-align: center; color: #38bdf8;'>🚀 Maind AI Studio</h1>")
36
 
37
  with gr.Tabs():
38
  with gr.TabItem("💬 Sohbet"):
39
- # ChatInterface bazen karmaşık hata verebilir, manuel kurguluyoruz
40
- msg = gr.Textbox(label="Mesajınızı yazın")
41
- out_txt = gr.Textbox(label="Yanıt", interactive=False)
42
- btn_txt = gr.Button("Gönder", variant="primary")
43
- btn_txt.click(chat_fn, msg, out_txt)
44
 
45
  with gr.TabItem("🎨 Görsel"):
46
  with gr.Row():
@@ -51,4 +50,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
51
  out_img = gr.Image()
52
  btn_img.click(image_fn, inp, out_img)
53
 
54
- demo.launch()
 
2
  from huggingface_hub import InferenceClient
3
  import os
4
 
5
# Authenticate the Inference API client with the token stored as a Space secret.
token = os.getenv("HF_TOKEN")
client = InferenceClient(token=token)
8
 
9
def chat_fn(message, history):
    """Stream an assistant reply for *message*, preserving prior turns.

    Args:
        message: The user's latest message.
        history: Gradio chat history — either a list of (user, assistant)
            pairs or a list of role/content dicts, depending on the Gradio
            version and ChatInterface configuration (assumed; confirm against
            the Gradio version in use).

    Yields:
        The accumulated assistant text so far; Gradio re-renders on each yield.
    """
    try:
        # Rebuild the full conversation for the model. The previous version
        # sent only the latest message, so the bot had no memory between
        # turns even though ChatInterface supplies `history` for exactly that.
        messages = []
        for turn in history or []:
            if isinstance(turn, dict):
                # Newer "messages" format: already {"role": ..., "content": ...}.
                messages.append({"role": turn["role"], "content": turn["content"]})
            else:
                # Legacy tuple format: (user_text, assistant_text).
                user_msg, bot_msg = turn
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if bot_msg:
                    messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": message})

        # Gemma-2-9b-it: a stable conversational model on the serverless API.
        response = ""
        for chunk in client.chat_completion(
            model="google/gemma-2-9b-it",
            messages=messages,
            max_tokens=500,
            stream=True,
        ):
            if chunk.choices:
                delta = chunk.choices[0].delta.content
                if delta:
                    response += delta
                    yield response
    except Exception as e:
        # Show the error in the chat window instead of crashing the UI.
        yield f"Hata: {str(e)}"
26
 
27
def image_fn(prompt):
    """Generate an image from *prompt* via SDXL Turbo.

    Returns the generated image, or None when the prompt is empty or the
    Inference API call fails (the error is logged to stdout).
    """
    if not prompt:
        return None
    try:
        return client.text_to_image(prompt, model="stabilityai/sdxl-turbo")
    except Exception as e:
        print(f"Görsel Hatası: {e}")
        return None
35
 
36
+ # Arayüz
37
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
38
+ gr.Markdown("<h1 style='text-align: center;'>🚀 Maind AI Studio</h1>")
39
 
40
  with gr.Tabs():
41
  with gr.TabItem("💬 Sohbet"):
42
+ gr.ChatInterface(fn=chat_fn)
 
 
 
 
43
 
44
  with gr.TabItem("🎨 Görsel"):
45
  with gr.Row():
 
50
  out_img = gr.Image()
51
  btn_img.click(image_fn, inp, out_img)
52
 
53
+ demo.queue().launch()