Zenkad committed on
Commit
de93bde
·
verified ·
1 Parent(s): e9c6398

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -73
app.py CHANGED
@@ -1,16 +1,8 @@
1
- import os
2
- import time
3
- from typing import List, Dict, Any
4
-
5
  import gradio as gr
6
- from fastapi import FastAPI
7
- from fastapi.middleware.cors import CORSMiddleware
8
- from pydantic import BaseModel
9
  from huggingface_hub import InferenceClient
 
10
 
11
- # =============================
12
- # MODEL
13
- # =============================
14
  REPO_ID = "Qwen/Qwen2.5-7B-Instruct"
15
  HF_TOKEN = os.getenv("HF_TOKEN")
16
 
@@ -19,78 +11,36 @@ client = InferenceClient(
19
  token=HF_TOKEN
20
  )
21
 
22
# --- FastAPI application ---
# Create the API server and open CORS completely so any browser origin can
# call the /api/chat endpoint (acceptable for a test server).
_cors_options = {
    "allow_origins": ["*"],
    "allow_methods": ["*"],
    "allow_headers": ["*"],
}

app = FastAPI()
app.add_middleware(CORSMiddleware, **_cors_options)
33
-
34
# --- Request schema ---
class ChatRequest(BaseModel):
    """Payload accepted by POST /api/chat."""

    # The new user message to answer.
    message: str
    # Optional prior turns, expected as [user, assistant] pairs.
    # NOTE(review): pydantic deep-copies field defaults per instance, so the
    # mutable [] default is safe here.
    history: List[Any] | None = []
40
 
41
# --- API endpoint ---
@app.post("/api/chat")
async def chat(req: ChatRequest):
    """Answer one chat turn; always returns {"response": <text>}.

    Errors from the model call are reported in the response body rather
    than as an HTTP error status.
    """
    # Seed the conversation with the Turkish-only system prompt.
    messages = [{
        "role": "system",
        "content": "Sen ZenkaMind isimli bir yapay zekasın. Sadece Türkçe konuş.",
    }]

    # Replay prior turns; anything that is not a 2-item list is skipped.
    for turn in req.history or []:
        if isinstance(turn, list) and len(turn) == 2:
            user_text, assistant_text = turn
            messages.append({"role": "user", "content": user_text})
            messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": req.message})

    try:
        completion = client.chat_completion(
            messages=messages,
            max_tokens=300,
            temperature=0.7,
        )
        return {"response": completion.choices[0].message.content}
    except Exception as e:
        # Surface the failure to the caller instead of a 500.
        return {"response": f"Model hatası: {str(e)}"}
72
 
73
# --- Gradio UI callback (required so the HF Space has a front-end) ---
def demo_chat(msg, history):
    """Run one UI turn: query the model and append (msg, answer) to history."""
    if not history:
        history = []
    prompt = [
        {"role": "system", "content": "Türkçe konuş."},
        {"role": "user", "content": msg},
    ]
    result = client.chat_completion(messages=prompt, max_tokens=200)
    history.append((msg, result.choices[0].message.content))
    # Same list twice: one output feeds the Chatbot widget, one the state.
    return history, history
88
-
89
# --- Gradio test UI, mounted at "/" (this is what HF Spaces serves) ---
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 ZenkaMind – Test Sunucusu")
    chatbot = gr.Chatbot()
    inp = gr.Textbox(placeholder="Mesaj yaz...")
    inp.submit(fn=demo_chat, inputs=[inp, chatbot], outputs=[chatbot, chatbot])

app = gr.mount_gradio_app(app, demo, path="/")
 
 
 
 
 
1
  import gradio as gr
 
 
 
2
  from huggingface_hub import InferenceClient
3
+ import os
4
 
5
+ # HF model
 
 
6
  REPO_ID = "Qwen/Qwen2.5-7B-Instruct"
7
  HF_TOKEN = os.getenv("HF_TOKEN")
8
 
 
11
  token=HF_TOKEN
12
  )
13
 
14
def chat_fn(message, history):
    """Answer one chat turn for the Gradio ChatInterface.

    Args:
        message: The new user message; empty/None input is rejected politely.
        history: Prior turns, either as (user, assistant) pairs or as
            {"role": ..., "content": ...} dicts (the format newer Gradio
            versions pass). May be None.

    Returns:
        The assistant's reply, or a Turkish error/help message on failure.
    """
    if not message:
        return "Bir şey yazmadın."

    try:
        messages = [
            {"role": "system", "content": "Sen ZenkaMind isimli, sadece Türkçe konuşan bir asistansın."}
        ]

        # Replay the conversation so far. Tolerate a missing history and
        # both Gradio history formats: the original code crashed on None
        # and on message-dict history.
        for turn in history or []:
            if isinstance(turn, dict):
                messages.append({"role": turn["role"], "content": turn["content"]})
            else:
                u, a = turn
                messages.append({"role": "user", "content": u})
                messages.append({"role": "assistant", "content": a})

        messages.append({"role": "user", "content": message})

        output = client.chat_completion(
            messages=messages,
            max_tokens=300,
            temperature=0.7
        )
        return output.choices[0].message.content

    except Exception as e:
        # Surface model/API failures as a readable message in the chat.
        return f"Hata oluştu: {str(e)}"
39
 
40
# Minimal Gradio front-end used only to exercise the backend.
_ui_options = {
    "fn": chat_fn,
    "title": "ZenkaMind API Test",
    "description": "Bu ekran sadece backend test içindir.",
}

demo = gr.ChatInterface(**_ui_options)

demo.launch()