Zenkad committed on
Commit
6d86c47
·
verified ·
1 Parent(s): de93bde

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -32
app.py CHANGED
@@ -1,46 +1,78 @@
import gradio as gr
from huggingface_hub import InferenceClient
import os

# Hosted model and auth token (token read from the environment).
REPO_ID = "Qwen/Qwen2.5-7B-Instruct"
HF_TOKEN = os.getenv("HF_TOKEN")

client = InferenceClient(
    model=REPO_ID,
    token=HF_TOKEN
)


def chat_fn(message, history):
    """Answer one chat turn via the hosted model.

    Rebuilds the whole conversation (system prompt, prior turns, new
    message) on every call; any failure is converted into user-facing
    error text rather than raised.
    """
    if not message:
        return "Bir şey yazmadın."

    try:
        convo = [
            {"role": "system", "content": "Sen ZenkaMind isimli, sadece Türkçe konuşan bir asistansın."}
        ]
        # history is a list of (user, assistant) pairs from the UI.
        for user_turn, bot_turn in history:
            convo.extend((
                {"role": "user", "content": user_turn},
                {"role": "assistant", "content": bot_turn},
            ))
        convo.append({"role": "user", "content": message})

        result = client.chat_completion(
            messages=convo,
            max_tokens=300,
            temperature=0.7
        )

        return result.choices[0].message.content

    except Exception as e:
        return f"Hata oluştu: {str(e)}"

demo = gr.ChatInterface(
    fn=chat_fn,
    title="ZenkaMind API Test",
    description="Bu ekran sadece backend test içindir."
)

demo.launch()
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import InferenceClient
from pydantic import BaseModel

# -------------------------
# Model configuration (free tier)
# -------------------------
MODEL_ID = "google/gemma-2b-it"
# Set HF_TOKEN in the Space settings; a READ-scoped token is sufficient.
HF_TOKEN = os.getenv("HF_TOKEN")

client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
# -------------------------
# FastAPI application
# -------------------------
app = FastAPI(title="ZenkaMind API Test")

# NOTE(review): wildcard CORS is acceptable for a test backend, but
# allow_origins should be tightened before public exposure.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# -------------------------
# Request schemas
# -------------------------
class ChatRequest(BaseModel):
    """JSON body of POST /api/chat."""

    # The user's chat message, forwarded verbatim to the model.
    message: str
# -------------------------
# Endpoints
# -------------------------
@app.get("/")
def root():
    """Liveness endpoint: reports service identity and configured model."""
    return dict(
        status="ok",
        service="ZenkaMind API Test",
        model=MODEL_ID,
    )
@app.get("/health")
def health():
    """Health check: also reports whether an HF token was loaded."""
    return dict(
        status="healthy",
        model=MODEL_ID,
        token_loaded=bool(HF_TOKEN),
    )
@app.post("/api/chat")
def chat(req: ChatRequest):
    """Forward the user's message to the model and return its reply.

    Always returns a JSON object with a "response" key; on failure the
    payload also carries an "error" key instead of raising, so the
    frontend sees a uniform response shape.
    """
    # Robustness: the previous (Gradio) version rejected empty input;
    # restore that guard so a blank message doesn't burn a model call.
    if not req.message or not req.message.strip():
        return {"response": "Bir şey yazmadın."}

    try:
        completion = client.chat_completion(
            messages=[
                {"role": "system", "content": "Sen ZenkaMind adlı Türkçe konuşan bir yapay zekasın."},
                {"role": "user", "content": req.message},
            ],
            max_tokens=300,
            temperature=0.7,
        )

        reply = completion.choices[0].message.content
        return {"response": reply}

    except Exception as e:
        # Best-effort degradation: friendly message plus raw error for
        # debugging. NOTE(review): str(e) may leak internal details to
        # the client — consider logging it server-side instead.
        return {
            "response": "Model şu anda yanıt veremedi.",
            "error": str(e)
        }
# -------------------------
# Local development entry point (HF Spaces ignores this)
# -------------------------
if __name__ == "__main__":
    import uvicorn

    # 7860 is the conventional HF Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)