SiddhJagani committed on
Commit
8bbde7f
·
verified ·
1 Parent(s): 1b450b4

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os, json, httpx, gradio as gr, uvicorn
from fastapi import FastAPI, Request, Header, HTTPException
from fastapi.responses import JSONResponse

# ---------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------
# Upstream Bytez endpoints: an OpenAI-compatible chat-completions URL and
# the Bytez model-catalogue listing URL.
BYTEZ_CHAT_URL = "https://api.bytez.com/models/v2/openai/v1/chat/completions"
BYTEZ_MODELS_URL = "https://api.bytez.com/models/v2/list/models"
# Sent verbatim as the Authorization header value to Bytez.
# NOTE(review): no "Bearer "/"Key " prefix is added here — the env var is
# presumably expected to contain the full header value; confirm against
# the Bytez API docs.
BYTEZ_AUTH = os.getenv("BYTEZ_API_KEY")
# Optional shared secret required from callers of THIS proxy; when unset,
# check_key() below accepts any bearer token.
LOCAL_API_KEY = os.getenv("LOCAL_API_KEY")

# ---------------------------------------------------------------------
# FastAPI backend
# ---------------------------------------------------------------------
api = FastAPI(title="Bytez → OpenAI Proxy")
18
def check_key(auth: str | None) -> None:
    """Validate the caller's ``Authorization`` header.

    Args:
        auth: Raw header value, expected in the form ``"Bearer <key>"``.

    Raises:
        HTTPException: 401 when the header is missing or not a Bearer
            scheme; 403 when ``LOCAL_API_KEY`` is configured and the
            supplied key does not match it.
    """
    if not auth or not auth.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing or invalid API key")
    # Slice off the fixed prefix instead of auth.split("Bearer ")[1],
    # which truncates any key that itself contains the substring "Bearer ".
    user_key = auth[len("Bearer "):].strip()
    # NOTE(review): when LOCAL_API_KEY is unset, every bearer key is
    # accepted (open proxy) — confirm this fallback is intentional.
    if LOCAL_API_KEY and user_key != LOCAL_API_KEY:
        raise HTTPException(status_code=403, detail="Unauthorized API key")
25
# ---------------------------------------------------------------------
# Root / health
# ---------------------------------------------------------------------
@api.get("/")
def root():
    """Health probe: report that the proxy process is alive."""
    status_payload = {"status": "ok", "message": "Bytez proxy running"}
    return status_payload
32
# ---------------------------------------------------------------------
# /v1/models → must look OpenAI-style
# ---------------------------------------------------------------------
@api.get("/v1/models")
async def models(authorization: str = Header(None)):
    """Proxy the Bytez model catalogue as an OpenAI-style model list.

    Args:
        authorization: Caller's ``Authorization`` header, validated by
            ``check_key``.

    Returns:
        JSONResponse shaped like OpenAI's ``GET /v1/models``:
        ``{"object": "list", "data": [{"id": ..., "object": "model"}, ...]}``.

    Raises:
        HTTPException: 401/403 from ``check_key``; 500 when the server's
            Bytez key is missing; 502 when the upstream reply is not JSON
            or is an HTTP error.
    """
    check_key(authorization)
    if not BYTEZ_AUTH:
        raise HTTPException(status_code=500, detail="Server BYTEZ_API_KEY not configured")

    async with httpx.AsyncClient(timeout=30) as c:
        r = await c.get(BYTEZ_MODELS_URL, headers={"Authorization": BYTEZ_AUTH})
    try:
        data = r.json()
    except json.JSONDecodeError:
        raise HTTPException(status_code=502, detail="Upstream returned invalid JSON")

    # Surface upstream failures instead of silently transforming an error
    # payload into an empty — and misleading — 200 model list.
    if r.status_code >= 400:
        raise HTTPException(status_code=502, detail=f"Upstream error {r.status_code}")

    # Transform Bytez format → OpenAI format. Bytez may return either a
    # bare list or a {"data": [...]} wrapper; anything else yields [].
    raw_models = (
        data if isinstance(data, list)
        else data.get("data", []) if isinstance(data, dict)
        else []
    )
    # Drop malformed entries that have neither "id" nor "name" rather
    # than emitting {"id": None} records.
    models_list = [
        {"id": m.get("id") or m.get("name"), "object": "model"}
        for m in raw_models
        if isinstance(m, dict) and (m.get("id") or m.get("name"))
    ]
    return JSONResponse(
        {"object": "list", "data": models_list},
        headers={"Access-Control-Allow-Origin": "*"}
    )
58
# ---------------------------------------------------------------------
# /v1/chat/completions
# ---------------------------------------------------------------------
@api.post("/v1/chat/completions")
async def chat(request: Request, authorization: str = Header(None)):
    """Forward an OpenAI-style chat-completion request to Bytez.

    Args:
        request: Incoming request whose JSON body is forwarded verbatim.
        authorization: Caller's ``Authorization`` header, validated by
            ``check_key``.

    Returns:
        JSONResponse containing the upstream JSON body, with the
        upstream HTTP status code preserved.

    Raises:
        HTTPException: 401/403 from ``check_key``; 400 for a malformed
            request body; 500 when the server's Bytez key is missing;
            502 when the upstream reply is not JSON.
    """
    check_key(authorization)
    if not BYTEZ_AUTH:
        raise HTTPException(status_code=500, detail="Server BYTEZ_API_KEY not configured")

    # Reject malformed bodies with 400 rather than an unhandled 500.
    try:
        payload = await request.json()
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Request body is not valid JSON")

    headers = {
        "Authorization": BYTEZ_AUTH,
        "Content-Type": "application/json",
    }
    async with httpx.AsyncClient(timeout=120) as c:
        r = await c.post(BYTEZ_CHAT_URL, headers=headers, json=payload)
    try:
        data = r.json()
    except json.JSONDecodeError:
        raise HTTPException(status_code=502, detail="Upstream returned invalid JSON")

    # Propagate the upstream status code: previously every Bytez error
    # was relayed as 200 OK, so OpenAI clients never saw real failures.
    return JSONResponse(
        data,
        status_code=r.status_code,
        headers={"Access-Control-Allow-Origin": "*"},
    )
81
# ---------------------------------------------------------------------
# Minimal Gradio UI (to make HF Space start)
# ---------------------------------------------------------------------
# A placeholder page; the real functionality is the FastAPI routes above.
with gr.Blocks() as ui:
    gr.Markdown("### ✅ Jwero Bytez → OpenAI Proxy\n"
                "Endpoints: `/v1/models`, `/v1/chat/completions`")

# Mount the Gradio UI onto the FastAPI app at "/". API routes registered
# above are matched before the mount, so /v1/* keeps working.
# NOTE(review): the module-level name `demo` is presumably what the HF
# Space runtime looks up — do not rename without checking the Space config.
demo = gr.mount_gradio_app(api, ui, path="/")

# local only
if __name__ == "__main__":
    uvicorn.run(demo, host="0.0.0.0", port=7860)