Raiff1982 committed on
Commit
0beba87
Β·
verified Β·
1 Parent(s): ff09025

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +145 -193
app.py CHANGED
@@ -1,9 +1,14 @@
1
  import json
2
  import asyncio
3
  import os
 
 
 
 
4
  from fastapi import FastAPI, Request
5
  from fastapi.middleware.cors import CORSMiddleware
6
- from fastapi.responses import StreamingResponse, HTMLResponse
 
7
  from huggingface_hub import InferenceClient
8
 
9
  # ─────────────────────────────────────────────
@@ -14,203 +19,28 @@ MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"
14
  HF_TOKEN = os.environ.get("HF_TOKEN")
15
 
16
  if not HF_TOKEN:
17
- raise RuntimeError("HF_TOKEN missing. Set in HF Space secrets.")
18
 
19
- # βœ… FIX: bind model correctly (this was your silent bug)
20
  client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
21
 
22
  # ─────────────────────────────────────────────
23
- # FASTAPI INIT
24
  # ─────────────────────────────────────────────
25
 
26
- app = FastAPI()
27
 
28
- app.add_middleware(
29
  CORSMiddleware,
30
  allow_origins=["*"],
31
  allow_methods=["*"],
32
  allow_headers=["*"],
33
  )
34
 
35
- # ─────────────────────────────────────────────
36
- # ROOT (CODETTE UI)
37
- # ─────────────────────────────────────────────
38
-
39
- @app.get("/", response_class=HTMLResponse)
40
- async def root():
41
- return """
42
- <!DOCTYPE html>
43
- <html>
44
- <head>
45
- <title>Codette</title>
46
- <style>
47
- body {
48
- margin:0;
49
- font-family: Inter, sans-serif;
50
- background: radial-gradient(circle at top, #14142b, #0b0b17);
51
- color:#e5e7eb;
52
- }
53
-
54
- .container {
55
- display:flex;
56
- height:100vh;
57
- }
58
-
59
- .left {
60
- flex:3;
61
- display:flex;
62
- flex-direction:column;
63
- padding:15px;
64
- }
65
-
66
- .right {
67
- flex:1;
68
- padding:15px;
69
- }
70
-
71
- .chat {
72
- flex:1;
73
- overflow:auto;
74
- background: rgba(20,20,40,0.7);
75
- border-radius:10px;
76
- padding:10px;
77
- }
78
-
79
- .input {
80
- display:flex;
81
- margin-top:10px;
82
- }
83
-
84
- input {
85
- flex:1;
86
- padding:10px;
87
- background:#0f0f1e;
88
- border:1px solid #06b6d4;
89
- color:white;
90
- }
91
-
92
- button {
93
- padding:10px;
94
- background: linear-gradient(135deg,#a855f7,#06b6d4);
95
- border:none;
96
- color:white;
97
- cursor:pointer;
98
- }
99
-
100
- .metric {
101
- background: rgba(20,20,40,0.7);
102
- padding:10px;
103
- margin-bottom:10px;
104
- border-radius:8px;
105
- font-family: monospace;
106
- }
107
- </style>
108
- </head>
109
-
110
- <body>
111
- <div class="container">
112
-
113
- <div class="left">
114
- <h2>CODETTE</h2>
115
-
116
- <div id="chat" class="chat"></div>
117
-
118
- <div class="input">
119
- <input id="msg" placeholder="Ask Codette..." />
120
- <button onclick="send()">β–Ά</button>
121
- </div>
122
- </div>
123
-
124
- <div class="right">
125
- <div class="metric" id="metrics">
126
- Ξ“: 0.0000<br>
127
- Ξ·: 0.0000<br>
128
- Risk: LOW
129
- </div>
130
- </div>
131
-
132
- </div>
133
-
134
- <script>
135
- async function send() {
136
- const input = document.getElementById("msg");
137
- const chat = document.getElementById("chat");
138
-
139
- const userText = input.value;
140
- if (!userText) return;
141
-
142
- chat.innerHTML += "<div><b>You:</b> " + userText + "</div>";
143
-
144
- input.value = "";
145
-
146
- const res = await fetch("/api/chat", {
147
- method:"POST",
148
- headers:{"Content-Type":"application/json"},
149
- body: JSON.stringify({
150
- messages:[{role:"user", content:userText}]
151
- })
152
- });
153
-
154
- const reader = res.body.getReader();
155
- const decoder = new TextDecoder();
156
-
157
- let botDiv = document.createElement("div");
158
- botDiv.innerHTML = "<b>Codette:</b> ";
159
- chat.appendChild(botDiv);
160
-
161
- while (true) {
162
- const {done, value} = await reader.read();
163
- if (done) break;
164
-
165
- const chunk = decoder.decode(value);
166
- const lines = chunk.split("\\n");
167
-
168
- for (let line of lines) {
169
- if (!line.trim()) continue;
170
- try {
171
- const data = JSON.parse(line);
172
- if (data.message?.content) {
173
- botDiv.innerHTML += data.message.content;
174
- }
175
- } catch {}
176
- }
177
- }
178
-
179
- chat.scrollTop = chat.scrollHeight;
180
-
181
- // fake metrics update (you can wire real later)
182
- document.getElementById("metrics").innerHTML =
183
- "Ξ“: " + (0.8 + Math.random()*0.2).toFixed(4) + "<br>" +
184
- "Ξ·: " + (0.6 + Math.random()*0.3).toFixed(4) + "<br>" +
185
- "Risk: LOW";
186
- }
187
- </script>
188
-
189
- </body>
190
- </html>
191
- """
192
-
193
- # ─────────────────────────────────────────────
194
- # DEBUG
195
- # ─────────────────────────────────────────────
196
-
197
@app.get("/test")
async def test():
    """Debug endpoint: verify the InferenceClient can reach the model.

    Returns ``{"status": "ok"}`` on success, or ``{"status": "error",
    "error": <message>}`` if the completion call raises.
    """
    try:
        # Result is intentionally discarded — we only care that the
        # round-trip to the inference backend succeeds.
        client.chat.completions.create(
            model=MODEL_ID,
            messages=[{"role": "user", "content": "Say hello"}],
            max_tokens=10,
        )
        return {"status": "ok"}
    except Exception as e:
        # Broad catch is deliberate: this endpoint exists to surface
        # any failure mode as JSON instead of a 500.
        return {"status": "error", "error": str(e)}
208
-
209
  # ─────────────────────────────────────────────
210
  # CHAT ENDPOINT (UNCHANGED CORE)
211
  # ─────────────────────────────────────────────
212
 
213
- @app.post("/api/chat")
214
  async def chat(request: Request):
215
  body = await request.json()
216
  messages = body.get("messages", [])
@@ -225,21 +55,16 @@ async def chat(request: Request):
225
  stream=True,
226
  )
227
 
228
- yield json.dumps({
229
- "message": {"role": "assistant", "content": ""},
230
- "done": False
231
- }) + "\n"
232
 
233
  for chunk in stream:
234
  try:
235
  delta = chunk.choices[0].delta
236
  if delta and delta.content:
 
 
237
  yield json.dumps({
238
- "message": {
239
- "role": "assistant",
240
- "content": delta.content
241
- },
242
- "done": False
243
  }) + "\n"
244
 
245
  await asyncio.sleep(0.01)
@@ -248,18 +73,145 @@ async def chat(request: Request):
248
  continue
249
 
250
  yield json.dumps({
251
- "message": {"role": "assistant", "content": ""},
252
- "done": True
253
  }) + "\n"
254
 
255
  except Exception as e:
256
  yield json.dumps({
257
- "message": {"role": "assistant", "content": f"Error: {str(e)}"},
258
  "done": True
259
  }) + "\n"
260
 
261
  return StreamingResponse(event_stream(), media_type="application/x-ndjson")
262
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
  # ─────────────────────────────────────────────
264
  # RUN
265
  # ─────────────────────────────────────────────
 
1
  import json
2
  import asyncio
3
  import os
4
+ import numpy as np
5
+ import gradio as gr
6
+ import plotly.graph_objects as go
7
+
8
  from fastapi import FastAPI, Request
9
  from fastapi.middleware.cors import CORSMiddleware
10
+ from fastapi.responses import StreamingResponse
11
+
12
  from huggingface_hub import InferenceClient
13
 
14
  # ─────────────────────────────────────────────
 
19
  HF_TOKEN = os.environ.get("HF_TOKEN")
20
 
21
  if not HF_TOKEN:
22
+ raise RuntimeError("HF_TOKEN missing.")
23
 
 
24
  client = InferenceClient(model=MODEL_ID, token=HF_TOKEN)
25
 
26
  # ─────────────────────────────────────────────
27
+ # FASTAPI
28
  # ─────────────────────────────────────────────
29
 
30
+ api = FastAPI()
31
 
32
+ api.add_middleware(
33
  CORSMiddleware,
34
  allow_origins=["*"],
35
  allow_methods=["*"],
36
  allow_headers=["*"],
37
  )
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  # ─────────────────────────────────────────────
40
  # CHAT ENDPOINT (UNCHANGED CORE)
41
  # ─────────────────────────────────────────────
42
 
43
+ @api.post("/api/chat")
44
  async def chat(request: Request):
45
  body = await request.json()
46
  messages = body.get("messages", [])
 
55
  stream=True,
56
  )
57
 
58
+ full_text = ""
 
 
 
59
 
60
  for chunk in stream:
61
  try:
62
  delta = chunk.choices[0].delta
63
  if delta and delta.content:
64
+ full_text += delta.content
65
+
66
  yield json.dumps({
67
+ "content": delta.content
 
 
 
 
68
  }) + "\n"
69
 
70
  await asyncio.sleep(0.01)
 
73
  continue
74
 
75
  yield json.dumps({
76
+ "done": True,
77
+ "full": full_text
78
  }) + "\n"
79
 
80
  except Exception as e:
81
  yield json.dumps({
82
+ "error": str(e),
83
  "done": True
84
  }) + "\n"
85
 
86
  return StreamingResponse(event_stream(), media_type="application/x-ndjson")
87
 
88
+ # ─────────────────────────────────────────────
89
+ # CODETTE UI (GRADIO)
90
+ # ─────────────────────────────────────────────
91
+
92
+ CUSTOM_CSS = """
93
+ body {
94
+ background: radial-gradient(circle at top, #14142b, #0b0b17);
95
+ color: #e5e7eb;
96
+ }
97
+
98
+ .metric-box {
99
+ background: rgba(20,20,40,0.7);
100
+ border: 1px solid rgba(168,85,247,0.3);
101
+ padding: 10px;
102
+ border-radius: 10px;
103
+ font-family: monospace;
104
+ margin-bottom: 10px;
105
+ }
106
+
107
+ button {
108
+ background: linear-gradient(135deg,#a855f7,#06b6d4) !important;
109
+ border: none !important;
110
+ }
111
+ """
112
+
113
def call_backend(message):
    """Send *message* to the local /api/chat endpoint and return the reply.

    Streams the backend's NDJSON response and concatenates every
    ``content`` chunk into one string. Malformed stream lines are
    skipped instead of crashing the UI, and a mid-stream ``error``
    reported by the backend is surfaced in the returned text.
    """
    import requests  # local import: only needed when the UI calls the API

    url = "http://localhost:7860/api/chat"

    response = requests.post(
        url,
        json={"messages": [{"role": "user", "content": message}]},
        stream=True,
        timeout=120,  # don't hang the UI forever if the backend stalls
    )

    full = ""

    for line in response.iter_lines():
        if not line:
            continue
        try:
            data = json.loads(line.decode())
        except (json.JSONDecodeError, UnicodeDecodeError):
            # Partial or garbled NDJSON line — skip it rather than abort.
            continue

        if "content" in data:
            full += data["content"]
        elif "error" in data:
            # The backend reported a failure mid-stream; show it.
            full += f"\n[backend error: {data['error']}]"

    return full
136
+
137
def process(msg, history):
    """Run one chat turn against the backend.

    Appends the user message and the assistant reply to *history*
    (mutated in place) and returns ``(history, "", metrics_html, fig)``
    so the caller can clear the textbox and refresh the side panel.
    """
    if not msg.strip():
        # Blank input: leave the conversation and side panel untouched.
        return history, "", "", None

    history.append({"role": "user", "content": msg})
    history.append({"role": "assistant", "content": call_backend(msg)})

    # Placeholder metrics derived from message length (can upgrade later).
    coherence = min(0.99, 0.6 + len(msg) / 200)
    eta = 0.7

    metrics_html = f"""
    <div class="metric-box">
    Γ Phase Coherence: {coherence:.4f}<br>
    η Ethical Alignment: {eta:.4f}<br>
    Risk: LOW
    </div>
    """

    # Static three-point concept plot; build the trace first, then the figure.
    trace = go.Scatter(
        x=[0, 1, 0],
        y=[0, 1, 1],
        mode='markers+text',
        text=["newton", "empathy", "quantum"],
    )
    fig = go.Figure(data=[trace])

    return history, "", metrics_html, fig
169
+
170
def create_ui():
    """Build the Codette Gradio interface: chat pane plus metrics/plot panel.

    Returns the assembled ``gr.Blocks`` app (not yet launched/mounted).
    """
    # FIX: CUSTOM_CSS was defined at module level but never applied.
    with gr.Blocks(title="Codette", css=CUSTOM_CSS) as demo:

        gr.Markdown("# CODETTE")

        with gr.Row():

            with gr.Column(scale=3):
                # type="messages" matches the {"role", "content"} dicts
                # that process() appends to the history.
                chat = gr.Chatbot(height=520, type="messages")

                msg = gr.Textbox(
                    lines=2,
                    placeholder="Ask Codette..."
                )

                send = gr.Button("▶")

            with gr.Column(scale=2):
                metrics = gr.HTML()
                graph = gr.Plot()

        # Button click and Enter-in-textbox share the same handler;
        # the trivial `run` wrapper was removed as redundant.
        send.click(
            process,
            [msg, chat],
            [chat, msg, metrics, graph]
        )

        msg.submit(
            process,
            [msg, chat],
            [chat, msg, metrics, graph]
        )

    return demo
208
+
209
+ # ─────────────────────────────────────────────
210
+ # COMBINE (IMPORTANT PART)
211
+ # ─────────────────────────────────────────────
212
+
213
+ app = gr.mount_gradio_app(api, create_ui(), path="/")
214
+
215
  # ─────────────────────────────────────────────
216
  # RUN
217
  # ─────────────────────────────────────────────