kcrobot25 committed on
Commit
54f9e18
·
verified ·
1 Parent(s): e931ce1
Files changed (1) hide show
  1. app.py +371 -0
app.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# app.py -- KC Robot AI V4.0 (Cloud Brain)
# Flask server: Chat (HF), TTS, STT, Telegram poller, REST API for the ESP32
# Setup: set env HF_API_TOKEN, (optional) HF_MODEL, HF_TTS_MODEL, HF_STT_MODEL, TELEGRAM_TOKEN
# requirements: see requirements.txt

import os
import io
import time
import json
import threading
import logging
from typing import Optional, List, Tuple

import requests
from flask import Flask, request, jsonify, send_file, render_template_string

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("kcrobot.v4")

app = Flask(__name__)

# ====== Config from env / Secrets ======
# Hugging Face Inference API bearer token; all HF helpers below send it.
HF_API_TOKEN = os.getenv("HF_API_TOKEN", "")
# Text-generation model used by /ask and the Telegram /ask command.
HF_MODEL = os.getenv("HF_MODEL", "google/flan-t5-large")
# Text-to-speech model used by /tts and the Telegram /say command.
# NOTE(review): the default is a Spanish (es-css10) voice -- confirm this is
# intended for a Vietnamese/English assistant.
HF_TTS_MODEL = os.getenv("HF_TTS_MODEL", "facebook/tts_transformer-es-css10")
# Speech-to-text model used by /stt.
HF_STT_MODEL = os.getenv("HF_STT_MODEL", "openai/whisper-small")
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN", "")
# Port resolution: PORT, then SERVER_PORT, then 7860.
PORT = int(os.getenv("PORT", os.getenv("SERVER_PORT", 7860)))

if not HF_API_TOKEN:
    logger.warning("HF_API_TOKEN not set. Put HF_API_TOKEN in Secrets.")

# Shared Authorization header for every Hugging Face Inference API request.
HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
# ====== In-memory storage (simple) ======
# conversation: list of (user, bot) pairs
CONV: List[Tuple[str, str]] = []
# display_lines for ESP32 OLED (last few lines)
DISPLAY_LINES: List[str] = []

# helper to maintain display buffer
def push_display(line: str, limit=6):
    """Append *line* to the OLED buffer, keeping only the newest *limit* lines."""
    global DISPLAY_LINES
    DISPLAY_LINES.append(line)
    overflow = len(DISPLAY_LINES) - limit
    if overflow > 0:
        # Rebind to the trimmed tail, matching the original's slicing behavior.
        DISPLAY_LINES = DISPLAY_LINES[overflow:]
# ====== HuggingFace helpers (REST inference) ======
def hf_text_generate(prompt: str, model: Optional[str] = None, max_new_tokens: int = 256, temperature: float = 0.7) -> str:
    """Run text generation on the HF Inference API and return the generated text.

    Falls back to HF_MODEL when *model* is None; raises RuntimeError on any
    non-200 response.
    """
    chosen = model or HF_MODEL
    endpoint = f"https://api-inference.huggingface.co/models/{chosen}"
    body = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": int(max_new_tokens),
            "temperature": float(temperature),
        },
        # wait_for_model: block while the model is loading instead of erroring.
        "options": {"wait_for_model": True},
    }
    resp = requests.post(endpoint, headers=HF_HEADERS, json=body, timeout=120)
    if resp.status_code != 200:
        logger.error("HF text gen error %s: %s", resp.status_code, resp.text[:200])
        raise RuntimeError(f"HF text generation failed: {resp.status_code}: {resp.text}")
    data = resp.json()
    # The API returns either a list of dicts or a bare dict; handle both shapes.
    if isinstance(data, list) and len(data) and isinstance(data[0], dict):
        return data[0].get("generated_text", "") or str(data[0])
    if isinstance(data, dict) and "generated_text" in data:
        return data.get("generated_text", "")
    return str(data)
def hf_tts_get_mp3(text: str, model: Optional[str] = None) -> bytes:
    """Synthesize *text* with the HF Inference API and return raw audio bytes.

    Falls back to HF_TTS_MODEL when *model* is None; raises RuntimeError on
    any non-200 response. The bytes are whatever format the model emits
    (typically MP3/WAV).
    """
    model = model or HF_TTS_MODEL
    url = f"https://api-inference.huggingface.co/models/{model}"
    payload = {"inputs": text}
    # `json=` already sets Content-Type: application/json, and the whole body
    # is consumed below via r.content, so the original's manual header and
    # stream=True were redundant (streaming without iter_content buys nothing
    # and r.text in the error path forces a full read anyway).
    r = requests.post(url, headers=HF_HEADERS, json=payload, timeout=120)
    if r.status_code != 200:
        logger.error("HF TTS error %s: %s", r.status_code, r.text[:200])
        raise RuntimeError(f"HF TTS failed: {r.status_code}: {r.text}")
    return r.content
def hf_stt_from_bytes(audio_bytes: bytes, model: Optional[str] = None) -> str:
    """Transcribe raw audio bytes via the HF Inference API.

    Returns the recognized text, or the stringified response when the shape
    is unexpected. Raises RuntimeError on any non-200 response.
    """
    target = model or HF_STT_MODEL
    url = f"https://api-inference.huggingface.co/models/{target}"
    headers = {**HF_HEADERS, "Content-Type": "application/octet-stream"}
    r = requests.post(url, headers=headers, data=audio_bytes, timeout=180)
    if r.status_code != 200:
        logger.error("HF STT error %s: %s", r.status_code, r.text[:200])
        raise RuntimeError(f"HF STT failed: {r.status_code}: {r.text}")
    payload = r.json()
    # common: {"text":"..."}
    if isinstance(payload, dict) and "text" in payload:
        return payload["text"]
    # fallback
    return str(payload)
# ====== Core endpoints for ESP32 ======
@app.route("/ask", methods=["POST"])
def api_ask():
    """ESP32 or web call: JSON {text, lang (opt)} -> returns {"answer": "..."}"""
    data = request.get_json(force=True)
    text = data.get("text", "").strip()
    lang = data.get("lang", "auto")
    if not text:
        return jsonify({"error": "no text"}), 400
    # Language-specific system prefixes; anything else (incl. "auto") uses the
    # bilingual default.
    prefixes = {
        "vi": "Bạn là trợ lý thông minh, trả lời bằng tiếng Việt, rõ ràng và ngắn gọn:\n\n",
        "en": "You are a helpful assistant. Answer in clear English, concise:\n\n",
    }
    bilingual = "Bạn là trợ lý thông minh song ngữ (Vietnamese/English). Trả lời bằng ngôn ngữ phù hợp với câu hỏi.\n\n"
    prompt = prefixes.get(lang, bilingual) + text
    try:
        ans = hf_text_generate(prompt)
    except Exception as e:
        logger.exception("ask failed")
        return jsonify({"error": str(e)}), 500
    # Record the exchange and mirror a 40-char preview onto the OLED buffer.
    CONV.append((text, ans))
    push_display("YOU: " + text[:40])
    push_display("BOT: " + ans[:40])
    return jsonify({"answer": ans})
@app.route("/tts", methods=["POST"])
def api_tts():
    """POST JSON {text: "..."} -> return audio/mpeg bytes (mp3 or wav)"""
    payload = request.get_json(force=True)
    text = payload.get("text", "").strip()
    if not text:
        return jsonify({"error": "no text"}), 400
    try:
        audio = hf_tts_get_mp3(text)
    except Exception as e:
        logger.exception("tts failed")
        return jsonify({"error": str(e)}), 500
    # Stream the synthesized bytes back inline (not as a download).
    buffer = io.BytesIO(audio)
    return send_file(buffer, mimetype="audio/mpeg", as_attachment=False, download_name="tts.mp3")
@app.route("/stt", methods=["POST"])
def api_stt():
    """
    Accepts raw audio bytes in body OR multipart 'file'.
    Returns JSON {"text": "..."}
    """
    upload = request.files.get("file")
    # Prefer the multipart upload; otherwise take the raw request body.
    audio_bytes = upload.read() if upload is not None else request.get_data()
    if not audio_bytes:
        return jsonify({"error": "no audio"}), 400
    try:
        text = hf_stt_from_bytes(audio_bytes)
    except Exception as e:
        logger.exception("stt failed")
        return jsonify({"error": str(e)}), 500
    # Mirror a 40-char preview of the transcript onto the OLED buffer.
    push_display("UserAudio: " + text[:40])
    return jsonify({"text": text})
@app.route("/presence", methods=["POST"])
def api_presence():
    """
    ESP32 radar -> POST JSON {"event":"presence","note": "..."}.
    Server: will announce greeting (call TTS) and send Telegram alert.
    """
    payload = request.get_json(force=True)
    note = payload.get("note", "Có người tới")
    # Build the spoken greeting and record the event.
    greeting = "Xin chào! " + note
    CONV.append(("__presence__", greeting))
    push_display("RADAR: " + note[:40])
    # Best-effort Telegram alert; never let it break the response.
    if TELEGRAM_TOKEN:
        try:
            send_telegram_message(f"⚠️ Robot: Phát hiện người - {note}")
        except Exception:
            logger.exception("telegram notify failed")
    # The ESP32 can feed this greeting back into /tts to play it aloud.
    return jsonify({"greeting": greeting})
@app.route("/display", methods=["GET"])
def api_display():
    """ESP32 GET -> returns last display lines to show on OLED."""
    snapshot = {"lines": DISPLAY_LINES, "conv_len": len(CONV)}
    return jsonify(snapshot)
# ====== Web UI (simple mobile-friendly) ======
# Single-page chat UI served at "/". The inline JS calls the /ask, /tts and
# /stt endpoints defined above; the HTML/JS below is runtime data and is kept
# byte-for-byte as authored.
INDEX_HTML = """
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>KC Robot AI V4.0</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body{font-family:Arial,Helvetica;color:#111;margin:10px;padding:0}
.box{max-width:900px;margin:auto}
textarea{width:100%;height:80px;padding:8px;font-size:16px}
button{padding:10px 16px;margin-top:6px;font-size:16px}
#chat{border:1px solid #ddd;padding:8px;height:320px;overflow:auto;background:#f9f9f9}
.msg-user{color:#006; margin:6px 0}
.msg-bot{color:#080; margin:6px 0}
</style>
</head>
<body>
<div class="box">
<h2>🤖 KC Robot AI V4.0 — Cloud Brain</h2>
<div id="chat"></div>
<textarea id="txt" placeholder="Nhập tiếng Việt hoặc English..."></textarea><br>
<button onclick="send()">Gửi (Ask)</button>
<button onclick="playLastTTS()">Phát TTS trả lời</button>
<hr/>
<input type="file" id="audiofile" accept="audio/*"><button onclick="uploadAudio()">Upload audio → STT</button>
<hr/>
<h4>Logs</h4><div id="log"></div>
</div>
<script>
async function send(){
  const txt = document.getElementById('txt').value;
  if(!txt) return;
  appendUser(txt);
  document.getElementById('txt').value='';
  const res = await fetch('/ask',{method:'POST',headers:{'Content-Type':'application/json'}, body: JSON.stringify({text: txt})});
  const j = await res.json();
  if(j.answer){
    appendBot(j.answer);
    // cache last answer for TTS
    window._lastAnswer = j.answer;
  } else {
    appendBot('[Error] '+JSON.stringify(j));
  }
}
function appendUser(t){document.getElementById('chat').innerHTML += '<div class="msg-user"><b>You:</b> '+escapeHtml(t)+'</div>'; scrollChat();}
function appendBot(t){document.getElementById('chat').innerHTML += '<div class="msg-bot"><b>Robot:</b> '+escapeHtml(t)+'</div>'; scrollChat();}
function scrollChat(){let c=document.getElementById('chat'); c.scrollTop = c.scrollHeight;}
function escapeHtml(s){ return s.replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');}
async function playLastTTS(){
  const txt = window._lastAnswer || '';
  if(!txt){ alert('Chưa có câu trả lời để phát'); return; }
  const r = await fetch('/tts',{method:'POST',headers:{'Content-Type':'application/json'},body: JSON.stringify({text:txt})});
  if(r.ok){
    const blob = await r.blob();
    const url = URL.createObjectURL(blob);
    const a = new Audio(url);
    a.play();
  } else {
    alert('TTS lỗi');
  }
}
async function uploadAudio(){
  const f = document.getElementById('audiofile').files[0];
  if(!f){ alert('Chọn file audio'); return; }
  const fd = new FormData(); fd.append('file', f);
  const r = await fetch('/stt', {method:'POST', body: fd});
  const j = await r.json();
  if(j.text){ appendUser('[voice] '+j.text); window._lastSTT = j.text; }
  else appendUser('[stt error] '+JSON.stringify(j));
}
// simple logger
function log(msg){ document.getElementById('log').innerText += '\\n'+msg; }
</script>
</body>
</html>
"""
@app.route("/", methods=["GET"])
def index():
    """Serve the single-page chat UI."""
    page = INDEX_HTML
    return render_template_string(page)
# ====== Telegram integration (polling minimal) ======
def send_telegram_message(text: str):
    """Best-effort push of *text* to the chat id in env TELEGRAM_CHATID.

    Logs and swallows every failure; never raises to the caller.
    """
    if not TELEGRAM_TOKEN:
        logger.warning("Telegram token not set")
        return
    endpoint = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
    body = {"chat_id": os.getenv("TELEGRAM_CHATID", ""), "text": text}
    try:
        resp = requests.post(endpoint, json=body, timeout=10)
    except Exception:
        logger.exception("send_telegram_message error")
        return
    if not resp.ok:
        logger.warning("Telegram send failed: %s %s", resp.status_code, resp.text)
def telegram_poll_loop(server_url: str):
    """Long-poll the Telegram Bot API forever and answer bot commands.

    Supported commands: /ask <q> (HF text generation reply), /say <text>
    (HF TTS sent back as audio), /status; anything else gets a help line.
    Runs until the process exits; intended for a daemon thread.

    NOTE(review): *server_url* is accepted but never used in this body --
    confirm whether it was meant for loopback calls to the Flask API.
    """
    if not TELEGRAM_TOKEN:
        logger.info("No TELEGRAM_TOKEN -> telegram disabled")
        return
    logger.info("Starting Telegram poller")
    # offset: next update_id to fetch; None on the first getUpdates call.
    offset = None
    base = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}"
    while True:
        try:
            # 30s server-side long poll; HTTP timeout slightly larger (35s).
            params = {"timeout": 30}
            if offset:
                params["offset"] = offset
            r = requests.get(base + "/getUpdates", params=params, timeout=35)
            if r.status_code != 200:
                time.sleep(2); continue
            j = r.json()
            for u in j.get("result", []):
                # Acknowledge this update so Telegram won't redeliver it.
                offset = u["update_id"] + 1
                msg = u.get("message") or {}
                chat = msg.get("chat", {})
                chat_id = chat.get("id")
                text = (msg.get("text") or "").strip()
                if not text:
                    continue
                logger.info("TG msg %s: %s", chat_id, text)
                # commands: /ask , /say, /status
                if text.lower().startswith("/ask "):
                    # Strip the "/ask " prefix and query the text model.
                    q = text[5:].strip()
                    try:
                        ans = hf_text_generate(q)
                    except Exception as e:
                        # Surface the HF failure to the user instead of dying.
                        ans = f"[HF error] {e}"
                    # reply
                    try:
                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": ans}, timeout=10)
                    except Exception:
                        logger.exception("tg reply failed")
                elif text.lower().startswith("/say "):
                    tts_text = text[5:].strip()
                    # get mp3 and send as audio
                    try:
                        mp3 = hf_tts_get_mp3(tts_text)
                        files = {"audio": ("reply.mp3", mp3, "audio/mpeg")}
                        requests.post(base + "/sendAudio", files=files, data={"chat_id": chat_id}, timeout=30)
                    except Exception:
                        logger.exception("tg say failed")
                elif text.lower().startswith("/status"):
                    try:
                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": "Robot brain running"}, timeout=10)
                    except Exception:
                        pass
                else:
                    # default help
                    try:
                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": "Commands: /ask <q> | /say <text> | /status"}, timeout=10)
                    except Exception:
                        pass
        except Exception:
            # Keep the poller alive through any network/parse error.
            logger.exception("telegram poll loop exception")
        time.sleep(3)
# ====== Background threads startup ======
def start_background():
    """Launch the Telegram poller as a daemon thread when a token is configured."""
    if not TELEGRAM_TOKEN:
        logger.info("Telegram not configured.")
        return
    worker = threading.Thread(
        target=telegram_poll_loop,
        args=(f"http://127.0.0.1:{PORT}",),
        daemon=True,
    )
    worker.start()
    logger.info("Telegram poller started.")
# start background when app runs
# Flask 2.3 removed the @app.before_first_request decorator, so the original
# hook crashes on import with current Flask. Emulate the same "run once, on
# the first request" behavior with before_request plus a run-once flag.
_background_started = threading.Event()

@app.before_request
def _startup():
    """Start background workers exactly once, on the first incoming request."""
    if not _background_started.is_set():
        _background_started.set()
        start_background()
# ====== run ======
if __name__ == "__main__":
    # Direct execution (no WSGI server): start workers and serve on all
    # interfaces. Use lazy %-formatting so the log message is only built
    # when INFO is enabled.
    start_background()
    logger.info("Starting server on port %s", PORT)
    app.run(host="0.0.0.0", port=PORT)