Spaces: Sleeping
initial commit
Browse files

app.py CHANGED
@@ -1,664 +1,468 @@
-# Minimal deps: flask, requests, gTTS, python-multipart
-# Keep requirements.txt consistent with these packages.
-
-import os
-import io
-import sys
-import time
-import json
-import threading
-import logging
-import requests
-from pathlib import Path
-from typing import Any, List, Optional, Tuple
-from flask import Flask, request, jsonify, Response
-
-# Optional gTTS fallback for local speech synthesis
-try:
-    from gtts import gTTS
-    _HAS_GTTS = True
-except Exception:
-    _HAS_GTTS = False
-
-# ---------------- logging ----------------
-logging.basicConfig(stream=sys.stdout, level=logging.INFO,
-                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
-logger = logging.getLogger("kcrobot.v7.5")
-
-# ---------------- env / secrets ----------------
-HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
-HF_MODEL = os.getenv("HF_MODEL", "auto").strip()  # preferred model (may be empty)
-HF_TTS_MODEL = os.getenv("HF_TTS_MODEL", "").strip()  # optional HF TTS model
-HF_STT_MODEL = os.getenv("HF_STT_MODEL", "openai/whisper-small").strip()
-TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN", "").strip()
-TELEGRAM_CHAT_ID = os.getenv("TELEGRAM_CHAT_ID", "").strip()
-PORT = int(os.getenv("PORT", 7860))
-
-HF_HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
-SELECTED_MODEL: Optional[str] = None  # set by the startup model check
-
-# ---------------- tmp dir ----------------
-TMPDIR = Path("/tmp/kcrobot") if os.name != "nt" else Path.cwd() / "tmp_kcrobot"
-TMPDIR.mkdir(parents=True, exist_ok=True)
-CONV_LOG = TMPDIR / "conversation_log.jsonl"
-
-# ---------------- in-memory ----------------
-CONVERSATION: List[Tuple[str, str]] = []
-DISPLAY_BUFFER: List[str] = []
-DISPLAY_LIMIT = 6
-
-def push_display(line: str):
-    global DISPLAY_BUFFER
-    DISPLAY_BUFFER.append(line)
-    if len(DISPLAY_BUFFER) > DISPLAY_LIMIT:
-        DISPLAY_BUFFER = DISPLAY_BUFFER[-DISPLAY_LIMIT:]
-
-def save_conv(user: str, bot: str):
-    try:
-        with CONV_LOG.open("a", encoding="utf-8") as f:
-            f.write(json.dumps({"user": user, "bot": bot, "ts": time.time()}, ensure_ascii=False) + "\n")
-    except Exception:
-        logger.exception("save_conv failed")
-
-# ---------------- small helpers ----------------
-def clean_text(text: Any) -> str:
-    if text is None:
-        return ""
-    s = str(text)
-    import re
-    s = re.sub(r'[\x00-\x08\x0b-\x0c\x0e-\x1f]+', ' ', s)
-    s = re.sub(r'\s+', ' ', s).strip()
-    return s
-
-VI_CHARS = set("ăâđêôơưáàảãạắằẳẵặấầẩẫậéèẻẽẹíìỉĩịóòỏõọúùủũụứừửữựýỳỷỹỵ")
-def detect_language(text: str) -> str:
-    t = (text or "").lower()
-    for ch in t:
-        if ch in VI_CHARS:
-            return "vi"
-    return "en"
-
-# ---------------- HF Inference API helpers ----------------
-def hf_post_json(model_id: str, payload: dict, timeout: int = 120) -> requests.Response:
-    if not HF_TOKEN:
-        raise RuntimeError("HF_TOKEN not configured in Secrets")
-    url = f"https://api-inference.huggingface.co/models/{model_id}"
-    headers = dict(HF_HEADERS)
-    headers["Content-Type"] = "application/json"
-    return requests.post(url, headers=headers, json=payload, timeout=timeout)
-
-def hf_post_bytes(model_id: str, data: bytes, content_type: str = "application/octet-stream", timeout: int = 180) -> requests.Response:
-    if not HF_TOKEN:
-        raise RuntimeError("HF_TOKEN not configured in Secrets")
-    url = f"https://api-inference.huggingface.co/models/{model_id}"
-    headers = dict(HF_HEADERS)
-    headers["Content-Type"] = content_type
-    return requests.post(url, headers=headers, data=data, timeout=timeout)
-
-def parse_hf_text_output(obj: Any) -> str:
-    try:
-        if isinstance(obj, dict):
-            for k in ("generated_text", "text"):
-                if k in obj:
-                    return obj.get(k, "")
-            if "choices" in obj and isinstance(obj["choices"], list) and obj["choices"]:
-                c0 = obj["choices"][0]
-                return c0.get("text") or c0.get("message", {}).get("content", "") or str(c0)
-            return json.dumps(obj, ensure_ascii=False)
-        if isinstance(obj, list) and obj:
-            first = obj[0]
-            if isinstance(first, dict):
-                for k in ("generated_text", "text"):
-                    if k in first:
-                        return first.get(k, "")
-            return str(first)
-        return str(obj)
-    except Exception:
-        logger.exception("parse_hf_text_output failed")
-        return str(obj)
-
-# ---------------- Auto model finder ----------------
-# Candidate fallback list — you can extend
-DEFAULT_MODEL_CANDIDATES = [
-    "mistralai/Mistral-7B-Instruct-v0.3",
-    "google/gemma-2b-it",
-    "databricks/dolly-v2-3b",
-    "tiiuae/falcon-7b-instruct",  # may be private at times
-    "facebook/blenderbot-400M-distill",
-    # Vietnamese candidates (if public)
-    "vinai/PhoGPT-4B",
-]
-
-def test_model_working(model_id: str, timeout: int = 30) -> Tuple[bool, dict]:
-    """
-    Return (ok, response_short_info)
-    ok True if got status 200 and some textual output parseable
-    """
-    try:
-        # small probe request
-        r = hf_post_json(model_id, {"inputs": "Hello"}, timeout=timeout)
-        info = {"status": r.status_code, "text": (r.text[:500] if r.text else "")}
-        if r.status_code == 200:
-            # try parse
-            try:
-                j = r.json()
-                out = parse_hf_text_output(j)
-                if out and len(out.strip()) > 0:
-                    info["result"] = out
-                    return True, info
-            except Exception:
-                # maybe non-json; if text length present, accept minimally
-                if r.text and len(r.text.strip()) > 0:
-                    info["result"] = r.text
-                    return True, info
-        return False, info
-    except requests.exceptions.RequestException as e:
-        logger.warning("test_model_working request exception for %s: %s", model_id, e)
-        return False, {"error": str(e)}
-    except Exception:
-        logger.exception("test_model_working failed for %s", model_id)
-        return False, {"error": "unexpected"}
-
-def auto_select_model(preferred: Optional[str] = None) -> Optional[str]:
-    """
-    Probe the preferred model first, then the fallback candidates;
-    return the first model id that answers, else None.
-    """
-    candidates = []
-    if preferred and preferred != "auto":
-        candidates.append(preferred)
-    candidates.extend(m for m in DEFAULT_MODEL_CANDIDATES if m not in candidates)
-    for m in candidates:
-        ok, info = test_model_working(m)
-        logger.info("auto_select_model: %s -> %s", m, "ok" if ok else info.get("status", info))
-        if ok:
-            return m
-    return None
-
-# ---------------- HF text gen / STT / TTS ----------------
-def hf_text_generate(prompt: str, model_override: Optional[str] = None) -> str:
-    model = model_override or SELECTED_MODEL or HF_MODEL
-    if not model or model == "auto":
-        raise RuntimeError("No usable HF model selected — run /model_check or set HF_MODEL")
-    r = hf_post_json(model, {"inputs": prompt}, timeout=120)
-    if r.status_code == 200:
-        try:
-            return clean_text(parse_hf_text_output(r.json()))
-        except Exception:
-            return clean_text(r.text)
-    elif r.status_code == 403:
-        raise RuntimeError("HF returned 403 (forbidden) — token or access rights issue")
-    elif r.status_code == 404:
-        raise RuntimeError("HF returned 404 (model not found) — check HF_MODEL or model access")
-    else:
-        raise RuntimeError(f"HF text gen returned {r.status_code}: {r.text[:300]}")
-
-def hf_stt_from_bytes(audio_bytes: bytes, model_override: Optional[str] = None) -> str:
-    model = model_override or HF_STT_MODEL
-    if not model:
-        raise RuntimeError("HF_STT_MODEL not configured")
-    r = hf_post_bytes(model, audio_bytes, content_type="application/octet-stream", timeout=180)
-    if r.status_code == 200:
-        try:
-            j = r.json()
-            if isinstance(j, dict) and "text" in j:
-                return clean_text(j["text"])
-            return clean_text(str(j))
-        except Exception:
-            return clean_text(r.text)
-    raise RuntimeError(f"HF STT returned {r.status_code}: {r.text[:300]}")
-
-def hf_tts_get_bytes(text: str) -> bytes:
-    text = clean_text(text)
-    # try the optional HF TTS model first
-    if HF_TTS_MODEL:
-        try:
-            r = hf_post_json(HF_TTS_MODEL, {"inputs": text}, timeout=120)
-            if r.status_code == 200 and r.content:
-                return r.content
-            logger.warning("HF TTS returned %s: %s", r.status_code, r.text[:200])
-        except Exception:
-            logger.exception("HF TTS call failed")
-    # fallback to gTTS if present
-    if _HAS_GTTS:
-        try:
-            lang = "vi" if detect_language(text) == "vi" else "en"
-            tts = gTTS(text=text, lang=lang)
-            bio = io.BytesIO()
-            tts.write_to_fp(bio)
-            bio.seek(0)
-            return bio.read()
-        except Exception:
-            logger.exception("gTTS fallback failed")
-    raise RuntimeError("No TTS available (no HF_TTS_MODEL and gTTS not installed)")
-
-# ---------------- Telegram helpers ----------------
-def telegram_send_message(chat_id: str, text: str) -> bool:
-    if not TELEGRAM_TOKEN or not chat_id:
-        return False
-    try:
-        url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
-        r = requests.post(url, json={"chat_id": chat_id, "text": text}, timeout=10)
-        if r.status_code != 200:
-            logger.warning("Telegram sendMessage failed %s: %s", r.status_code, r.text[:300])
-            return False
-        return True
-    except Exception:
-        logger.exception("telegram_send_message failed")
-        return False
-
-def telegram_send_audio(chat_id: str, audio_bytes: bytes, filename: str = "reply.mp3") -> bool:
-    if not TELEGRAM_TOKEN or not chat_id:
-        return False
-    try:
-        url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendAudio"
-        files = {"audio": (filename, audio_bytes)}
-        data = {"chat_id": chat_id}
-        r = requests.post(url, files=files, data=data, timeout=30)
-        if r.status_code != 200:
-            logger.warning("Telegram sendAudio failed %s: %s", r.status_code, r.text[:300])
-            return False
-        return True
-    except Exception:
-        logger.exception("telegram_send_audio failed")
-        return False
-
-# ---------------- Telegram poller ----------------
-def telegram_poller_loop():
-    if not TELEGRAM_TOKEN:
-        logger.info("Telegram token not set; poller disabled")
-        return
-    logger.info("Starting Telegram poller")
-    base = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}"
-    offset = None
-    while True:
-        try:
-            params = {"timeout": 30}
-            if offset: params["offset"] = offset
-            r = requests.get(base + "/getUpdates", params=params, timeout=35)
-            if r.status_code != 200:
-                logger.warning("Telegram getUpdates failed: %s", r.status_code)
-                time.sleep(2); continue
-            j = r.json()
-            for upd in j.get("result", []):
-                offset = upd.get("update_id", 0) + 1
-                msg = upd.get("message") or {}
-                chat = msg.get("chat", {})
-                chat_id = str(chat.get("id"))
-                text = (msg.get("text") or "").strip()
-                if not text: continue
-                logger.info("TG msg %s: %s", chat_id, text[:200])
-                lower = text.lower()
-                if lower.startswith("/ask "):
-                    q = text[5:].strip()
-                    try:
-                        ans = hf_text_generate(q)
-                    except Exception as e:
-                        ans = f"[HF error] {e}"
-                    try:
-                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": ans}, timeout=10)
-                    except Exception:
-                        logger.exception("tg reply failed")
-                elif lower.startswith("/say "):
-                    phrase = text[5:].strip()
-                    try:
-                        audio = hf_tts_get_bytes(phrase)
-                        telegram_send_audio(chat_id, audio, filename="say.mp3")
-                    except Exception:
-                        logger.exception("tg say failed")
-                elif lower.startswith("/status"):
-                    try:
-                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": "KC Robot v7.5 running"}, timeout=10)
-                    except Exception:
-                        pass
-                else:
-                    try:
-                        requests.post(base + "/sendMessage", json={"chat_id": chat_id, "text": "Commands: /ask <q> | /say <text> | /status"}, timeout=10)
-                    except Exception:
-                        pass
-        except Exception:
-            logger.exception("telegram poller crashed, sleeping 3s")
-            time.sleep(3)
-
-if TELEGRAM_TOKEN:
-    try:
-        t = threading.Thread(target=telegram_poller_loop, daemon=True)
-        t.start()
-    except Exception:
-        logger.exception("start telegram thread failed")
-
-# ---------------- Flask app & endpoints ----------------
-app = Flask(__name__)
-
-INDEX_HTML = """
-<!doctype html>
-<html>
-<head>
-<meta charset="utf-8">
-<title>KC Robot v7.5</title>
-<style>
-body{font-family:Arial;margin:16px;background:#f7f9fc;color:#222}
-#chat{height:300px;overflow:auto;border:1px solid #ddd;background:#fff;padding:8px}
-textarea{width:100%;font-size:15px}
-button{margin-right:6px}
-.you{color:#0b63d6;margin:6px 0}
-.bot{color:#0b8a3f;margin:6px 0}
-.small{font-size:13px;color:#666}
-</style>
-</head>
-<body>
-<div>
-<h2>KC Robot v7.5</h2>
-<div class="small">Model: <span id="modelName">(loading)</span> | Telegram: <span id="tgstatus">...</span></div>
-<textarea id="q" rows="3" placeholder="Nhập câu hỏi / type a question..."></textarea>
-<div style="margin-top:6px">
-<button onclick="send()">Gửi</button>
-<button onclick="playLast()">Nghe</button>
-<button onclick="clearChat()">Xóa</button>
-</div>
-<div id="chat"></div>
-<div style="margin-top:10px">
-<input type="file" id="afile" accept="audio/*"><button onclick="uploadAudio()">Upload → STT</button>
-</div>
-<hr>
-<div class="small">Diagnostics: <button onclick="modelCheck()">Kiểm tra model</button><span id="diag"></span></div>
-</div>
-<script>
-let lastAnswer = "";
-async function loadStatus(){ try{ let r=await fetch('/health'); let j=await r.json(); document.getElementById('modelName').innerText=j.hf_model||'(not set)'; document.getElementById('tgstatus').innerText=j.telegram ? 'enabled' : 'disabled'; }catch(e){ console.log(e); } }
-function escapeHtml(s){ return (s+'').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;'); }
-function appendYou(t){ document.getElementById('chat').innerHTML += '<div class="you"><b>You:</b> '+escapeHtml(t)+'</div>'; scroll(); }
-function appendBot(t){ document.getElementById('chat').innerHTML += '<div class="bot"><b>Robot:</b> '+escapeHtml(t)+'</div>'; scroll(); }
-function scroll(){ let c=document.getElementById('chat'); c.scrollTop = c.scrollHeight; }
-async function send(){
-  const t = document.getElementById('q').value.trim();
-  if(!t) return;
-  appendYou(t); document.getElementById('q').value='';
-  try{
-    let r=await fetch('/ask',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({text:t})});
-    let j=await r.json();
-    if(j.answer){ lastAnswer=j.answer; appendBot(j.answer); } else appendBot('[error] '+JSON.stringify(j));
-  }catch(e){ appendBot('[network error] '+e); }
-}
-async function playLast(){
-  if(!lastAnswer) return alert('Chưa có câu trả lời');
-  try{
-    let r=await fetch('/tts',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({text:lastAnswer})});
-    if(!r.ok){ alert('TTS lỗi'); return; }
-    const blob = await r.blob();
-    const url=URL.createObjectURL(blob);
-    const audio=new Audio(url); audio.play();
-  }catch(e){ alert('Play error: '+e); }
-}
-async function uploadAudio(){
-  const f=document.getElementById('afile').files[0]; if(!f) return alert('Chọn file audio');
-  const fd=new FormData(); fd.append('file', f);
-  const r=await fetch('/stt',{method:'POST', body: fd});
-  const j=await r.json();
-  if(j.text){ appendYou('[voice] '+j.text); } else appendYou('[stt error] '+JSON.stringify(j));
-}
-function clearChat(){ document.getElementById('chat').innerHTML=''; lastAnswer=''; }
-async function modelCheck(){
-  document.getElementById('diag').innerText=' checking...';
-  try{
-    let r=await fetch('/model_check');
-    let j=await r.json();
-    document.getElementById('diag').innerText = ' ' + JSON.stringify(j).slice(0,200);
-    loadStatus();
-  }catch(e){ document.getElementById('diag').innerText=' error'; }
-}
-loadStatus();
-</script>
-</body>
-</html>
-"""
-
-@app.route("/", methods=["GET"])
-def index():
-    return Response(INDEX_HTML, mimetype="text/html")
-
-@app.route("/health", methods=["GET"])
-def health():
-    return jsonify({
-        "status": "ok",
-        "hf_model": SELECTED_MODEL or HF_MODEL,
-        "telegram": bool(TELEGRAM_TOKEN and TELEGRAM_CHAT_ID),
-    })
-
-@app.route("/ask", methods=["POST"])
-def route_ask():
-    try:
-        j = request.get_json(force=True) or {}
-        text = clean_text(j.get("text", "") or "")
-        if not text:
-            return jsonify({"error": "no text"}), 400
-        prompt = f"You are a bilingual assistant (Vietnamese/English). Answer in the same language as the user, clearly and concisely:\n\n{text}"
-        try:
-            ans = hf_text_generate(prompt)
-        except Exception as e:
-            logger.exception("hf_text_generate failed")
-            return jsonify({"error": str(e)}), 500
-        CONVERSATION.append((text, ans))
-        save_conv(text, ans)
-        push_display("YOU: " + (text[:60]))
-        push_display("BOT: " + (ans[:60] if isinstance(ans, str) else str(ans)[:60]))
-        # notify telegram
-        if TELEGRAM_TOKEN and TELEGRAM_CHAT_ID:
-            try:
-                telegram_send_message(TELEGRAM_CHAT_ID, f"You: {text}\nBot: {ans[:300]}")
-            except Exception:
-                logger.exception("telegram notify failed")
-        return jsonify({"answer": ans})
-    except Exception as e:
-        logger.exception("route_ask exception")
-        return jsonify({"error": str(e)}), 500
-
-@app.route("/tts", methods=["POST"])
-def route_tts():
-    try:
-        j = request.get_json(force=True) or {}
-        text = clean_text(j.get("text","") or "")
-        if not text:
-            return jsonify({"error":"no text"}), 400
-        try:
-            audio_bytes = hf_tts_get_bytes(text)
-        except Exception as e:
-            logger.exception("tts generation failed")
-            return jsonify({"error": str(e)}), 500
-        return Response(audio_bytes, mimetype="audio/mpeg")
-    except Exception as e:
-        logger.exception("route_tts exception")
-        return jsonify({"error": str(e)}), 500
-
-@app.route("/stt", methods=["POST"])
-def route_stt():
-    try:
-        if "file" in request.files:
-            f = request.files["file"]
-            audio_bytes = f.read()
-        else:
-            audio_bytes = request.get_data()
-        if not audio_bytes:
-            return jsonify({"error":"no audio provided"}), 400
-        try:
-            txt = hf_stt_from_bytes(audio_bytes)
-        except Exception as e:
-            logger.exception("STT failed")
-            return jsonify({"error": str(e)}), 500
-        CONVERSATION.append((f"[voice] {txt}", ""))
-        save_conv(f"[voice] {txt}", "")
-        push_display("VOICE: " + (txt[:60] if isinstance(txt, str) else str(txt)))
-        return jsonify({"text": txt})
-    except Exception as e:
-        logger.exception("route_stt exception")
-        return jsonify({"error": str(e)}), 500
-
-@app.route("/presence", methods=["POST"])
-def route_presence():
-    """
-    ESP32 radar should POST JSON {"note":"..."}.
-    Server returns greeting audio (if TTS available) or JSON greeting.
-    Also sends telegram notification if configured.
-    """
-    try:
-        j = request.get_json(force=True) or {}
-        note = clean_text(j.get("note","Có người phía trước") or "Có người phía trước")
-        greeting = f"Xin chào! {note}"
-        CONVERSATION.append(("__presence__", greeting))
-        save_conv("__presence__", greeting)
-        push_display("RADAR: " + note[:60])
-        if TELEGRAM_TOKEN and TELEGRAM_CHAT_ID:
-            try:
-                telegram_send_message(TELEGRAM_CHAT_ID, f"⚠️ Robot: Phát hiện người - {note}")
-            except Exception:
-                logger.exception("telegram notify failed")
-        try:
-            audio_bytes = hf_tts_get_bytes(greeting)
-            return Response(audio_bytes, mimetype="audio/mpeg")
-        except Exception:
-            return jsonify({"greeting": greeting})
-    except Exception as e:
-        logger.exception("route_presence exception")
-        return jsonify({"error": str(e)}), 500
-
-@app.route("/display", methods=["GET"])
-def route_display():
-    return jsonify({"lines": DISPLAY_BUFFER.copy(), "conv_len": len(CONVERSATION)})
-
-@app.route("/model_check", methods=["GET"])
-def model_check():
-    """
-    Attempt to verify HF_MODEL / select fallback, returns diagnostic JSON.
-    """
-    global SELECTED_MODEL
-    # first try current HF_MODEL
-    results = {}
-    try:
-        # if SELECTED_MODEL already set and seems good, return
-        if SELECTED_MODEL:
-            results["selected_model"] = SELECTED_MODEL
-            ok, info = test_model_working(SELECTED_MODEL)
-            results["selected_ok"] = ok
-            results["selected_info"] = info
-            return jsonify(results)
-        # else try auto-select with preference HF_MODEL
-        chosen = auto_select_model(HF_MODEL if HF_MODEL else None)
-        if chosen:
-            SELECTED_MODEL = chosen
-            results["selected_model"] = chosen
-            results["note"] = "Model selected"
-            return jsonify(results)
-        else:
-            results["error"] = "No usable model found in candidates"
-            return jsonify(results), 404
-    except Exception as e:
-        logger.exception("model_check failed")
-        return jsonify({"error": str(e)}), 500
-
-@app.route("/config", methods=["GET","POST"])
-def config():
-    """
-    GET returns current config.
-    POST JSON can change HF_MODEL / HF_TTS_MODEL / HF_STT_MODEL at runtime (temporary).
-    Example: {"hf_model":"...", "hf_tts_model":"..."}
-    """
-    global HF_MODEL, HF_TTS_MODEL, HF_STT_MODEL, SELECTED_MODEL
-    if request.method == "GET":
-        return jsonify({"hf_model": HF_MODEL, "hf_tts_model": HF_TTS_MODEL, "hf_stt_model": HF_STT_MODEL, "selected_model": SELECTED_MODEL})
-    try:
-        j = request.get_json(force=True) or {}
-        if "hf_model" in j:
-            HF_MODEL = clean_text(j["hf_model"])
-            SELECTED_MODEL = None  # force re-selection
-        if "hf_tts_model" in j:
-            HF_TTS_MODEL = clean_text(j["hf_tts_model"])
-        if "hf_stt_model" in j:
-            HF_STT_MODEL = clean_text(j["hf_stt_model"])
-        return jsonify({"ok": True})
-    except Exception as e:
-        logger.exception("config update failed")
-        return jsonify({"error": str(e)}), 500
-
-# ---------------- startup model check ----------------
-def startup_model_check():
-    global SELECTED_MODEL
-    try:
-        chosen = auto_select_model(HF_MODEL if HF_MODEL else None)
-        if chosen:
-            SELECTED_MODEL = chosen
-            logger.info("Startup: selected model = %s", SELECTED_MODEL)
-        else:
-            logger.warning("Startup: no usable HF model found yet. Use /model_check or set HF_MODEL secret.")
-    except Exception:
-        logger.exception("startup_model_check failed")
-
-# run startup check in a thread so Flask starts quickly
-t_start = threading.Thread(target=startup_model_check, daemon=True)
-t_start.start()
-
-if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=PORT)

+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+KCrobot AI — app.py v2.0 MAX FINAL
+- Run on Cloud Run / HuggingFace / local
+- Use Gemini cloud as brain (GEMINI_API_KEY, GEMINI_MODEL)
+- TTS: ElevenLabs -> gTTS fallback
+- Endpoints for ESP32: /api/chat, /api/chat_audio, /play_latest, /notify
+- Save history & usage in data/
+"""
+from __future__ import annotations
+import os
+import io
+import json
+import time
+import logging
+import pathlib
+import tempfile
+import base64
+from typing import Tuple, Optional, Dict, Any
+from datetime import datetime
+from flask import Flask, request, jsonify, send_file, render_template_string
+
+# TTS
+from gtts import gTTS
+
+# Optional Google Generative AI SDK (newer)
+GENAI_IMPORTED = False
+GENAI_CLIENT = None
+try:
+    # try new google-genai
+    from google import genai
+    GENAI_IMPORTED = True
+    # we will create client lazily with key
+except Exception:
+    try:
+        # try older google.generativeai for fallback
+        import google.generativeai as genai_old
+        GENAI_IMPORTED = True
+    except Exception:
+        GENAI_IMPORTED = False
+
+# HTTP
+import requests
+
+# Logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("kcrobot_v2")
+
+# -------------------------
+# CONFIG (via ENV / secrets)
+# -------------------------
+ADMIN_TOKEN = os.getenv("ADMIN_TOKEN", "")  # optional: protect admin endpoints if used
+CFG = {
+    "GEMINI_API_KEY": os.getenv("GEMINI_API_KEY", "").strip(),
+    "GEMINI_MODEL": os.getenv("GEMINI_MODEL", "models/gemini-2.5-flash").strip(),
+    "TELEGRAM_TOKEN": os.getenv("TELEGRAM_TOKEN", "").strip(),
+    "TELEGRAM_CHAT_ID": os.getenv("TELEGRAM_CHAT_ID", "").strip(),
+    "ELEVEN_API_KEY": os.getenv("ELEVEN_API_KEY", "").strip(),
+    "ELEVEN_VOICE_ID": os.getenv("ELEVEN_VOICE_ID", "").strip(),
+}
+# Server settings
+PORT = int(os.getenv("PORT", 7860))
+HOST = os.getenv("HOST", "0.0.0.0")
+
+# -------------------------
+# STORAGE & FILES
+# -------------------------
+BASE = pathlib.Path.cwd()
+DATA_DIR = BASE / "data"
+DATA_DIR.mkdir(exist_ok=True)
+HISTORY_FILE = DATA_DIR / "history.json"
+USAGE_FILE = DATA_DIR / "usage.json"
+LATEST_MP3 = DATA_DIR / "latest_reply.mp3"
+
+# -------------------------
+# Helpers: JSON safe
+# -------------------------
+def load_json_safe(path: pathlib.Path, default):
+    try:
+        if path.exists():
+            return json.loads(path.read_text(encoding="utf-8"))
+    except Exception:
+        logger.exception("load_json_safe failed for %s", path)
+    return default
+
+def save_json_safe(path: pathlib.Path, data) -> bool:
+    try:
+        path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
+        return True
+    except Exception:
+        logger.exception("save_json_safe failed for %s", path)
+        return False
+
+# -------------------------
+# USAGE & HISTORY
+# -------------------------
+def today_str() -> str:
+    return datetime.utcnow().strftime("%Y-%m-%d")
+
+def load_usage():
+    default = {"date": today_str(), "requests_today": 0, "tokens_month": 0}
+    return load_json_safe(USAGE_FILE, default)
+
+def save_usage(u):
+    return save_json_safe(USAGE_FILE, u)
+
+def increment_usage(tokens=1):
+    u = load_usage()
+    if u.get("date") != today_str():
+        u = {"date": today_str(), "requests_today": 0, "tokens_month": u.get("tokens_month", 0)}
+    u["requests_today"] = u.get("requests_today", 0) + 1
+    u["tokens_month"] = u.get("tokens_month", 0) + int(tokens)
+    save_usage(u)
+
+def append_history(entry: dict):
+    h = load_json_safe(HISTORY_FILE, [])
+    h.append(entry)
+    if len(h) > 1000:
+        h = h[-1000:]
+    save_json_safe(HISTORY_FILE, h)
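
A quick illustrative sketch of the daily rollover in increment_usage (the helpers above define the behavior; the literal counts here are hypothetical):

# Sketch: two requests recorded today. requests_today resets when the stored
# date differs from today_str(), but tokens_month keeps accumulating.
increment_usage(tokens=10)
increment_usage(tokens=5)
u = load_usage()
# u -> {"date": "<today>", "requests_today": 2, "tokens_month": 15}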
+# -------------------------
+# Language detection (simple)
+# -------------------------
+import re
+VIET_CHAR_RE = re.compile(
+    r"[àáạảãâầấậẩẫăằắặẳẵđèéẹẻẽêềếệểễìíịỉĩòóọỏõôồốộổỗơờớợởỡùúụủũưừứựửữỳýỵỷỹ]",
+    re.I
+)
+def detect_lang(text: str) -> str:
+    if not text or not isinstance(text, str):
+        return "en"
+    if VIET_CHAR_RE.search(text):
+        return "vi"
+    low = text.lower()
+    en_signs = ["hello", "what", "how", "please", "thank", "today", "weather"]
+    for w in en_signs:
+        if w in low:
+            return "en"
+    return "en"
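
A few spot checks of the heuristic (any Vietnamese diacritic wins; everything else falls back to English):

assert detect_lang("Xin chào, bạn khỏe không?") == "vi"   # diacritics match VIET_CHAR_RE
assert detect_lang("hello, what's the weather today?") == "en"
assert detect_lang("") == "en"                            # empty or non-string input defaults to English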
+# -------------------------
+# Gemini wrapper (new client preferred)
+# -------------------------
+def ensure_genai_client():
+    global GENAI_CLIENT
+    if GENAI_CLIENT:
+        return GENAI_CLIENT
+    key = CFG.get("GEMINI_API_KEY") or ""
+    if not key:
+        logger.warning("No GEMINI key configured.")
+        return None
+    try:
+        # new style
+        try:
+            from google import genai as genai_new
+            GENAI_CLIENT = genai_new.Client(api_key=key)
+            logger.info("Gemini client (new) configured.")
+            return GENAI_CLIENT
+        except Exception:
+            pass
+        # older google.generativeai style
+        try:
+            import google.generativeai as genai_old
+            genai_old.configure(api_key=key)
+            GENAI_CLIENT = genai_old
+            logger.info("Gemini client (old) configured.")
+            return GENAI_CLIENT
+        except Exception:
+            pass
+    except Exception:
+        logger.exception("ensure_genai_client failed")
+    return None
+
+def call_gemini(prompt: str, temperature: float = 0.2, max_tokens: int = 512) -> Dict[str, Any]:
+    key = CFG.get("GEMINI_API_KEY") or ""
+    model = CFG.get("GEMINI_MODEL") or "models/gemini-2.5-flash"
+    if not key:
+        return {"ok": False, "error": "Gemini API key not configured"}
+    client = ensure_genai_client()
+    # try SDK client first
+    if client:
+        try:
+            # new Client API (genai.Client)
+            if hasattr(client, "models") and hasattr(client.models, "generate_content"):
+                resp = client.models.generate_content(model=model, contents=prompt)
+                # Response may have text attribute
+                text = getattr(resp, "text", None)
+                if text:
+                    return {"ok": True, "text": text}
+                # fallback: try output field
+                if isinstance(resp, dict):
+                    return {"ok": True, "text": json.dumps(resp)[:2000]}
+                return {"ok": True, "text": str(resp)}
+            # older google.generativeai
+            if hasattr(client, "generate_text") or hasattr(client, "generate"):
+                # try older generate_text
+                try:
+                    resp = client.generate_text(model=model, prompt=prompt, temperature=temperature)
+                    text = getattr(resp, "text", None) or resp.get("candidates", [{}])[0].get("content", "")
+                    return {"ok": True, "text": text}
+                except Exception:
+                    pass
+        except Exception:
+            logger.exception("Gemini SDK call failed; will try REST fallback.")
+    # REST fallback (v1beta)
+    try:
+        url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generate"
+        headers = {"Content-Type": "application/json"}
+        payload = {
+            "prompt": {
+                "messages": [
+                    {"author": "system", "content": {"text": "You are a helpful assistant."}},
+                    {"author": "user", "content": {"text": prompt}}
+                ]
+            },
+            "maxOutputTokens": max_tokens,
+            "temperature": temperature
+        }
+        r = requests.post(url, params={"key": key}, json=payload, headers=headers, timeout=30)
+        if r.status_code >= 400:
+            return {"ok": False, "error": f"HTTP {r.status_code}: {r.text}"}
+        j = r.json()
+        # parse candidate content
+        cand = j.get("candidates")
+        if cand and isinstance(cand, list):
+            c0 = cand[0]
+            content = c0.get("content")
+            if isinstance(content, list):
+                parts = []
+                for c in content:
+                    if isinstance(c, dict) and "text" in c:
+                        parts.append(c["text"])
+                if parts:
+                    return {"ok": True, "text": "".join(parts)}
+        # fallback to output field
+        output = j.get("output")
+        if isinstance(output, str):
+            return {"ok": True, "text": output}
+        # else return truncated json
+        return {"ok": True, "text": json.dumps(j)[:2000]}
+    except Exception as e:
+        logger.exception("Gemini REST error")
+        return {"ok": False, "error": str(e)}
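
Whichever backend serves the request (new SDK, old SDK, or the REST fallback), callers only ever see the {ok, text|error} dict. A minimal sketch, assuming GEMINI_API_KEY is set in the environment; the prompt string is just an example:

result = call_gemini("Summarize what this robot can do in one sentence.")
if result.get("ok"):
    print(result["text"])
else:
    print("Gemini failed:", result.get("error"))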
+# -------------------------
+# TTS (ElevenLabs -> gTTS fallback)
+# -------------------------
+def tts_elevenlabs_bytes(text: str, voice_id: str, api_key: str) -> bytes:
+    url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
+    headers = {"xi-api-key": api_key, "Content-Type": "application/json"}
+    payload = {"text": text, "voice_settings": {"stability": 0.6, "similarity_boost": 0.75}}
+    r = requests.post(url, json=payload, headers=headers, timeout=30)
+    r.raise_for_status()
+    return r.content
+
+def tts_gtts_bytes(text: str, lang: str = "vi") -> bytes:
+    tts = gTTS(text=text, lang=lang)
+    bio = io.BytesIO()
+    tts.write_to_fp(bio)
+    bio.seek(0)
+    return bio.read()
+
+def synthesize_and_save(answer: str, lang_hint: str = "vi") -> Tuple[bool, str]:
+    try:
+        mp3_bytes = None
+        if CFG.get("ELEVEN_API_KEY") and CFG.get("ELEVEN_VOICE_ID"):
+            try:
+                mp3_bytes = tts_elevenlabs_bytes(answer, CFG["ELEVEN_VOICE_ID"], CFG["ELEVEN_API_KEY"])
+                logger.info("TTS: used ElevenLabs")
+            except Exception:
+                logger.exception("ElevenLabs failed -> fallback gTTS")
+                mp3_bytes = None
+        if mp3_bytes is None:
+            lang_code = "vi" if lang_hint.startswith("vi") else "en"
+            mp3_bytes = tts_gtts_bytes(answer, lang=lang_code)
+            logger.info("TTS: used gTTS")
+        with open(LATEST_MP3, "wb") as f:
+            f.write(mp3_bytes)
+        return True, str(LATEST_MP3)
+    except Exception as e:
+        logger.exception("synthesize_and_save failed")
+        return False, f"TTS error: {e}"
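
The fallback chain keeps TTS errors away from the routes: ElevenLabs is tried only when both the key and voice id are configured, and gTTS covers everything else. A usage sketch (the sample sentence is illustrative):

ok, path_or_err = synthesize_and_save("Xin chào! Tôi là KCrobot.", lang_hint="vi")
if ok:
    print("MP3 written to", path_or_err)  # data/latest_reply.mp3, served by /play_latest
else:
    print("TTS failed:", path_or_err)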
+# -------------------------
+# Telegram notify
+# -------------------------
+def send_telegram_message(text: str) -> bool:
+    token = CFG.get("TELEGRAM_TOKEN") or ""
+    cid = CFG.get("TELEGRAM_CHAT_ID") or ""
+    if not token or not cid:
+        logger.info("Telegram not configured.")
+        return False
+    try:
+        url = f"https://api.telegram.org/bot{token}/sendMessage"
+        requests.post(url, json={"chat_id": cid, "text": text}, timeout=8)
+        return True
+    except Exception:
+        logger.exception("send_telegram_message failed")
+        return False
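
send_telegram_message is deliberately fire-and-forget: failures are logged, never raised, which is why the routes below can run it on a daemon thread. Called directly it is just:

# True once the POST was attempted; False if unconfigured or the request raised
if send_telegram_message("KCrobot v2 deployed"):
    print("Telegram notified")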
+# -------------------------
+# Flask app & UI
+# -------------------------
+app = Flask(__name__)
+
+INDEX_HTML = """
+<!doctype html>
+<html>
+<head><meta charset="utf-8"><title>KCrobot AI V2 Max</title>
+<style>
+body{font-family:Arial;background:#071225;color:#fff;padding:20px}
+.container{max-width:900px;margin:0 auto}
+textarea{width:100%;padding:10px;border-radius:8px;background:#0b1221;color:#fff;border:1px solid #134}
+button{padding:10px 14px;border-radius:8px;background:#0ea5ff;color:#012;border:none;cursor:pointer}
+#resp{white-space:pre-wrap;margin-top:12px;background:#071025;padding:10px;border-radius:6px}
+audio{margin-top:12px}
+.small{font-size:0.9rem;color:#9fb3c8}
+</style>
+</head>
+<body>
+<div class="container">
+<h1>🤖 KCrobot AI — V2 MAX</h1>
+<p class="small">Model: {{model}} — Gemini Key: {{gemini}} — Telegram: {{tg}}</p>
+<textarea id="q" rows="4" placeholder="Nhập tiếng Việt / English..."></textarea>
+<p>
+<label><input id="voice" type="checkbox" checked> Voice ON</label>
+<button onclick="send()">Gửi & Nghe</button>
+</p>
+<div id="resp"></div>
+<audio id="audio" controls style="display:none"></audio>
+<script>
+async function send(){
+  const q = document.getElementById('q').value;
+  const voice = document.getElementById('voice').checked;
+  if(!q){ alert('Nhập nội dung'); return; }
+  document.getElementById('resp').innerText = '⏳ Đang xử lý...';
+  const res = await fetch('/api/chat', {
+    method: 'POST', headers: {'Content-Type':'application/json'},
+    body: JSON.stringify({q: q, voice: voice})
+  });
+  const j = await res.json();
+  if(j.error){ document.getElementById('resp').innerText = 'Error: ' + j.error; return; }
+  document.getElementById('resp').innerText = j.answer;
+  if(j.play_url){
+    const audio = document.getElementById('audio');
+    audio.src = j.play_url + '?t=' + Date.now();
+    audio.style.display='block';
+    audio.play();
+  }
+}
+</script>
+</div>
+</body>
+</html>
+"""
+
+@app.route("/", methods=["GET"])
+def root():
+    model = CFG.get("GEMINI_MODEL","n/a")
+    gem = bool(CFG.get("GEMINI_API_KEY"))
+    tg = bool(CFG.get("TELEGRAM_TOKEN") and CFG.get("TELEGRAM_CHAT_ID"))
+    return render_template_string(INDEX_HTML, model=model, gemini=("✅" if gem else "❌"), tg=("✅" if tg else "❌"))
+@app.route("/api/chat", methods=["POST"])
+def api_chat():
+    data = request.get_json(silent=True) or {}
+    q = data.get("q") or data.get("question") or ""
+    voice_on = bool(data.get("voice", True))
+    if not q or not str(q).strip():
+        return jsonify({"error":"missing 'q'"}), 400
+    lang = detect_lang(q)
+    prompt = (f"Bạn là robot trợ lý, trả lời bằng tiếng Việt tự nhiên: {q}" if lang=="vi"
+              else f"You are a helpful assistant. Answer in natural English: {q}")
+    gem = call_gemini(prompt)
+    if not gem.get("ok"):
+        answer = f"[Gemini error] {gem.get('error')}"
+    else:
+        answer = gem.get("text","")
+    increment_usage(max(1, len(answer.split())))
+    append_history({"ts": time.time(), "q": q, "answer": answer, "lang": lang})
+    play_url = None
+    if voice_on:
+        ok, path_or_err = synthesize_and_save(answer, lang_hint=lang)
+        if ok:
+            play_url = "/play_latest"
+    # Telegram notify in background
+    try:
+        # keep simple: send summary
+        if CFG.get("TELEGRAM_TOKEN") and CFG.get("TELEGRAM_CHAT_ID"):
+            # non-blocking
+            import threading
+            threading.Thread(target=send_telegram_message, args=(f"Q: {q}\nA: {answer}",)).start()
+    except Exception:
+        pass
+    resp = {"answer": answer}
+    if play_url:
+        resp["play_url"] = play_url
+    return jsonify(resp)
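
From the client side (an ESP32 helper script, a laptop test, and so on) the round trip looks like this. A sketch only: the base URL is an assumption for local testing, not part of the commit:

import requests

BASE = "http://localhost:7860"  # assumed local test URL

r = requests.post(f"{BASE}/api/chat",
                  json={"q": "Hôm nay thời tiết thế nào?", "voice": True},
                  timeout=60)
j = r.json()
print(j.get("answer"))
if j.get("play_url"):  # present only when TTS succeeded
    mp3 = requests.get(BASE + j["play_url"], timeout=30).content
    open("reply.mp3", "wb").write(mp3)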
+@app.route("/api/chat_audio", methods=["POST"])
+def api_chat_audio():
+    # Accept wav upload as raw body or file named 'file'
+    wav_bytes = None
+    if 'file' in request.files:
+        wav_bytes = request.files['file'].read()
+    else:
+        wav_bytes = request.get_data()
+    if not wav_bytes:
+        return jsonify({"error":"no audio provided"}), 400
+    # If client provides X-Text header (pre-transcribed text), use it
+    provided_text = request.headers.get("X-Text","")
+    if provided_text:
+        q = provided_text
+    else:
+        # Server STT not implemented in this build
+        return jsonify({"error":"STT not enabled on server. Please send text or add STT implementation."}), 501
+    lang = detect_lang(q)
+    prompt = (f"Bạn là robot trợ lý, trả lời bằng tiếng Việt tự nhiên: {q}" if lang=="vi"
+              else f"You are a helpful assistant. Answer in natural English: {q}")
+    gem = call_gemini(prompt)
+    if not gem.get("ok"):
+        answer = f"[Gemini error] {gem.get('error')}"
+    else:
+        answer = gem.get("text","")
+    append_history({"ts": time.time(), "q": q, "answer": answer, "lang": lang})
+    ok, path_or_err = synthesize_and_save(answer, lang_hint=lang)
+    if ok:
+        return jsonify({"question": q, "answer": answer, "play_url": "/play_latest"})
+    return jsonify({"error":"tts_failed", "details": path_or_err}), 500
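
Since server-side STT deliberately returns 501 in this build, a client that already has a transcript can still exercise the endpoint through the X-Text header. A sketch continuing the client above (recording.wav is a placeholder file name; header values must stay ASCII-safe for requests):

with open("recording.wav", "rb") as f:
    r = requests.post(f"{BASE}/api/chat_audio",
                      data=f.read(),
                      headers={"X-Text": "what time is it now?"},  # pre-transcribed text
                      timeout=60)
print(r.json())  # {"question": ..., "answer": ..., "play_url": "/play_latest"} on success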
+
+@app.route("/play_latest", methods=["GET"])
+def play_latest():
+    if not LATEST_MP3.exists():
+        return jsonify({"error":"no audio"}), 404
+    return send_file(str(LATEST_MP3), mimetype="audio/mpeg")
+
+@app.route("/notify", methods=["POST"])
+def notify():
+    data = request.get_json(silent=True) or {}
+    event = data.get("event","event")
+    msg = data.get("msg","")
+    try:
+        import threading
+        threading.Thread(target=send_telegram_message, args=(f"[Robot Notify] {event}: {msg}",)).start()
+    except Exception:
+        pass
+    return jsonify({"sent": True})
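
Anything that can send HTTP (the ESP32 radar, a cron job) can forward events to Telegram through /notify; the handler replies immediately and does the sending on a background thread. Continuing the same sketch:

requests.post(f"{BASE}/notify",
              json={"event": "presence", "msg": "Person detected at front door"},
              timeout=10)
# -> {"sent": true} even if Telegram is unconfigured; delivery is best-effort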

+@app.route("/history", methods=["GET"])
+def history():
+    h = load_json_safe(HISTORY_FILE, [])
+    return jsonify(h)
+
+@app.route("/health", methods=["GET"])
+def health():
+    return jsonify({"status":"ok","time":time.time(),"model":CFG.get("GEMINI_MODEL","n/a")})
+
+# -------------------------
+# Startup
+# -------------------------
+if __name__ == "__main__":
+    # ensure history/usage exist
+    load_json_safe(HISTORY_FILE, [])
+    load_usage()
+    logger.info("KCrobot v2 starting. Gemini: %s, Eleven: %s, Telegram: %s",
+                bool(CFG.get("GEMINI_API_KEY")), bool(CFG.get("ELEVEN_API_KEY")), bool(CFG.get("TELEGRAM_TOKEN")))
+    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))