# app.py — KC Robot AI V5.2 (Full)
# Flask server + Hugging Face inference (LLM/STT/TTS) + Telegram poller + web UI (browser mic)
# Secrets expected in HF Space Settings:
#   HF_API_TOKEN (required)
#   TELEGRAM_TOKEN (optional)
#   TELEGRAM_CHATID (optional)
# Optional env overrides:
#   HF_MODEL, HF_TTS_MODEL, HF_STT_MODEL, PORT

import io
import json
import logging
import os
import threading
import time
from pathlib import Path
from typing import List, Optional, Tuple

import requests
from flask import Flask, jsonify, render_template_string, request, send_file

# ---------- config & logging ----------
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("kcrobot.v5.2")

app = Flask(__name__)
TMP_DIR = Path("/tmp/kcrobot")
TMP_DIR.mkdir(parents=True, exist_ok=True)

# Environment / Secrets (set in Space -> Settings -> Secrets)
HF_API_TOKEN = os.getenv("HF_API_TOKEN", "").strip()
HF_MODEL = os.getenv("HF_MODEL", "google/flan-t5-large").strip()
HF_TTS_MODEL = os.getenv("HF_TTS_MODEL", "doanthang/vietTTS-southern-female").strip()
HF_STT_MODEL = os.getenv("HF_STT_MODEL", "openai/whisper-small").strip()
TELEGRAM_TOKEN = os.getenv("TELEGRAM_TOKEN", "").strip()
TELEGRAM_CHATID = os.getenv("TELEGRAM_CHATID", "").strip()
PORT = int(os.getenv("PORT", 7860))

if not HF_API_TOKEN:
    logger.warning("⚠️ HF_API_TOKEN not set — put your Hugging Face token into Space Secrets (HF_API_TOKEN).")

HF_HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"} if HF_API_TOKEN else {}
HF_API_BASE = "https://api-inference.huggingface.co/models/"

# ---------- in-memory state ----------
CONV: List[Tuple[str, str]] = []      # (user, bot) conversation pairs
DISPLAY_LINES: List[str] = []         # lines for small OLED displays


def push_display(line: str, limit: int = 8) -> None:
    """Append a line to the display buffer, keeping at most `limit` lines.

    Trims in place (via `del`) so external references to DISPLAY_LINES stay valid.
    """
    DISPLAY_LINES.append(line)
    if len(DISPLAY_LINES) > limit:
        del DISPLAY_LINES[: len(DISPLAY_LINES) - limit]


# ---------- Hugging Face helpers ----------
def _hf_model_url(model_id: str) -> str:
    """Build the HF inference endpoint URL for a model id."""
    return HF_API_BASE + model_id


def _require_token() -> None:
    """Fail fast with a clear message when the HF token is not configured."""
    if not HF_API_TOKEN:
        raise RuntimeError("HF_API_TOKEN missing (set in Space Secrets).")


def hf_post_json(model_id: str, payload: dict, timeout: int = 120):
    """POST a JSON payload to the HF inference API.

    Returns parsed JSON when possible, otherwise the raw response bytes
    (some models answer with binary content even on a JSON request).
    Raises RuntimeError when the token is missing, requests.HTTPError on 4xx/5xx.
    """
    _require_token()
    r = requests.post(
        _hf_model_url(model_id),
        headers={**HF_HEADERS, "Content-Type": "application/json"},
        json=payload,
        timeout=timeout,
    )
    if not r.ok:
        logger.error("HF JSON POST error %s: %s", r.status_code, r.text[:400])
    r.raise_for_status()
    try:
        return r.json()
    except Exception:
        return r.content


def hf_post_bytes(model_id: str, data: bytes,
                  content_type: str = "application/octet-stream",
                  timeout: int = 180):
    """POST raw bytes (e.g. audio) to the HF inference API; return the Response."""
    _require_token()
    headers = dict(HF_HEADERS)
    headers["Content-Type"] = content_type
    r = requests.post(_hf_model_url(model_id), headers=headers, data=data, timeout=timeout)
    if not r.ok:
        logger.error("HF BYTES POST error %s: %s", r.status_code, r.text[:400])
    r.raise_for_status()
    return r


def hf_text_generate(prompt: str, model: Optional[str] = None,
                     max_new_tokens: int = 256, temperature: float = 0.7) -> str:
    """Run text generation on the configured LLM and return the generated text.

    Handles the two typical HF response shapes: a list of dicts with
    'generated_text', or a single dict; falls back to str() for anything else.
    """
    model = model or HF_MODEL
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": int(max_new_tokens),
            "temperature": float(temperature),
        },
        # wait_for_model avoids 503s while the model container cold-starts
        "options": {"wait_for_model": True},
    }
    out = hf_post_json(model, payload, timeout=120)
    if isinstance(out, list) and len(out) and isinstance(out[0], dict):
        return out[0].get("generated_text", "") or str(out[0])
    if isinstance(out, dict):
        return out.get("generated_text") or out.get("text") or str(out)
    return str(out)


def hf_tts_bytes(text: str, model: Optional[str] = None) -> bytes:
    """Synthesize speech for `text`; return raw audio bytes.

    The byte format depends on the TTS model (typically mp3 or wav) — callers
    decide the mimetype.  Raises RuntimeError when the token is missing
    (consistent with the other HF helpers) and HTTPError on API failure.
    """
    model = model or HF_TTS_MODEL
    _require_token()
    r = requests.post(
        _hf_model_url(model),
        headers={**HF_HEADERS, "Content-Type": "application/json"},
        json={"inputs": text},
        timeout=120,
    )
    if not r.ok:
        logger.error("HF TTS error %s: %s", r.status_code, r.text[:400])
    r.raise_for_status()
    return r.content


def hf_stt_from_bytes(audio_bytes: bytes, model: Optional[str] = None) -> str:
    """Transcribe audio bytes with the configured STT model; return the text.

    Accepts the usual Whisper-style response ({'text': ...}) plus a
    list-of-dicts shape; anything unexpected is stringified.
    """
    model = model or HF_STT_MODEL
    r = hf_post_bytes(model, audio_bytes, content_type="application/octet-stream", timeout=180)
    try:
        j = r.json()
        if isinstance(j, dict) and "text" in j:
            return j["text"]
        if isinstance(j, list) and len(j) and isinstance(j[0], dict) and "text" in j[0]:
            return j[0]["text"]
        return str(j)
    except Exception:
        # Non-JSON body: return it as plain text.
        return r.text


# ---------- Telegram helpers ----------
def send_telegram_message(text: str):
    """Best-effort sendMessage to the configured chat; returns True on success."""
    if not TELEGRAM_TOKEN or not TELEGRAM_CHATID:
        logger.debug("Telegram not configured.")
        return False
    try:
        url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"
        r = requests.post(url, json={"chat_id": TELEGRAM_CHATID, "text": text}, timeout=10)
        if not r.ok:
            logger.warning("Telegram send failed: %s %s", r.status_code, r.text[:200])
        return r.ok
    except Exception:
        logger.exception("send_telegram_message failed")
        return False


def telegram_send_audio(chat_id: str, audio_bytes: bytes, filename: str = "reply.mp3"):
    """Best-effort sendAudio upload to a chat; returns True on success."""
    if not TELEGRAM_TOKEN:
        return False
    try:
        url = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendAudio"
        files = {"audio": (filename, audio_bytes, "audio/mpeg")}
        data = {"chat_id": chat_id}
        r = requests.post(url, files=files, data=data, timeout=30)
        if not r.ok:
            logger.warning("Telegram sendAudio failed: %s %s", r.status_code, r.text[:200])
        return r.ok
    except Exception:
        logger.exception("telegram_send_audio failed")
        return False


def telegram_poll_loop():
    """Long-poll getUpdates forever, handling /ask, /say and /status commands.

    Runs in a daemon thread; every iteration is wrapped so a transient
    failure sleeps 3s and keeps polling instead of killing the thread.
    """
    if not TELEGRAM_TOKEN:
        logger.info("Telegram poller disabled (no TELEGRAM_TOKEN).")
        return
    base = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}"
    offset = None
    logger.info("Telegram poller started.")
    while True:
        try:
            params = {"timeout": 30}
            # `is not None`: an offset of 0 is valid and must still be sent.
            if offset is not None:
                params["offset"] = offset
            r = requests.get(base + "/getUpdates", params=params, timeout=35)
            if not r.ok:
                time.sleep(2)
                continue
            j = r.json()
            for upd in j.get("result", []):
                offset = upd.get("update_id", 0) + 1
                msg = upd.get("message") or {}
                chat = msg.get("chat", {})
                chat_id = chat.get("id")
                text = (msg.get("text") or "").strip()
                if not text:
                    continue
                logger.info("TG msg %s: %s", chat_id, text)
                low = text.lower()
                if low.startswith("/ask "):
                    q = text[5:].strip()
                    ans = hf_text_generate(q)
                    try:
                        requests.post(base + "/sendMessage",
                                      json={"chat_id": chat_id, "text": ans}, timeout=10)
                    except Exception:
                        logger.exception("tg reply failed")
                elif low.startswith("/say "):
                    phrase = text[5:].strip()
                    # try TTS then send audio
                    try:
                        audio = hf_tts_bytes(phrase)
                        telegram_send_audio(chat_id, audio, filename="say.mp3")
                    except Exception:
                        logger.exception("tg say failed")
                elif low.startswith("/status"):
                    try:
                        requests.post(base + "/sendMessage",
                                      json={"chat_id": chat_id, "text": "KC Robot brain running."},
                                      timeout=10)
                    except Exception:
                        pass
                else:
                    try:
                        requests.post(base + "/sendMessage",
                                      json={"chat_id": chat_id,
                                            "text": "Commands: /ask | /say | /status"},
                                      timeout=10)
                    except Exception:
                        pass
        except Exception:
            logger.exception("telegram_poll_loop crashed, sleeping 3s")
            time.sleep(3)


# start telegram poller thread if token exists
if TELEGRAM_TOKEN:
    try:
        t = threading.Thread(target=telegram_poll_loop, daemon=True)
        t.start()
    except Exception:
        logger.exception("Failed to start telegram poller thread")


# ---------- Routes (ESP32 & web) ----------
@app.route("/health", methods=["GET"])
def health():
    """Report configuration status (tokens present, model names)."""
    return jsonify({
        "ok": True,
        "hf_token": bool(HF_API_TOKEN),
        "hf_model": HF_MODEL,
        "tts_model": HF_TTS_MODEL,
        "stt_model": HF_STT_MODEL,
        "telegram": bool(TELEGRAM_TOKEN),
    })


@app.route("/ask", methods=["POST"])
def route_ask():
    """JSON {text, lang?} -> {answer}; also logs to CONV/display and Telegram."""
    data = request.get_json(force=True, silent=True) or {}
    text = (data.get("text") or "").strip()
    lang = (data.get("lang") or "auto").strip().lower()
    if not text:
        return jsonify({"error": "no text"}), 400
    # make instructive prompt
    if lang == "vi":
        prompt = "Bạn là trợ lý thông minh, trả lời bằng tiếng Việt, rõ ràng và ngắn gọn:\n\n" + text
    elif lang == "en":
        prompt = "You are a helpful assistant. Answer in clear English:\n\n" + text
    else:
        prompt = "You are a bilingual assistant (Vietnamese/English). Answer in the language of the user.\n\n" + text
    try:
        ans = hf_text_generate(prompt)
    except Exception as e:
        logger.exception("hf_text_generate failed")
        return jsonify({"error": str(e)}), 500
    CONV.append((text, ans))
    push_display("YOU: " + text[:80])
    push_display("BOT: " + ans[:80])
    # optionally notify telegram summary
    if TELEGRAM_TOKEN and TELEGRAM_CHATID:
        try:
            send_telegram_message(f"You: {text}\nBot: {ans}")
        except Exception:
            logger.exception("telegram notify failed")
    return jsonify({"answer": ans})


@app.route("/tts", methods=["POST"])
def route_tts():
    """JSON {text} -> audio bytes (served as audio/mpeg)."""
    data = request.get_json(force=True, silent=True) or {}
    text = (data.get("text") or "").strip()
    if not text:
        return jsonify({"error": "no text"}), 400
    try:
        audio = hf_tts_bytes(text)
    except Exception as e:
        logger.exception("hf_tts failed")
        return jsonify({"error": str(e)}), 500
    # return audio bytes as audio/mpeg (most outputs are mp3 or wav; we choose audio/mpeg)
    return send_file(io.BytesIO(audio), mimetype="audio/mpeg",
                     as_attachment=False, download_name="tts.mp3")


@app.route("/stt", methods=["POST"])
def route_stt():
    """Transcribe uploaded audio: multipart form 'file' or raw request body."""
    try:
        if "file" in request.files:
            f = request.files["file"]
            audio_bytes = f.read()
        else:
            audio_bytes = request.get_data() or b""
        if not audio_bytes:
            return jsonify({"error": "no audio"}), 400
        txt = hf_stt_from_bytes(audio_bytes)
        push_display("STT: " + (txt[:80] if isinstance(txt, str) else str(txt)))
        return jsonify({"text": txt})
    except Exception as e:
        logger.exception("route_stt failed")
        return jsonify({"error": str(e)}), 500


@app.route("/presence", methods=["POST"])
def route_presence():
    """Radar/presence event from the ESP32: log it, pre-warm TTS, notify Telegram."""
    data = request.get_json(force=True, silent=True) or {}
    note = (data.get("note") or "Có người đến gần").strip()
    greeting_vi = f"Xin chào! {note}"
    greeting_en = "Hello! Someone is near the robot."
    combined = f"{greeting_vi}\n{greeting_en}"
    CONV.append(("__presence__", combined))
    push_display("RADAR: " + note[:80])

    # generate greeting TTS asynchronously (pre-warm HF model) so ESP32 can call /tts if it wants
    def gen_greeting():
        try:
            _ = hf_tts_bytes(greeting_vi)
        except Exception:
            logger.exception("generate greeting vi failed")
        try:
            _ = hf_tts_bytes(greeting_en)
        except Exception:
            logger.exception("generate greeting en failed")

    threading.Thread(target=gen_greeting, daemon=True).start()
    # notify telegram
    if TELEGRAM_TOKEN and TELEGRAM_CHATID:
        send_telegram_message("⚠️ Robot: Phát hiện người - " + note)
    return jsonify({"greeting": combined})


@app.route("/display", methods=["GET"])
def route_display():
    """Return the last display lines and conversation length (for OLED polling)."""
    return jsonify({"lines": DISPLAY_LINES[-8:], "conv_len": len(CONV)})


# ---------- Simple Web UI with browser STT & TTS (for testing) ----------
# NOTE(review): the original template's markup was lost/garbled in this copy of
# the file; this is a minimal reconstruction that preserves the visible texts
# and exercises /ask + /tts. Restore the full UI (browser mic STT) from VCS if
# available.
INDEX_HTML = r"""<!DOCTYPE html>
<html lang="vi">
<head>
<meta charset="utf-8">
<title>KC Robot AI V5.2</title>
</head>
<body>
<h1>🤖 KC Robot AI V5.2 — Full</h1>
<p>Cần HF_API_TOKEN trong Secrets để STT/TTS/LLM hoạt động.</p>
<input id="q" placeholder="Hỏi robot...">
<button onclick="ask()">Ask</button>
<button onclick="say()">Speak</button>
<pre id="out"></pre>
<audio id="player" controls></audio>
<script>
async function ask() {
  const text = document.getElementById('q').value;
  const r = await fetch('/ask', {method: 'POST',
    headers: {'Content-Type': 'application/json'},
    body: JSON.stringify({text: text})});
  const j = await r.json();
  document.getElementById('out').textContent = j.answer || j.error || '';
}
async function say() {
  const text = document.getElementById('out').textContent ||
               document.getElementById('q').value;
  const r = await fetch('/tts', {method: 'POST',
    headers: {'Content-Type': 'application/json'},
    body: JSON.stringify({text: text})});
  const blob = await r.blob();
  const player = document.getElementById('player');
  player.src = URL.createObjectURL(blob);
  player.play();
}
</script>
</body>
</html>
"""


@app.route("/", methods=["GET"])
def index():
    """Serve the test web UI."""
    return render_template_string(INDEX_HTML)


# ---------- startup: warm models + greeting ----------
def startup_actions():
    """One-time startup: log, push display line, warm TTS in background, notify Telegram."""
    logger.info("KC Robot AI V5.2 starting up.")
    greeting_vi = "Xin chào chủ nhân! KC Robot đã sẵn sàng."
    greeting_en = "Hello master! KC Robot is ready."
    push_display("SYSTEM: Robot online")

    # warm up: request TTS generation asynchronously (not blocking)
    def gen():
        try:
            if HF_API_TOKEN:
                _ = hf_tts_bytes(greeting_vi)
                _ = hf_tts_bytes(greeting_en)
        except Exception:
            logger.exception("warmup tts failed")

    threading.Thread(target=gen, daemon=True).start()
    # optional: notify telegram about startup
    if TELEGRAM_TOKEN and TELEGRAM_CHATID:
        try:
            send_telegram_message("KC Robot brain is online.")
        except Exception:
            logger.exception("telegram startup notify failed")


# Flask removed `before_first_request` in 2.3, which made the old
# `@app.before_first_request` hook crash at import time. The startup work is
# non-blocking (warm-up runs in a daemon thread), so run it once at module
# load instead — this also covers WSGI servers that import the app directly.
startup_actions()


# ---------- run ----------
if __name__ == "__main__":
    logger.info("Starting KC Robot AI V5.2 on port %s", PORT)
    app.run(host="0.0.0.0", port=PORT)