Spaces:
Running
Running
from flask import Flask, request, jsonify, Response
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import requests

# Initialize the Flask application.
app = Flask(__name__)

# Telegram bot token and sendMessage API URL.
# SECURITY NOTE(review): this bot token is hardcoded in source and has been
# exposed — move it to an environment variable / secret store and rotate it
# via BotFather before deploying.
TELEGRAM_TOKEN = "7967078631:AAH9viY8zWZ6mi7krxw1RSz5eycrI9Lce8Q"
TELEGRAM_URL = f"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage"

# Load a lightweight Chinese causal language model at import time
# (blocks startup until the weights are downloaded/cached).
model_name = "ckiplab/gpt2-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
| # 建立主回應函數 | |
| def generate_reply(prompt): | |
| inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512) | |
| outputs = model.generate( | |
| **inputs, | |
| max_new_tokens=40, | |
| do_sample=True, | |
| top_k=50, | |
| top_p=0.95, | |
| temperature=0.8, | |
| pad_token_id=tokenizer.eos_token_id | |
| ) | |
| reply = tokenizer.decode(outputs[0], skip_special_tokens=True) | |
| return reply.strip() | |
# ✅ Space home page / health check.
@app.route("/")
def index():
    """Return a plain-text banner confirming the server is running.

    The original function had no @app.route decorator, so it was never
    registered as an endpoint; "/" matches its role as the Space landing page.
    """
    return Response("✅ Flask server is running and ready to receive Telegram messages!", mimetype="text/plain")
# ✅ Telegram webhook endpoint: Telegram POSTs each update here.
# NOTE(review): the original had no route decorator; "/webhook" is assumed —
# confirm it matches the path registered with Telegram's setWebhook call.
@app.route("/webhook", methods=["POST"])
def telegram_webhook():
    """Handle one Telegram update: generate a reply and send it back.

    Always returns HTTP 200 so Telegram does not re-deliver the update.
    """
    data = request.get_json()
    # Ignore anything that is not a regular message update.
    if not data or "message" not in data:
        return jsonify({"status": "ignored"}), 200
    message = data["message"]
    chat_id = message["chat"]["id"]
    user_text = message.get("text", "")
    # Skip non-text messages (stickers, photos, etc.).
    if not user_text:
        return jsonify({"status": "no text"}), 200
    try:
        reply = generate_reply(user_text)
    except Exception:
        # Log the failure (the original silently discarded it) but still
        # send the user a friendly fallback message.
        app.logger.exception("generate_reply failed")
        reply = "⚠️ 系統錯誤,請稍後再試。"
    # Bounded timeout so a slow/unreachable Telegram API cannot hang the
    # worker indefinitely (requests has no default timeout).
    requests.post(TELEGRAM_URL, json={
        "chat_id": chat_id,
        "text": reply,
    }, timeout=10)
    return jsonify({"status": "ok"}), 200