hoangthiencm commited on
Commit
daf2c98
·
verified ·
1 Parent(s): d45755e

Create server.py

Browse files
Files changed (1) hide show
  1. server.py +97 -0
server.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import logging
4
+ from flask import Flask, request, jsonify
5
+ import google.generativeai as genai
6
+ from flask_cors import CORS
7
+ from dotenv import load_dotenv
8
+
9
# Load environment variables from a local .env file, if present (no-op otherwise).
load_dotenv()

app = Flask(__name__)
CORS(app)  # allow cross-origin requests so a separately-hosted web client can call this API

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- CONFIGURATION FROM ENVIRONMENT VARIABLES ---
# 1. API KEYS: comma-separated list in GEMINI_API_KEYS; whitespace and empty
#    entries are dropped.
api_keys_env = os.getenv("GEMINI_API_KEYS", "")
API_KEYS = [k.strip() for k in api_keys_env.split(",") if k.strip()]

if not API_KEYS:
    # Warn (rather than crash) at startup; requests will fail with HTTP 500 later.
    logger.warning("WARN: Chưa tìm thấy GEMINI_API_KEYS! Server sẽ không hoạt động đúng.")

# 2. MODELS: comma-separated list in GEMINI_MODELS, with built-in defaults.
#    The first entry is used as the default model for /api/generate.
models_env = os.getenv("GEMINI_MODELS", "gemini-2.0-flash-exp,gemini-1.5-pro,gemini-1.5-flash")
GEMINI_MODELS = [m.strip() for m in models_env.split(",") if m.strip()]
30
+
31
@app.route('/')
def home():
    """Health-check endpoint: report server status, available models and key count."""
    payload = {
        "status": "online",
        "server": "HT MATH Central Server",
        "models_available": GEMINI_MODELS,
        "keys_loaded": len(API_KEYS),
    }
    return jsonify(payload)
39
+
40
@app.route('/api/models', methods=['GET'])
def get_models():
    """Return the configured Gemini model names as JSON."""
    body = {"models": GEMINI_MODELS}
    return jsonify(body)
44
+
45
@app.route('/api/generate', methods=['POST'])
def generate_content():
    """Proxy a text/image generation request to Gemini.

    Expects a JSON body with keys:
        prompt (str, optional): text prompt.
        image (str, optional): base64-encoded image bytes.
        model (str, optional): Gemini model name; defaults to the first
            configured model in GEMINI_MODELS.

    Returns:
        JSON ``{'result': <text>}`` on success, or ``{'error': <msg>}`` with
        an appropriate HTTP status (400/500) on failure.
    """
    if not API_KEYS:
        return jsonify({'error': 'Server chưa cấu hình API Key'}), 500

    try:
        # FIX: request.json raises on a non-JSON body, which made the explicit
        # 400 branch below unreachable. silent=True yields None instead.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No data provided'}), 400

        prompt = data.get('prompt', '')
        image_data = data.get('image')
        model_name = data.get('model', GEMINI_MODELS[0])

        if not prompt and not image_data:
            return jsonify({'error': 'Cần cung cấp nội dung hoặc hình ảnh'}), 400

        # --- LOAD BALANCING: pick a random key per request ---
        current_key = random.choice(API_KEYS)
        genai.configure(api_key=current_key)

        # Log only a masked form of the key, never the full secret.
        masked_key = current_key[:5] + "..." + current_key[-3:]
        logger.info("Request -> Model: %s | Key: %s", model_name, masked_key)

        model = genai.GenerativeModel(model_name)
        content_parts = [prompt]

        if image_data:
            try:
                import base64
                from io import BytesIO
                from PIL import Image
                image_bytes = base64.b64decode(image_data)
                image = Image.open(BytesIO(image_bytes))
                content_parts.append(image)
            except Exception as e:
                logger.error(f"Lỗi ảnh: {str(e)}")
                return jsonify({'error': f'Lỗi xử lý ảnh: {str(e)}'}), 400

        response = model.generate_content(content_parts)

        # FIX: response.text raises ValueError when the candidate was blocked
        # or empty, so the "no response" branch could never run. Guard it.
        try:
            result_text = response.text
        except ValueError:
            result_text = None

        if result_text:
            return jsonify({'result': result_text})
        else:
            return jsonify({'result': 'Không có phản hồi từ Gemini'}), 500

    except Exception as e:
        logger.error(f"Lỗi Server: {str(e)}")
        return jsonify({'error': str(e)}), 500
94
+
95
if __name__ == '__main__':
    # Run on port 7860 (the standard port for Hugging Face Spaces).
    # 0.0.0.0 binds all interfaces so the container's port mapping works.
    app.run(host='0.0.0.0', port=7860)