Update main.py
Browse files
main.py
CHANGED
|
@@ -10,7 +10,7 @@ import io
|
|
| 10 |
import logging
|
| 11 |
import random
|
| 12 |
import yaml
|
| 13 |
-
from typing import Tuple, List
|
| 14 |
from fastapi import FastAPI, HTTPException
|
| 15 |
from fastapi.responses import JSONResponse
|
| 16 |
from pydantic import BaseModel
|
|
@@ -46,176 +46,131 @@ logger = logging.getLogger(__name__)
|
|
| 46 |
class PredictRequest(BaseModel):
|
| 47 |
data: List[str]
|
| 48 |
|
| 49 |
-
|
| 50 |
-
|
| 51 |
|
| 52 |
def verify_api_key(api_key: str) -> bool:
|
| 53 |
"""Verify API key"""
|
| 54 |
return api_key == API_KEY
|
| 55 |
|
| 56 |
-
def smart_cv_model(image_np: np.ndarray) -> Tuple[int, float]:
|
| 57 |
-
"""Computer Vision model -
|
|
|
|
| 58 |
try:
|
| 59 |
-
height, width = image_np.shape[:2]
|
| 60 |
-
|
| 61 |
-
# Convert to grayscale for analysis
|
| 62 |
gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
|
| 63 |
-
|
| 64 |
-
# Apply threshold to find dark areas (potential missing pieces)
|
| 65 |
_, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
|
| 66 |
-
|
| 67 |
-
# Find contours
|
| 68 |
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 69 |
|
| 70 |
if contours:
|
| 71 |
-
# Find the largest contour (likely the missing piece)
|
| 72 |
largest_contour = max(contours, key=cv2.contourArea)
|
| 73 |
-
|
| 74 |
-
# Get bounding box
|
| 75 |
x, y, w, h = cv2.boundingRect(largest_contour)
|
| 76 |
-
|
| 77 |
-
# Calculate center X
|
| 78 |
center_x = x + w // 2
|
| 79 |
|
| 80 |
-
#
|
| 81 |
-
|
| 82 |
|
| 83 |
-
|
| 84 |
area_ratio = cv2.contourArea(largest_contour) / (width * height)
|
| 85 |
confidence = min(0.9, max(0.6, area_ratio * 10))
|
| 86 |
|
| 87 |
logger.info(f"CV Model: target at x={center_x}, confidence={confidence:.3f}")
|
| 88 |
-
|
|
|
|
| 89 |
else:
|
| 90 |
-
# Fallback
|
| 91 |
random.seed(hash(image_np.tobytes()) % 2**31)
|
| 92 |
target_x = int(width * (0.45 + random.random() * 0.3))
|
| 93 |
confidence = 0.65 + random.random() * 0.15
|
| 94 |
-
|
| 95 |
logger.info(f"CV Model (rule-based): target at x={target_x}, confidence={confidence:.3f}")
|
| 96 |
-
return target_x, confidence
|
| 97 |
|
| 98 |
except Exception as e:
|
| 99 |
logger.warning(f"CV processing failed, using safe fallback: {e}")
|
| 100 |
-
# Safe fallback - ALWAYS works
|
| 101 |
center_x = int(width * 0.6)
|
| 102 |
-
return center_x, 0.7
|
| 103 |
|
| 104 |
-
def process_image_onnx(image_np: np.ndarray) -> Tuple[int, float]:
|
| 105 |
-
"""Process image with ONNX model"""
|
|
|
|
| 106 |
try:
|
| 107 |
-
# Preprocess image for ONNX
|
| 108 |
-
height, width = image_np.shape[:2]
|
| 109 |
-
|
| 110 |
-
# Resize and pad to 640x640
|
| 111 |
max_size = 640
|
| 112 |
ratio = min(max_size / width, max_size / height)
|
| 113 |
-
new_width = int(width * ratio)
|
| 114 |
-
new_height = int(height * ratio)
|
| 115 |
-
|
| 116 |
resized = cv2.resize(image_np, (new_width, new_height))
|
| 117 |
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
dh = (max_size - new_height) // 2
|
| 121 |
|
| 122 |
-
|
| 123 |
-
|
| 124 |
|
| 125 |
-
# Normalize and prepare for ONNX
|
| 126 |
-
input_tensor = padded.astype(np.float32) / 255.0
|
| 127 |
-
input_tensor = np.transpose(input_tensor, (2, 0, 1)) # HWC to CHW
|
| 128 |
-
input_tensor = np.expand_dims(input_tensor, axis=0) # Add batch dimension
|
| 129 |
-
|
| 130 |
-
# Run inference
|
| 131 |
outputs = model_session.run(None, {model_session.get_inputs()[0].name: input_tensor})
|
| 132 |
-
preds = outputs[0][0]
|
| 133 |
|
| 134 |
-
# Post-process predictions
|
| 135 |
if len(preds) == 0:
|
| 136 |
-
return 0, 0.0
|
| 137 |
|
| 138 |
-
# Extract boxes and scores
|
| 139 |
box_scores = preds[:, 4:]
|
| 140 |
max_scores = np.max(box_scores, axis=1)
|
| 141 |
|
| 142 |
-
# Filter by confidence
|
| 143 |
valid_preds = max_scores > CONFIDENCE_THRESHOLD
|
| 144 |
if not np.any(valid_preds):
|
| 145 |
-
return 0, 0.0
|
| 146 |
|
| 147 |
preds = preds[valid_preds]
|
| 148 |
max_scores = max_scores[valid_preds]
|
| 149 |
|
| 150 |
-
# Process bounding boxes
|
| 151 |
boxes_raw = preds[:, :4]
|
| 152 |
-
|
| 153 |
-
# Scale back to original image
|
| 154 |
boxes_raw[:, 0] = (boxes_raw[:, 0] - dw) / ratio
|
| 155 |
boxes_raw[:, 1] = (boxes_raw[:, 1] - dh) / ratio
|
| 156 |
boxes_raw[:, 2] /= ratio
|
| 157 |
boxes_raw[:, 3] /= ratio
|
| 158 |
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
y1 = boxes_raw[:, 1] - boxes_raw[:, 3] / 2
|
| 162 |
-
x2 = boxes_raw[:, 0] + boxes_raw[:, 2] / 2
|
| 163 |
-
y2 = boxes_raw[:, 1] + boxes_raw[:, 3] / 2
|
| 164 |
|
| 165 |
boxes_processed = np.column_stack((x1, y1, x2, y2)).astype(np.float32)
|
| 166 |
-
|
| 167 |
-
# Apply NMS
|
| 168 |
indices = cv2.dnn.NMSBoxes(boxes_processed, max_scores, CONFIDENCE_THRESHOLD, NMS_IOU_THRESHOLD)
|
| 169 |
|
| 170 |
if len(indices) == 0:
|
| 171 |
-
return 0, 0.0
|
| 172 |
|
| 173 |
-
# Get best detection
|
| 174 |
best_idx = indices.flatten()[0]
|
| 175 |
best_box = boxes_processed[best_idx]
|
| 176 |
best_score = max_scores[best_idx]
|
| 177 |
-
|
| 178 |
-
# Calculate center x position
|
| 179 |
center_x = int((best_box[0] + best_box[2]) / 2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
|
| 181 |
-
|
|
|
|
| 182 |
|
| 183 |
except Exception as e:
|
| 184 |
logger.error(f"Error in ONNX processing: {e}")
|
| 185 |
-
return 0, 0.0
|
| 186 |
|
| 187 |
def load_model():
|
| 188 |
"""Load ONNX model and class names"""
|
| 189 |
global model_session, CLASS_NAMES
|
| 190 |
-
|
| 191 |
try:
|
| 192 |
-
# Load class names
|
| 193 |
if os.path.exists(YAML_PATH):
|
| 194 |
with open(YAML_PATH, "r", encoding="utf-8") as f:
|
| 195 |
-
|
| 196 |
-
CLASS_NAMES = data.get('names', ['Target'])
|
| 197 |
else:
|
| 198 |
CLASS_NAMES = ['Target']
|
| 199 |
-
|
| 200 |
logger.info(f"Loaded {len(CLASS_NAMES)} classes: {CLASS_NAMES}")
|
| 201 |
|
| 202 |
-
# Load ONNX model
|
| 203 |
if ONNX_AVAILABLE and os.path.exists(MODEL_PATH):
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
model_session = ort.InferenceSession(MODEL_PATH, providers=providers)
|
| 207 |
-
logger.info("✅ ONNX model loaded successfully")
|
| 208 |
-
except Exception as e:
|
| 209 |
-
logger.error(f"Failed to load ONNX model: {e}")
|
| 210 |
-
logger.info("Will use CV model instead")
|
| 211 |
-
model_session = None
|
| 212 |
else:
|
| 213 |
-
|
| 214 |
-
logger.warning("ONNX Runtime not available, using CV model")
|
| 215 |
-
elif not os.path.exists(MODEL_PATH):
|
| 216 |
-
logger.warning(f"Model file {MODEL_PATH} not found, using CV model")
|
| 217 |
model_session = None
|
| 218 |
-
|
| 219 |
except Exception as e:
|
| 220 |
logger.error(f"Failed to load model: {e}")
|
| 221 |
model_session = None
|
|
@@ -225,107 +180,74 @@ def base64_to_numpy(base64_string: str) -> np.ndarray:
|
|
| 225 |
try:
|
| 226 |
if base64_string.startswith('data:image'):
|
| 227 |
base64_string = base64_string.split(',')[1]
|
| 228 |
-
|
| 229 |
image_data = base64.b64decode(base64_string)
|
| 230 |
-
|
| 231 |
-
return np.array(image.convert('RGB'))
|
| 232 |
except Exception as e:
|
| 233 |
logger.error(f"Error converting base64: {e}")
|
| 234 |
raise ValueError("Invalid image data")
|
| 235 |
|
| 236 |
def solve_geetest4_api(background_image: str, api_key: str):
|
| 237 |
-
"""Pure API endpoint function -
|
| 238 |
try:
|
| 239 |
-
# Verify API key
|
| 240 |
if not verify_api_key(api_key):
|
| 241 |
-
|
|
|
|
| 242 |
|
| 243 |
-
# Convert image
|
| 244 |
image_np = base64_to_numpy(background_image)
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
if model_session
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
# If ONNX fails, fallback to CV
|
| 251 |
if confidence < CONFIDENCE_THRESHOLD:
|
| 252 |
logger.info("ONNX confidence too low, using CV fallback")
|
| 253 |
-
target_x, confidence = smart_cv_model(image_np)
|
| 254 |
model_type = "CV"
|
| 255 |
else:
|
| 256 |
model_type = "ONNX"
|
| 257 |
else:
|
| 258 |
-
#
|
| 259 |
-
target_x, confidence = smart_cv_model(image_np)
|
| 260 |
model_type = "CV"
|
| 261 |
|
| 262 |
-
# Always succeed with minimum confidence
|
| 263 |
if target_x > 0 and confidence >= 0.5:
|
| 264 |
-
|
|
|
|
| 265 |
else:
|
| 266 |
-
# Fallback for edge cases
|
| 267 |
fallback_x = int(image_np.shape[1] * 0.6)
|
| 268 |
-
|
|
|
|
| 269 |
|
| 270 |
except Exception as e:
|
| 271 |
logger.error(f"API Error: {e}")
|
| 272 |
-
#
|
| 273 |
-
return [f"⚠️ Error, using fallback position", 200, 0.6]
|
| 274 |
|
| 275 |
-
# Initialize model on startup
|
| 276 |
load_model()
|
| 277 |
-
|
| 278 |
-
# --- FastAPI App ---
|
| 279 |
-
app = FastAPI(
|
| 280 |
-
title="GeeTest4 Solver API",
|
| 281 |
-
description="Pure API endpoint for GeeTest4 captcha solving",
|
| 282 |
-
version="1.0.0",
|
| 283 |
-
docs_url=None, # Disable docs
|
| 284 |
-
redoc_url=None # Disable redoc
|
| 285 |
-
)
|
| 286 |
|
| 287 |
@app.get("/")
|
| 288 |
async def root():
|
| 289 |
-
"""Root endpoint - returns 404 to hide the service"""
|
| 290 |
raise HTTPException(status_code=404, detail="Not Found")
|
| 291 |
|
| 292 |
@app.post("/api/predict")
|
| 293 |
async def predict(request: PredictRequest):
|
| 294 |
"""Main API endpoint for GeeTest4 solving"""
|
| 295 |
try:
|
| 296 |
-
# Validate request
|
| 297 |
if len(request.data) < 2:
|
| 298 |
raise HTTPException(status_code=400, detail="Invalid request format")
|
| 299 |
|
| 300 |
-
background_image = request.data[0]
|
| 301 |
-
api_key = request.data[1]
|
| 302 |
-
|
| 303 |
-
# Process request
|
| 304 |
result = solve_geetest4_api(background_image, api_key)
|
| 305 |
-
|
| 306 |
-
# Return response in Gradio format for compatibility
|
| 307 |
return {"data": result}
|
| 308 |
-
|
| 309 |
except Exception as e:
|
| 310 |
logger.error(f"API Error: {e}")
|
| 311 |
-
return JSONResponse(
|
| 312 |
-
status_code=500,
|
| 313 |
-
content={"data": ["❌ Server error", 0, 0.0]}
|
| 314 |
-
)
|
| 315 |
|
| 316 |
@app.get("/health")
|
| 317 |
async def health_check():
|
| 318 |
-
"""Health check endpoint"""
|
| 319 |
return {"status": "healthy", "model_loaded": model_session is not None}
|
| 320 |
|
| 321 |
-
# Launch app
|
| 322 |
if __name__ == "__main__":
|
| 323 |
logger.info("🚀 Starting GeeTest4 Pure FastAPI Server")
|
| 324 |
-
|
| 325 |
-
# Launch with uvicorn
|
| 326 |
-
uvicorn.run(
|
| 327 |
-
app,
|
| 328 |
-
host="0.0.0.0",
|
| 329 |
-
port=int(os.getenv("PORT", 7860)),
|
| 330 |
-
log_level="info"
|
| 331 |
-
)
|
|
|
|
| 10 |
import logging
|
| 11 |
import random
|
| 12 |
import yaml
|
| 13 |
+
from typing import Tuple, List, Dict, Union
|
| 14 |
from fastapi import FastAPI, HTTPException
|
| 15 |
from fastapi.responses import JSONResponse
|
| 16 |
from pydantic import BaseModel
|
|
|
|
| 46 |
class PredictRequest(BaseModel):
    """Request body for /api/predict.

    data is positional: data[0] = base64-encoded background image,
    data[1] = API key (see predict()).
    """
    data: List[str]


# Type alias for a detection bounding box: pixel values keyed 'x', 'y', 'w', 'h'.
BoundingBox = Dict[str, int]
| 51 |
|
| 52 |
def verify_api_key(api_key: str) -> bool:
    """Verify the caller-supplied API key against the configured API_KEY.

    Uses a constant-time comparison so the check cannot leak key prefixes
    through response-timing differences (the original `==` short-circuits
    on the first mismatching character).
    """
    import hmac  # local import keeps the change self-contained
    return hmac.compare_digest(str(api_key), str(API_KEY))
|
| 55 |
|
| 56 |
+
def smart_cv_model(image_np: np.ndarray) -> Tuple[int, float, Union[BoundingBox, None]]:
    """Computer Vision model - now also returns the bounding box.

    Heuristic detector: thresholds dark regions of the RGB image and treats
    the largest dark contour as the captcha gap. Returns
    (center_x, confidence, bbox) where bbox is {'x','y','w','h'} in pixels,
    or None when no contour was found or processing failed.
    """
    height, width = image_np.shape[:2]
    try:
        # Grayscale for analysis; input is assumed RGB (see COLOR_RGB2GRAY).
        gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
        # Dark pixels (< 100) become foreground — candidate missing pieces.
        _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        if contours:
            # The largest dark contour is assumed to be the missing piece.
            largest_contour = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(largest_contour)
            center_x = x + w // 2

            # Bounding box data returned to the caller.
            bbox = {'x': x, 'y': y, 'w': w, 'h': h}

            # Clamp the reported center into the middle 80% of the image width.
            center_x = max(int(width * 0.1), min(center_x, int(width * 0.9)))
            # Confidence grows with contour area, bounded to [0.6, 0.9].
            area_ratio = cv2.contourArea(largest_contour) / (width * height)
            confidence = min(0.9, max(0.6, area_ratio * 10))

            logger.info(f"CV Model: target at x={center_x}, confidence={confidence:.3f}")
            return center_x, confidence, bbox
        else:
            # Fallback when no contour exists: pseudo-random guess seeded from
            # the image bytes (stable within a process for the same image).
            random.seed(hash(image_np.tobytes()) % 2**31)
            target_x = int(width * (0.45 + random.random() * 0.3))
            confidence = 0.65 + random.random() * 0.15
            logger.info(f"CV Model (rule-based): target at x={target_x}, confidence={confidence:.3f}")
            return target_x, confidence, None

    except Exception as e:
        logger.warning(f"CV processing failed, using safe fallback: {e}")
        # Safe fallback: 60% of the width, moderate confidence, no box.
        center_x = int(width * 0.6)
        return center_x, 0.7, None
|
| 91 |
|
| 92 |
+
def process_image_onnx(image_np: np.ndarray) -> Tuple[int, float, Union[BoundingBox, None]]:
    """Process image with ONNX model - now also returns the bounding box.

    Letterboxes the RGB image to 640x640, runs the ONNX session, filters
    detections by CONFIDENCE_THRESHOLD, applies NMS, and returns
    (center_x, score, bbox) for the best detection in original-image pixels.
    Returns (0, 0.0, None) when nothing usable is detected or on any error.
    """
    # Original dimensions are needed to map detections back from 640x640.
    height, width = image_np.shape[:2]
    try:
        max_size = 640
        # Scale factor that fits the image inside max_size, preserving aspect ratio.
        ratio = min(max_size / width, max_size / height)
        new_width, new_height = int(width * ratio), int(height * ratio)
        resized = cv2.resize(image_np, (new_width, new_height))

        # Symmetric letterbox padding with the conventional (114, 114, 114) fill.
        dw, dh = (max_size - new_width) // 2, (max_size - new_height) // 2
        padded = cv2.copyMakeBorder(resized, dh, max_size - new_height - dh, dw, max_size - new_width - dw, cv2.BORDER_CONSTANT, value=(114, 114, 114))

        # Normalize to [0, 1], HWC -> CHW, add batch dimension.
        input_tensor = (padded.astype(np.float32) / 255.0).transpose(2, 0, 1)
        input_tensor = np.expand_dims(input_tensor, axis=0)

        outputs = model_session.run(None, {model_session.get_inputs()[0].name: input_tensor})
        # NOTE(review): assumes output layout (1, num_preds, 4 + num_classes) —
        # confirm against the exported model.
        preds = outputs[0][0]

        if len(preds) == 0:
            return 0, 0.0, None

        # Columns 4+ are per-class scores; keep each prediction's best class score.
        box_scores = preds[:, 4:]
        max_scores = np.max(box_scores, axis=1)

        # Drop low-confidence predictions early.
        valid_preds = max_scores > CONFIDENCE_THRESHOLD
        if not np.any(valid_preds):
            return 0, 0.0, None

        preds = preds[valid_preds]
        max_scores = max_scores[valid_preds]

        # First four columns are box coordinates in letterboxed space;
        # undo padding then scaling to recover original-image coordinates.
        boxes_raw = preds[:, :4]
        boxes_raw[:, 0] = (boxes_raw[:, 0] - dw) / ratio
        boxes_raw[:, 1] = (boxes_raw[:, 1] - dh) / ratio
        boxes_raw[:, 2] /= ratio
        boxes_raw[:, 3] /= ratio

        # Convert center format (cx, cy, w, h) to corner format (x1, y1, x2, y2).
        x1, y1 = boxes_raw[:, 0] - boxes_raw[:, 2] / 2, boxes_raw[:, 1] - boxes_raw[:, 3] / 2
        x2, y2 = boxes_raw[:, 0] + boxes_raw[:, 2] / 2, boxes_raw[:, 1] + boxes_raw[:, 3] / 2

        boxes_processed = np.column_stack((x1, y1, x2, y2)).astype(np.float32)
        # Non-maximum suppression to collapse overlapping detections.
        indices = cv2.dnn.NMSBoxes(boxes_processed, max_scores, CONFIDENCE_THRESHOLD, NMS_IOU_THRESHOLD)

        if len(indices) == 0:
            return 0, 0.0, None

        # Take the first surviving detection as the best one.
        best_idx = indices.flatten()[0]
        best_box = boxes_processed[best_idx]
        best_score = max_scores[best_idx]
        center_x = int((best_box[0] + best_box[2]) / 2)

        # Convert from x1,y1,x2,y2 to x,y,w,h for the response payload.
        x = int(best_box[0])
        y = int(best_box[1])
        w = int(best_box[2] - best_box[0])
        h = int(best_box[3] - best_box[1])
        bbox = {'x': x, 'y': y, 'w': w, 'h': h}

        return center_x, float(best_score), bbox

    except Exception as e:
        logger.error(f"Error in ONNX processing: {e}")
        # Fail soft: the caller treats (0, 0.0, None) as "fall back to CV model".
        return 0, 0.0, None
|
| 156 |
|
| 157 |
def load_model():
    """Load the ONNX model and class names into module globals.

    Sets `model_session` to an ONNX InferenceSession when the runtime and
    model file are both available, otherwise None (callers then fall back
    to the CV model). Populates CLASS_NAMES from the YAML config,
    defaulting to ['Target']. Never raises.
    """
    global model_session, CLASS_NAMES
    try:
        # Load class names; tolerate a missing or empty YAML file.
        if os.path.exists(YAML_PATH):
            with open(YAML_PATH, "r", encoding="utf-8") as f:
                # Bug fix: yaml.safe_load returns None for an empty file, and
                # None.get(...) would raise, skipping the ONNX load below.
                data = yaml.safe_load(f) or {}
            CLASS_NAMES = data.get('names', ['Target'])
        else:
            CLASS_NAMES = ['Target']
        logger.info(f"Loaded {len(CLASS_NAMES)} classes: {CLASS_NAMES}")

        # Load the ONNX model only when the runtime and the file are both present.
        if ONNX_AVAILABLE and os.path.exists(MODEL_PATH):
            model_session = ort.InferenceSession(MODEL_PATH, providers=['CPUExecutionProvider'])
            logger.info("✅ ONNX model loaded successfully")
        else:
            logger.warning("ONNX model not found or ONNX runtime not available. Using CV model.")
            model_session = None
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        model_session = None
|
|
|
|
| 180 |
def base64_to_numpy(base64_string: str) -> np.ndarray:
    """Decode a (possibly data-URL-prefixed) base64 string into an RGB array.

    Raises:
        ValueError: if the payload cannot be decoded as an image.
    """
    try:
        # Strip a "data:image/...;base64," prefix when present.
        if base64_string.startswith('data:image'):
            base64_string = base64_string.split(',')[1]
        image_data = base64.b64decode(base64_string)
        return np.array(Image.open(io.BytesIO(image_data)).convert('RGB'))
    except Exception as e:
        logger.error(f"Error converting base64: {e}")
        # Bug fix: chain the original exception explicitly so the root cause
        # is preserved for debugging instead of the implicit "during handling".
        raise ValueError("Invalid image data") from e
|
| 188 |
|
| 189 |
def solve_geetest4_api(background_image: str, api_key: str):
    """Pure API endpoint function - now also returns the bounding box.

    Returns a Gradio-style list: [message, target_x, confidence, bbox],
    where bbox is {'x','y','w','h'} or None. Never raises: every failure
    path is converted into a fallback response.
    """
    try:
        # Reject callers that do not present the configured API key.
        if not verify_api_key(api_key):
            return ["❌ Invalid API key", 0, 0.0, None]

        image_np = base64_to_numpy(background_image)
        bbox = None  # Default when no detector produces a box.

        if model_session:
            # Prefer the ONNX detector when a session is loaded.
            target_x, confidence, bbox = process_image_onnx(image_np)
            if confidence < CONFIDENCE_THRESHOLD:
                # ONNX result too weak — retry with the classical CV pipeline.
                logger.info("ONNX confidence too low, using CV fallback")
                target_x, confidence, bbox = smart_cv_model(image_np)
                model_type = "CV"
            else:
                model_type = "ONNX"
        else:
            # No ONNX session available — use the CV pipeline directly.
            target_x, confidence, bbox = smart_cv_model(image_np)
            model_type = "CV"

        if target_x > 0 and confidence >= 0.5:
            return [f"✅ Success! Target at x={target_x} (Model: {model_type})", target_x, confidence, bbox]
        else:
            # Edge case: no plausible detection — answer with a heuristic position.
            fallback_x = int(image_np.shape[1] * 0.6)
            return [f"✅ Fallback position x={fallback_x}", fallback_x, 0.7, None]

    except Exception as e:
        logger.error(f"API Error: {e}")
        # Last-resort fallback keeps the four-element response contract.
        return [f"⚠️ Error, using fallback position", 200, 0.6, None]
|
| 225 |
|
|
|
|
| 226 |
# Initialize the model once at import time so the app is ready before serving.
load_model()

# FastAPI app with interactive docs disabled to keep the service unlisted.
app = FastAPI(title="GeeTest4 Solver API", version="1.1.0", docs_url=None, redoc_url=None)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 228 |
|
| 229 |
@app.get("/")
|
| 230 |
async def root():
|
|
|
|
| 231 |
raise HTTPException(status_code=404, detail="Not Found")
|
| 232 |
|
| 233 |
@app.post("/api/predict")
|
| 234 |
async def predict(request: PredictRequest):
|
| 235 |
"""Main API endpoint for GeeTest4 solving"""
|
| 236 |
try:
|
|
|
|
| 237 |
if len(request.data) < 2:
|
| 238 |
raise HTTPException(status_code=400, detail="Invalid request format")
|
| 239 |
|
| 240 |
+
background_image, api_key = request.data[0], request.data[1]
|
|
|
|
|
|
|
|
|
|
| 241 |
result = solve_geetest4_api(background_image, api_key)
|
|
|
|
|
|
|
| 242 |
return {"data": result}
|
|
|
|
| 243 |
except Exception as e:
|
| 244 |
logger.error(f"API Error: {e}")
|
| 245 |
+
return JSONResponse(status_code=500, content={"data": ["❌ Server error", 0, 0.0, None]})
|
|
|
|
|
|
|
|
|
|
| 246 |
|
| 247 |
@app.get("/health")
|
| 248 |
async def health_check():
|
|
|
|
| 249 |
return {"status": "healthy", "model_loaded": model_session is not None}
|
| 250 |
|
|
|
|
| 251 |
if __name__ == "__main__":
|
| 252 |
logger.info("🚀 Starting GeeTest4 Pure FastAPI Server")
|
| 253 |
+
uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", 7860)), log_level="info")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|