# (Removed: "Spaces: / Running / Running" — Hugging Face Spaces UI residue
# from a page copy-paste, not part of the program.)
import base64
import io
import os

import cv2
import easyocr
import numpy as np
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from PIL import Image
# FastAPI application exposing the captcha-solving endpoint.
app = FastAPI()

# Allow browser-based clients (the Chrome extension) from any origin.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers for credentialed requests; if cookies/auth headers are
# ever needed, list explicit origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Directory containing this file; model assets are resolved relative to it
# so the service works regardless of the process's working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

print("Loading Case-Sensitive Custom Model V6 (Lazy Loaded as V2)...")
# EasyOCR reader backed by the custom recognition network 'cuims_model_v2'.
# Model loading is slow, so it happens once at import time and the single
# instance is shared by all requests.
reader = easyocr.Reader(
    ['en'],
    recog_network='cuims_model_v2',
    model_storage_directory=os.path.join(BASE_DIR, 'model'),
    user_network_directory=os.path.join(BASE_DIR, 'user_network'),
)
print("Model Loaded!")
# NOTE(review): the route decorator was missing in the mangled source; the
# endpoint is unreachable without one. Confirm the path against the clients.
@app.post("/solve")
async def solve_captcha(request: Request):
    """Decode a 4-character alphanumeric captcha image.

    Accepts either of two payloads, selected by Content-Type:
      * ``application/json`` with a base64 ``image`` field (an optional
        ``data:...;base64,`` data-URL prefix is stripped) — Chrome extension.
      * ``multipart/form-data`` with a ``file`` upload — Flutter client.

    Returns ``{"text": "<prediction>"}`` on success, or a JSON payload with
    an ``error`` field (HTTP 400 for bad requests, 200 for internal errors —
    the soft-error contract the clients expect).
    """
    try:
        img = None
        content_type = request.headers.get('content-type', '')

        if 'application/json' in content_type:
            # Handles Chrome Extension Base64 payload
            data = await request.json()
            base64_data = data.get('image', '')
            # Strip a "data:image/...;base64," prefix if present.
            if "," in base64_data:
                base64_data = base64_data.split(",")[1]
            img_bytes = base64.b64decode(base64_data)
            img = Image.open(io.BytesIO(img_bytes))
        elif 'multipart/form-data' in content_type:
            # Handles Flutter Raw File Upload payload
            form = await request.form()
            file = form.get('file')
            if file:
                contents = await file.read()
                img = Image.open(io.BytesIO(contents))
        else:
            return JSONResponse(status_code=400, content={"text": "", "error": "Invalid content type"})

        if img is None:
            return JSONResponse(status_code=400, content={"text": "", "error": "No image provided"})

        # Flatten any transparency onto a white background so Otsu
        # thresholding below is not confused by alpha pixels.
        img = img.convert("RGBA")
        white_bg = Image.new("RGBA", img.size, "WHITE")
        img_with_bg = Image.alpha_composite(white_bg, img)

        # Resize to the recognizer's native 100x32 input, convert to
        # grayscale, then binarize (Otsu picks the threshold automatically).
        img_native = img_with_bg.resize((100, 32), resample=Image.Resampling.BICUBIC)
        img_cv_grey = np.array(img_native.convert("L"))
        _, binarized_img = cv2.threshold(img_cv_grey, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Skip EasyOCR's detection stage: the whole 100x32 frame is treated
        # as a single horizontal text line. detail=0 returns bare strings.
        results = reader.recognize(
            binarized_img,
            horizontal_list=[[0, 100, 0, 32]],
            free_list=[],
            detail=0,
            decoder='beamsearch',
            beamWidth=10,
            allowlist='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        )
        text = "".join(results)
        # Keep alphanumerics only and clamp to the captcha's 4 characters.
        cleaned = "".join(e for e in text if e.isalnum())[:4]
        print(f"AI Predicted: {cleaned}")
        return {"text": cleaned}
    except Exception as e:
        # Top-level boundary handler: log the full traceback server-side,
        # return a soft error payload so clients never see a raw 500 body.
        import traceback
        print(f"Detailed Error: {traceback.format_exc()}")
        return {"text": "", "error": str(e)}