# Theowise / apps/blursens.py (uploaded by hoon1018 — "Upload 5 files", commit 909d310, verified)
import os
import cv2
import numpy as np
import base64
import gc
import io
import requests
from fastapi import APIRouter, File, UploadFile, Form, Request
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.templating import Jinja2Templates
from PIL import Image, ImageOps
from pillow_heif import register_heif_opener
# --- Initialization (runs once at module import) ---------------------------
# Register the HEIF/HEIC decoder with Pillow so Image.open() accepts them.
register_heif_opener()

router = APIRouter()  # mounted by the main app instead of a standalone FastAPI()
templates = Jinja2Templates(directory="templates")

# YuNet face-detection model: downloaded on first import if not present.
model_path = "models/face_detection_yunet_2023mar.onnx"
model_url = (
    "https://github.com/opencv/opencv_zoo/raw/main/models/"
    "face_detection_yunet/face_detection_yunet_2023mar.onnx"
)

if not os.path.exists(model_path):
    try:
        # The models/ directory may not exist on a fresh checkout.
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        r = requests.get(model_url, timeout=30)
        # Without this check an HTTP error page would be saved as the .onnx file.
        r.raise_for_status()
        with open(model_path, "wb") as f:
            f.write(r.content)
    except (requests.RequestException, OSError) as e:
        # Best effort: detector creation below will then fail and the
        # endpoints report a 500 instead of the module crashing on import.
        print(f"[blursens] model download failed: {e}")

try:
    face_detector = cv2.FaceDetectorYN.create(
        model=model_path,
        config="",
        input_size=(320, 320),  # placeholder; reset per-image before detect()
        score_threshold=0.6,
        nms_threshold=0.3,
        top_k=5000,
    )
except Exception as e:
    # Endpoints check for None and return HTTP 500 rather than failing at import.
    print(f"[blursens] YuNet load failed: {e}")
    face_detector = None
# [๋ผ์šฐํŠธ] ๋ฉ”์ธ ํŽ˜์ด์ง€ (๋ฃจํŠธ ๊ฒฝ๋กœ๋Š” ์—ฌ๊ธฐ์„œ ์ฒ˜๋ฆฌํ•˜๊ฑฐ๋‚˜ app.py์—์„œ ์ฒ˜๋ฆฌ ๊ฐ€๋Šฅ)
@router.get("/", response_class=HTMLResponse)
async def main_blur(request: Request):
    """Serve the face-blur landing page."""
    context = {"request": request}
    return templates.TemplateResponse("main_blursens.html", context)
# [API] ์ด๋ฏธ์ง€ ์ƒ์„ฑ
# Blur-kernel size as a fraction of the face-box size, per strength setting.
_BLUR_RATIOS = {"strong": 0.5, "soft": 0.1, "medium": 0.25}


def _blur_faces(img, faces, blur_strength):
    """Blur every detected face in *img* in place.

    Each face box is clamped to the image bounds, then an elliptical
    Gaussian-blurred patch is composited over the face region.

    Args:
        img: BGR uint8 image (H, W, 3); modified in place.
        faces: YuNet detection rows; face[:4] is (x, y, w, h).
        blur_strength: "strong" | "soft" | anything else -> "medium".
    """
    h, w = img.shape[:2]
    # Hoisted out of the loop: same ratio applies to every face.
    k_ratio = _BLUR_RATIOS.get(blur_strength, _BLUR_RATIOS["medium"])
    for face in faces:
        box_x, box_y, box_w, box_h = map(int, face[:4])
        # Clamp the box to the image so ROI slicing never goes out of bounds.
        x, y = max(0, box_x), max(0, box_y)
        fw, fh = min(box_w, w - x), min(box_h, h - y)
        if fw <= 0 or fh <= 0:
            continue
        roi = img[y:y + fh, x:x + fw]
        # Elliptical mask so the blur follows the face shape, not the box.
        mask = np.zeros((fh, fw), dtype=np.uint8)
        cv2.ellipse(mask, (fw // 2, fh // 2), (fw // 2, fh // 2), 0, 0, 360, 255, -1)
        # GaussianBlur needs an odd kernel; `| 1` forces odd, floor at 3.
        k = max(3, int(max(fw, fh) * k_ratio) | 1)
        blurred_roi = cv2.GaussianBlur(roi, (k, k), 0)
        img[y:y + fh, x:x + fw] = np.where(mask[..., None] == 255, blurred_roi, roi)


def _encode_image(img, output_format, original_ext):
    """Encode a BGR image per the requested format.

    Args:
        img: BGR uint8 image.
        output_format: "png" | "webp" | anything else -> keep original format.
        original_ext: lowercase extension of the upload (used for "original").

    Returns:
        (encoded_bytes, mime_type, final_ext)
    """
    if output_format == "png":
        _, buf = cv2.imencode('.png', img)
        return buf.tobytes(), "image/png", "png"
    if output_format == "webp":
        _, buf = cv2.imencode('.webp', img, [int(cv2.IMWRITE_WEBP_QUALITY), 80])
        return buf.tobytes(), "image/webp", "webp"
    # "original": re-encode in the uploaded format where supported.
    if original_ext in ('heic', 'heif'):
        # OpenCV cannot write HEIC; round-trip through Pillow (pillow_heif).
        pil_out = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        buffer = io.BytesIO()
        pil_out.save(buffer, format="HEIC", quality=100)
        return buffer.getvalue(), "image/heic", "heic"
    if original_ext in ('jpg', 'jpeg'):
        _, buf = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        return buf.tobytes(), "image/jpeg", "jpg"
    # Unknown/absent extension: PNG is the lossless fallback.
    _, buf = cv2.imencode('.png', img)
    return buf.tobytes(), "image/png", "png"


# [API] ์ด๋ฏธ์ง€ ์ƒ์„ฑ
@router.post("/generate")
async def generate_image(
    file: UploadFile = File(...),
    blur_strength: str = Form("medium"),
    output_format: str = Form("original")
):
    """Detect faces in an uploaded image, blur them, and return the result.

    Returns a JSON payload with a base64 data-URL (`image_b64`) and the
    output extension (`file_ext`); any failure yields an HTTP 500 JSON body.
    """
    try:
        filename = file.filename or ""
        original_ext = filename.split('.')[-1].lower() if '.' in filename else 'png'
        file_bytes = await file.read()
        pil_img = Image.open(io.BytesIO(file_bytes))
        # Apply EXIF orientation so phone photos aren't sideways.
        pil_img = ImageOps.exif_transpose(pil_img)
        # FIX: convert("RGB") guarantees a 3-channel array. RGBA, palette,
        # or grayscale uploads previously crashed the RGB2BGR conversion
        # (wrong channel count) or the 3-way shape unpack below.
        img = cv2.cvtColor(np.array(pil_img.convert("RGB")), cv2.COLOR_RGB2BGR)
        h, w, _ = img.shape

        if face_detector is None:
            return JSONResponse(status_code=500, content={"message": "AI ๋ชจ๋ธ(YuNet) ๋กœ๋“œ ์‹คํŒจ"})

        # YuNet requires the exact input size before detect().
        face_detector.setInputSize((w, h))
        _, faces = face_detector.detect(img)
        if faces is not None:
            _blur_faces(img, faces, blur_strength)

        encoded_bytes, mime_type, final_ext = _encode_image(img, output_format, original_ext)
        img_str = base64.b64encode(encoded_bytes).decode()

        # Drop the large buffers promptly to keep peak memory down.
        del file_bytes, pil_img, img
        gc.collect()

        return {
            "status": "success",
            "image_b64": f"data:{mime_type};base64,{img_str}",
            "file_ext": final_ext
        }
    except Exception as e:
        print(f"Error: {e}")
        return JSONResponse(status_code=500, content={"message": str(e)})