File size: 2,866 Bytes
3d36685
 
 
 
 
 
 
 
 
 
be5a0b9
 
 
f5473ed
be5a0b9
 
 
b058771
3d36685
 
 
be5a0b9
3d36685
be5a0b9
f5473ed
3d36685
 
 
 
 
 
 
 
 
 
be5a0b9
 
3d36685
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be5a0b9
3d36685
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
import cv2
import numpy as np
import onnxruntime as ort
import gradio as gr
from huggingface_hub import hf_hub_download

# Hugging Face repo that hosts the private ONNX segmentation model.
REPO_ID = "A123123/AnimeAutoCensor" 
# Access token for the gated repo; must be set in the environment.
HF_TOKEN = os.getenv("HF_TOKEN")

# Square side length the model expects for its input (letterboxed).
target_size = 640  
# Globals populated on successful model load; process_image() checks
# `session is None` and becomes a no-op if loading failed.
session = None
input_name = None

try:
    print("Downloading model files...")
    model_path = hf_hub_download(repo_id=REPO_ID, filename="model.onnx", token=HF_TOKEN)
    # External-data companion file; must sit next to model.onnx so ORT can
    # resolve it — the return value itself is not needed.
    hf_hub_download(repo_id=REPO_ID, filename="model.onnx.data", token=HF_TOKEN)
    
    session = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
    input_name = session.get_inputs()[0].name
    print("Model loaded successfully!")
except Exception as e:
    # Deliberate best-effort: keep the app importable so the UI still
    # launches; inference is disabled via the `session is None` guard.
    print(f"CRITICAL ERROR: {e}")

def apply_mosaic_mask(image_rgb, mask, mosaic_level=16):
    """Pixelate the regions of *image_rgb* selected by *mask*.

    The whole image is shrunk by a factor of ``mosaic_level`` (bilinear)
    and blown back up with nearest-neighbour interpolation, producing the
    blocky mosaic look; pixelated pixels then replace the originals
    wherever the mask is non-zero.

    Args:
        image_rgb: H x W x 3 uint8 image.
        mask: H x W array; any non-zero entry marks a pixel to censor.
        mosaic_level: block size of the mosaic in original pixels.

    Returns:
        A new image array; the input is not modified.
    """
    height, width = image_rgb.shape[:2]
    selected = (mask > 0).astype(np.uint8)
    # Never let the downscaled size collapse to zero on tiny images.
    down_w = max(1, width // mosaic_level)
    down_h = max(1, height // mosaic_level)
    shrunk = cv2.resize(image_rgb, (down_w, down_h), interpolation=cv2.INTER_LINEAR)
    pixelated = cv2.resize(shrunk, (width, height), interpolation=cv2.INTER_NEAREST)
    result = image_rgb.copy()
    result[selected == 1] = pixelated[selected == 1]
    return result

def process_image(input_img):
    """Detect target regions in *input_img* and return a mosaic-censored copy.

    Pipeline: letterbox the image into a target_size x target_size canvas,
    normalize (ImageNet mean/std) to NCHW float32, run the ONNX session,
    crop the predicted mask back out of the letterbox, resize it to the
    original resolution, threshold it, and apply the mosaic.

    Args:
        input_img: H x W x 3 uint8 image (Gradio numpy image), or None.

    Returns:
        The censored image, or the input unchanged when it is None or the
        model failed to load (module-level ``session`` is None).
    """
    # Nothing to do without an image or a loaded model.
    if input_img is None or session is None:
        return input_img

    orig_h, orig_w = input_img.shape[:2]

    # Letterbox: scale the longer side to target_size, preserve aspect ratio.
    ratio = target_size / max(orig_h, orig_w)
    scaled_h = int(orig_h * ratio)
    scaled_w = int(orig_w * ratio)
    resized = cv2.resize(input_img, (scaled_w, scaled_h))

    top = (target_size - scaled_h) // 2
    left = (target_size - scaled_w) // 2
    padded = np.zeros((target_size, target_size, 3), dtype=np.uint8)
    padded[top:top + scaled_h, left:left + scaled_w] = resized

    # ImageNet normalisation, then batch-first channel-first layout.
    tensor = padded.astype(np.float32) / 255.0
    tensor = (tensor - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
    tensor = tensor.transpose(2, 0, 1)[np.newaxis, ...].astype(np.float32)

    # First output, first batch item, first channel: the mask logits/probs.
    pred = session.run(None, {input_name: tensor})[0][0][0]

    # Strip the letterbox padding and map the mask back to full resolution.
    cropped = pred[top:top + scaled_h, left:left + scaled_w]
    full_mask = cv2.resize(cropped, (orig_w, orig_h))
    binary = (full_mask > 0.5).astype(np.uint8)

    return apply_mosaic_mask(input_img, binary)

# Gradio front-end: upload an image on the left, censored result on the right.
# The variable MUST stay named `demo` — Hugging Face Spaces launches it by name.
with gr.Blocks(title="AI Anime Auto-Censor") as demo:
    gr.Markdown("# 🎨 AI Anime Auto-Censor (Trial Version)")
    gr.Markdown("This tool uses AI to detect and censor content. The model is protected.")

    with gr.Row():
        with gr.Column():
            source_image = gr.Image(type="numpy", label="Upload Image")
            start_button = gr.Button("Start Processing", variant="primary")
        with gr.Column():
            censored_image = gr.Image(type="numpy", label="Censored Result")

    # Wire the button to the inference pipeline.
    start_button.click(fn=process_image, inputs=source_image, outputs=censored_image)

if __name__ == "__main__":
    demo.launch()