# Hugging Face Space page chrome (not code): uploaded by MetaQu,
# commit "Update app.py" (ae9db09, verified), file size 2.14 kB.
import gradio as gr
from transformers import pipeline
from PIL import Image, ExifTags
import numpy as np
import cv2
import io
# Load HuggingFace pipeline
# NOTE(review): this checkpoint is an NSFW-content detector, not an
# AI-vs-real classifier — presumably its labels never contain "artificial"/
# "fake"/"ai", so the branch in analyze_image always takes the else path.
# Confirm against the model card.
detector = pipeline("image-classification", model="falconsai/nsfw_image_detection")
# switch the model to one that supports real-vs-AI binary classification if you want to train that
def analyze_image(image):
    """Heuristically estimate whether *image* is AI-generated or a real photo.

    Combines the module-level HuggingFace ``detector`` pipeline (currently an
    NSFW checkpoint standing in for a real-vs-AI model) with two simple image
    statistics (blur, noise) and the presence of camera EXIF metadata.

    Args:
        image: PIL.Image.Image as delivered by the Gradio ``Image`` input.

    Returns:
        str: human-readable report with AI/real percentages, blur score,
        noise score and metadata presence. The two percentages always sum
        to exactly 100.
    """
    # Grayscale view for the sharpness/noise statistics. Convert the PIL RGB
    # array directly instead of the original in-memory PNG encode/decode
    # round-trip — PNG is lossless, so the grayscale values are identical.
    rgb = np.asarray(image.convert("RGB"))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)

    # Blur score: variance of the Laplacian — higher means sharper.
    blur_score = cv2.Laplacian(gray, cv2.CV_64F).var()
    # Noise score: global std-dev of grayscale intensities.
    noise_score = np.std(gray)

    # Camera metadata (EXIF): real photos usually carry it.
    meta_info = "Ada" if image.getexif() else "Tidak Ada"

    # Model prediction: pipeline returns [{"label": ..., "score": ...}, ...];
    # interpret the top entry.
    preds = detector(image)
    label = preds[0]["label"].lower()
    score = preds[0]["score"]

    # Map the top prediction to an "AI probability" percentage.
    if "artificial" in label or "fake" in label or "ai" in label:
        ai_prob = score * 100
    else:
        ai_prob = (1 - score) * 100

    # Heuristic corrections. Both rules now move probability mass between the
    # two classes symmetrically — the original boosted real_prob by 5 for the
    # blur/noise rule without touching ai_prob, letting the pair sum to 105%.
    if meta_info == "Ada":
        ai_prob -= 15
    if blur_score > 500 and noise_score > 20:
        ai_prob -= 5

    # Clamp once, then derive the complement so ai + real == 100 exactly
    # (independent clamping of both values could also break the invariant).
    ai_prob = max(0.0, min(100.0, ai_prob))
    real_prob = 100 - ai_prob

    hasil = f"""🖼️ Hasil Deteksi:
{ai_prob:.2f}% AI / {real_prob:.2f}% Asli
Blur Score: {blur_score:.2f}
Noise Score: {noise_score:.2f}
Metadata Kamera: {meta_info}
"""
    return hasil
# Wire the analyzer into a minimal Gradio UI: one image upload in, a plain
# text verdict out.
_UI_META = dict(
    title="AI vs Real Image Detector",
    description="Deteksi apakah gambar AI-generated atau asli. Gratis, tanpa API berbayar.",
)

demo = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil"),  # hand the upload to analyze_image as a PIL image
    outputs="text",
    **_UI_META,
)

if __name__ == "__main__":
    demo.launch()