File size: 2,760 Bytes
8c30b41
3289e9d
7ad75f4
 
19e6f0c
f924686
7ad75f4
19e6f0c
7ad75f4
f924686
19e6f0c
7ad75f4
 
 
 
 
 
 
 
 
 
 
 
19e6f0c
7ad75f4
 
 
 
 
 
 
 
 
 
 
 
 
 
f924686
7ad75f4
 
f924686
7ad75f4
f924686
7ad75f4
 
f924686
7ad75f4
 
 
 
 
 
 
f924686
7ad75f4
 
f924686
7ad75f4
 
 
 
f924686
7ad75f4
 
f924686
 
 
7ad75f4
f924686
7ad75f4
 
 
 
 
f924686
 
 
 
19e6f0c
 
 
 
 
7ad75f4
 
19e6f0c
923a637
19e6f0c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import gradio as gr
from transformers import pipeline
from PIL import Image, ExifTags, ImageStat, ImageFilter
import numpy as np

# Primary model: dedicated AI-image detector (scores "artificial" vs "human").
detector = pipeline("image-classification", model="umm-maybe/AI-image-detector")

# Secondary general-purpose classifier (ViT); used as a backup signal and
# shown in the report only — its score does not enter the weighted total.
general = pipeline("image-classification", model="google/vit-base-patch16-224")

def analyze_noise(img):
    """Return the standard deviation of grayscale pixel values.

    A higher value suggests more high-frequency detail/noise, which the
    caller treats as a weak signal of a real photograph.
    """
    pixels = np.asarray(img.convert("L"))
    return pixels.std()

def analyze_blur(img):
    """Estimate image sharpness via the variance of a 3x3 Laplacian response.

    Higher variance means more edges (sharper image); values near zero
    indicate blur or a flat image.

    Bug fix: the original called ``cv2.Laplacian`` but ``cv2`` was never
    bound at module level — ``import cv2`` inside ``detect_image`` creates
    only a function-local name — so this function always raised NameError
    and the blur signal was silently lost. Re-implemented with NumPy
    (already imported) using the standard Laplacian kernel
    [[0,1,0],[1,-4,1],[0,1,0]], removing the OpenCV dependency.
    """
    arr = np.asarray(img.convert("L"), dtype=np.float64)
    # Images smaller than 3x3 have no interior pixels to convolve.
    if arr.ndim != 2 or arr.shape[0] < 3 or arr.shape[1] < 3:
        return 0.0
    # 3x3 Laplacian over interior pixels (edges skipped; cv2 reflects the
    # border instead, so values differ slightly at the margins).
    lap = (
        arr[:-2, 1:-1] + arr[2:, 1:-1]
        + arr[1:-1, :-2] + arr[1:-1, 2:]
        - 4.0 * arr[1:-1, 1:-1]
    )
    return float(lap.var())

def metadata_score(img):
    """Return 0 if camera EXIF metadata is present, else 1.

    A "Make" or "Model" EXIF tag suggests the image came from a real
    camera (score 0 = likely genuine). Missing or unreadable EXIF data
    yields 1 (possibly AI-generated).

    Fix: the original used a bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        # NOTE(review): _getexif() is a private Pillow API and is only
        # defined for some formats (e.g. JPEG); the except below also
        # covers images that lack the method entirely.
        exif = img._getexif()
        if exif is None:
            return 1  # No metadata -> possibly AI-generated
        for tag, value in exif.items():
            decoded = ExifTags.TAGS.get(tag, tag)
            if decoded == "Make" or decoded == "Model":
                return 0  # Camera make/model present -> likely genuine
        return 1
    except Exception:
        return 1  # Unreadable EXIF treated the same as absent EXIF

def detect_image(img: Image.Image):
    """Run the hybrid AI-vs-real analysis and return a Markdown report.

    Combines the AI-detector confidence (60%), EXIF metadata presence
    (20%), and noise/blur statistics (10% each) into a 0-1 "AI" score,
    then formats both percentages plus the raw signals as Markdown.
    Returns an error string instead of raising on any failure.
    """
    try:
        # Primary AI-detector prediction
        result1 = detector(img)
        label1 = result1[0]['label']
        conf1 = result1[0]['score']  # 0-1
        # NOTE(review): conf1 is the top label's confidence regardless of
        # whether label1 is the "AI" class — consider using the AI-class
        # probability explicitly so a confident "human" result doesn't
        # inflate ai_score.

        # Secondary general classifier (reported only; not in the score)
        result2 = general(img)
        label2 = result2[0]['label']
        conf2 = result2[0]['score']

        # Noise statistic
        noise = analyze_noise(img)

        # Blur statistic. Bug fix: the original did ``import cv2`` here,
        # but a function-local import does not make ``cv2`` visible inside
        # analyze_blur, so the blur analysis always failed. Guard the call
        # itself instead and fall back to 0 on any failure.
        try:
            blur = analyze_blur(img)
        except Exception:
            blur = 0

        # EXIF metadata check (0 = camera info present, 1 = absent)
        meta = metadata_score(img)

        # Weighted final score (tuned to be sensitive to photorealistic AI).
        # noise/100 and blur/1000 normalize the raw statistics; low noise
        # and low blur-variance both push the score toward "AI".
        ai_score = conf1 * 0.6 + meta * 0.2 + (1 - min(noise/100, 1)) * 0.1 + (1 - min(blur/1000, 1)) * 0.1
        ai_score = min(max(ai_score, 0), 1)  # Clamp to [0, 1]
        human_score = 1 - ai_score

        ai_percent = round(ai_score * 100, 2)
        human_percent = round(human_score * 100, 2)

        output = f"""
### Hasil Deteksi:
๐Ÿ–ผ๏ธ Gambar ini {ai_percent}% AI / {human_percent}% Asli

**Model AI-detector:** {label1} ({round(conf1*100,2)}%)  
**Model General (ViT):** {label2} ({round(conf2*100,2)}%)  
**Blur Score:** {round(blur,2)}  
**Noise Score:** {round(noise,2)}  
**Metadata Kamera:** {"Ada" if meta==0 else "Tidak Ada"}
"""
        return output
    except Exception as e:
        # Top-level boundary: surface the failure to the UI as text.
        return f"Terjadi error: {str(e)}"

# Gradio UI wiring: single PIL-image input, Markdown report output.
iface = gr.Interface(
    fn=detect_image,
    inputs=gr.Image(type="pil"),
    outputs="markdown",
    title="Hybrid AI vs Foto Asli Detector",
    description="Upload foto untuk mendeteksi persentase AI vs foto asli. Lebih sensitif terhadap AI photorealistic."
)

# Start the web server only when run as a script (not on import).
if __name__ == "__main__":
    iface.launch()