File size: 4,465 Bytes
b8761f5
 
f33f560
b8761f5
 
 
 
 
 
 
 
 
 
 
ee6a3b6
 
 
 
 
 
 
 
f33f560
 
 
 
 
ee6a3b6
f33f560
 
 
 
ee6a3b6
f33f560
ee6a3b6
f33f560
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee6a3b6
 
 
 
 
 
 
 
 
 
b8761f5
ee6a3b6
 
 
 
f33f560
ee6a3b6
 
 
f33f560
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee6a3b6
 
 
 
f33f560
 
 
 
 
b8761f5
f33f560
ee6a3b6
 
b8761f5
 
 
 
 
 
ee6a3b6
b8761f5
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
import gradio as gr
from transformers import pipeline
import re

# Hugging Face image-classification pipelines (weights are fetched from the Hub
# on first run, so module import requires network access the first time):
# - content_model: general-purpose ConvNeXt ImageNet classifier, used to
#   identify the image subject (human / dog / other).
# - nsfw_model: binary NSFW-vs-normal detector, used for adult-content flagging.
content_model = pipeline("image-classification", model="facebook/convnext-base-224")
nsfw_model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

def analyze_image(image):
    """Classify an image as human/dog/unknown and flag sensitive content.

    Runs two classifiers over the image (a general ImageNet-style content
    model and an NSFW detector), then matches the predicted labels against
    keyword lists to detect humans, dogs, and several sensitive-content
    categories (adult content, violence, suffering, abuse, death).

    Args:
        image: A PIL image (as provided by ``gr.Image(type="pil")``).

    Returns:
        dict with keys:
            "content": "human" | "dog" | "unknown"
            "adult_content": bool — NSFW detector's top label is "nsfw"
            "violence": bool — any violence keyword matched
            "sensitive_content": bool — any sensitive category triggered
            "content_description": str — Portuguese summary plus debug labels
    """
    content_preds = content_model(image)
    top_content = max(content_preds, key=lambda x: x["score"])

    nsfw_preds = nsfw_model(image)
    top_nsfw = max(nsfw_preds, key=lambda x: x["score"])

    # Pool every predicted label (lowercased) into one searchable string so
    # the keyword checks below are simple substring tests.
    all_labels = [pred["label"].lower() for pred in content_preds]
    all_labels.extend(pred["label"].lower() for pred in nsfw_preds)
    combined_labels = " ".join(all_labels)

    human_keywords = [
        "human", "person", "people", "man", "woman", "child", "baby", "boy", "girl",
        "face", "portrait", "selfie", "crowd", "family", "couple", "teenager"
    ]
    is_human = any(keyword in combined_labels for keyword in human_keywords)

    dog_keywords = [
        "dog", "puppy", "retriever", "labrador", "golden", "beagle", "bulldog",
        "poodle", "german shepherd", "chihuahua", "terrier", "hound", "mastiff",
        "canine", "pet", "animal", "malamute", "malemute", "alaskan"
    ]
    is_dog = any(keyword in combined_labels for keyword in dog_keywords)

    violence_keywords = [
        "blood", "wound", "injury", "hurt", "pain", "fight", "violence", "weapon",
        "knife", "gun", "attack", "aggression", "conflict", "battle", "war"
    ]

    suffering_keywords = [
        "sad", "crying", "tears", "depressed", "miserable", "suffering", "pain",
        "distress", "anguish", "grief", "mourning", "funeral", "death", "dead",
        "dying", "illness", "sick", "injured", "abandoned", "neglected"
    ]

    abuse_keywords = [
        "abuse", "mistreatment", "cruelty", "torture", "beaten", "chained",
        "caged", "starving", "malnourished", "neglected", "abandoned"
    ]

    death_keywords = [
        "death", "dead", "dying", "corpse", "carcass", "deceased", "lifeless",
        "motionless", "still", "rigid", "pale", "cold"
    ]

    # Postures/states that are only considered suspicious when combined with a
    # detected dog (e.g. an animal lying motionless on the ground).
    suspicious_keywords = [
        "lying", "laying", "ground", "floor", "side", "horizontal", "flat",
        "unconscious", "sleeping", "resting", "still", "motionless", "quiet"
    ]

    adult_content = top_nsfw["label"].lower() == "nsfw"
    violence = any(keyword in combined_labels for keyword in violence_keywords)
    suffering = any(keyword in combined_labels for keyword in suffering_keywords)
    abuse = any(keyword in combined_labels for keyword in abuse_keywords)
    death = any(keyword in combined_labels for keyword in death_keywords)

    suspicious_animal = is_dog and any(keyword in combined_labels for keyword in suspicious_keywords)

    sensitive = adult_content or violence or suffering or abuse or death or suspicious_animal

    # Humans take precedence over dogs when both keyword sets match.
    if is_human:
        content_type = "human"
    elif is_dog:
        content_type = "dog"
    else:
        content_type = "unknown"

    description_parts = []

    if content_type == "human":
        description_parts.append("Imagem contendo pessoa(s)")
    elif content_type == "dog":
        description_parts.append("Imagem contendo cão/cachorro")
    else:
        # BUG FIX: original referenced an undefined name `label` (NameError for
        # any non-human, non-dog image). Use the top content-model label, which
        # was computed above but previously unused.
        description_parts.append(f"Imagem contendo {top_content['label']}")

    if adult_content:
        description_parts.append("com conteúdo adulto")
    if violence:
        description_parts.append("com violência")
    if suffering:
        description_parts.append("mostrando sofrimento")
    if abuse:
        description_parts.append("com maus tratos")
    if death:
        description_parts.append("mostrando morte")
    if suspicious_animal:
        description_parts.append("com características suspeitas")

    if sensitive:
        description_parts.append("- CONTEÚDO SENSÍVEL")
    else:
        description_parts.append("- conteúdo seguro")

    description = " ".join(description_parts) + "."

    debug_info = f" [Debug: Labels detectadas: {combined_labels}]"

    return {
        "content": content_type,
        "adult_content": adult_content,
        "violence": violence,
        "sensitive_content": sensitive,
        "content_description": description + debug_info
    }

# Gradio UI: accepts an uploaded image as a PIL object, runs analyze_image,
# and renders the returned dict as JSON.
demo = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil"),
    outputs="json",
    title="Dog / Human Safety Detector"
)

# Launch the local Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()