csjhonathan committed
Commit b8761f5 · 1 Parent(s): bda51c5

initial commit

Files changed (2)
  1. app.py +41 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ # General-purpose classifier for the main subject, plus a dedicated NSFW detector.
+ content_model = pipeline("image-classification", model="facebook/convnext-base-224")
+ nsfw_model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
+
+ def analyze_image(image):
+     # Highest-scoring label from the general classifier.
+     content_preds = content_model(image)
+     top_content = max(content_preds, key=lambda x: x["score"])
+
+     # Highest-scoring label from the NSFW detector.
+     nsfw_preds = nsfw_model(image)
+     top_nsfw = max(nsfw_preds, key=lambda x: x["score"])
+
+     # Crude keyword matching on the predicted label.
+     label = top_content["label"].lower()
+     is_human = "human" in label or "person" in label or "people" in label
+     is_dog = "dog" in label or "retriever" in label or "puppy" in label
+
+     adult_content = top_nsfw["label"].lower() == "nsfw"
+     violence = "blood" in label or "wound" in label
+     sensitive = adult_content or violence
+
+     content_type = "human" if is_human else "dog" if is_dog else "unknown"
+     description = f"Image containing {label}. Content is {'safe' if not sensitive else 'sensitive'}."
+
+     return {
+         "content": content_type,
+         "adult_content": adult_content,
+         "violence": violence,
+         "sensitive_content": sensitive,
+         "content_description": description
+     }
+
+ demo = gr.Interface(
+     fn=analyze_image,
+     inputs=gr.Image(type="pil"),
+     outputs="json",
+     title="Dog / Human Safety Detector"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
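
As a quick local sanity check (not part of the commit), analyze_image can be imported and called directly with a PIL image; a minimal sketch, where "sample.jpg" is a hypothetical placeholder file:

from PIL import Image  # Pillow is installed as a gradio dependency

from app import analyze_image  # importing app.py downloads both models

# "sample.jpg" is a hypothetical test file, not part of this commit.
result = analyze_image(Image.open("sample.jpg"))
print(result["content"], result["content_description"])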
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ transformers
+ torch
+ gradio
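
With these dependencies installed (pip install -r requirements.txt), running python app.py serves the Gradio interface locally; on Hugging Face Spaces, app.py is picked up as the entry point automatically.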