csjhonathan committed on
Commit
ee6a3b6
·
1 Parent(s): f33f560

ajusta verificação de conteúdo

Browse files
Files changed (1) hide show
  1. app.py +35 -9
app.py CHANGED
@@ -12,20 +12,27 @@ def analyze_image(image):
12
  nsfw_preds = nsfw_model(image)
13
  top_nsfw = max(nsfw_preds, key=lambda x: x["score"])
14
 
15
- label = top_content["label"].lower()
 
 
 
 
 
 
 
16
 
17
  human_keywords = [
18
  "human", "person", "people", "man", "woman", "child", "baby", "boy", "girl",
19
  "face", "portrait", "selfie", "crowd", "family", "couple", "teenager"
20
  ]
21
- is_human = any(keyword in label for keyword in human_keywords)
22
 
23
  dog_keywords = [
24
  "dog", "puppy", "retriever", "labrador", "golden", "beagle", "bulldog",
25
  "poodle", "german shepherd", "chihuahua", "terrier", "hound", "mastiff",
26
- "canine", "pet", "animal"
27
  ]
28
- is_dog = any(keyword in label for keyword in dog_keywords)
29
 
30
  violence_keywords = [
31
  "blood", "wound", "injury", "hurt", "pain", "fight", "violence", "weapon",
@@ -43,12 +50,25 @@ def analyze_image(image):
43
  "caged", "starving", "malnourished", "neglected", "abandoned"
44
  ]
45
 
 
 
 
 
 
 
 
 
 
 
46
  adult_content = top_nsfw["label"].lower() == "nsfw"
47
- violence = any(keyword in label for keyword in violence_keywords)
48
- suffering = any(keyword in label for keyword in suffering_keywords)
49
- abuse = any(keyword in label for keyword in abuse_keywords)
 
50
 
51
- sensitive = adult_content or violence or suffering or abuse
 
 
52
 
53
  if is_human:
54
  content_type = "human"
@@ -74,6 +94,10 @@ def analyze_image(image):
74
  description_parts.append("mostrando sofrimento")
75
  if abuse:
76
  description_parts.append("com maus tratos")
 
 
 
 
77
 
78
  if sensitive:
79
  description_parts.append("- CONTEÚDO SENSÍVEL")
@@ -81,13 +105,15 @@ def analyze_image(image):
81
  description_parts.append("- conteúdo seguro")
82
 
83
  description = " ".join(description_parts) + "."
 
 
84
 
85
  return {
86
  "content": content_type,
87
  "adult_content": adult_content,
88
  "violence": violence,
89
  "sensitive_content": sensitive,
90
- "content_description": description
91
  }
92
 
93
  demo = gr.Interface(
 
12
  nsfw_preds = nsfw_model(image)
13
  top_nsfw = max(nsfw_preds, key=lambda x: x["score"])
14
 
15
+ all_labels = []
16
+ for pred in content_preds:
17
+ all_labels.append(pred["label"].lower())
18
+
19
+ for pred in nsfw_preds:
20
+ all_labels.append(pred["label"].lower())
21
+
22
+ combined_labels = " ".join(all_labels)
23
 
24
  human_keywords = [
25
  "human", "person", "people", "man", "woman", "child", "baby", "boy", "girl",
26
  "face", "portrait", "selfie", "crowd", "family", "couple", "teenager"
27
  ]
28
+ is_human = any(keyword in combined_labels for keyword in human_keywords)
29
 
30
  dog_keywords = [
31
  "dog", "puppy", "retriever", "labrador", "golden", "beagle", "bulldog",
32
  "poodle", "german shepherd", "chihuahua", "terrier", "hound", "mastiff",
33
+ "canine", "pet", "animal", "malamute", "malemute", "alaskan"
34
  ]
35
+ is_dog = any(keyword in combined_labels for keyword in dog_keywords)
36
 
37
  violence_keywords = [
38
  "blood", "wound", "injury", "hurt", "pain", "fight", "violence", "weapon",
 
50
  "caged", "starving", "malnourished", "neglected", "abandoned"
51
  ]
52
 
53
+ death_keywords = [
54
+ "death", "dead", "dying", "corpse", "carcass", "deceased", "lifeless",
55
+ "motionless", "still", "rigid", "pale", "cold"
56
+ ]
57
+
58
+ suspicious_keywords = [
59
+ "lying", "laying", "ground", "floor", "side", "horizontal", "flat",
60
+ "unconscious", "sleeping", "resting", "still", "motionless", "quiet"
61
+ ]
62
+
63
  adult_content = top_nsfw["label"].lower() == "nsfw"
64
+ violence = any(keyword in combined_labels for keyword in violence_keywords)
65
+ suffering = any(keyword in combined_labels for keyword in suffering_keywords)
66
+ abuse = any(keyword in combined_labels for keyword in abuse_keywords)
67
+ death = any(keyword in combined_labels for keyword in death_keywords)
68
 
69
+ suspicious_animal = is_dog and any(keyword in combined_labels for keyword in suspicious_keywords)
70
+
71
+ sensitive = adult_content or violence or suffering or abuse or death or suspicious_animal
72
 
73
  if is_human:
74
  content_type = "human"
 
94
  description_parts.append("mostrando sofrimento")
95
  if abuse:
96
  description_parts.append("com maus tratos")
97
+ if death:
98
+ description_parts.append("mostrando morte")
99
+ if suspicious_animal:
100
+ description_parts.append("com características suspeitas")
101
 
102
  if sensitive:
103
  description_parts.append("- CONTEÚDO SENSÍVEL")
 
105
  description_parts.append("- conteúdo seguro")
106
 
107
  description = " ".join(description_parts) + "."
108
+
109
+ debug_info = f" [Debug: Labels detectadas: {combined_labels}]"
110
 
111
  return {
112
  "content": content_type,
113
  "adult_content": adult_content,
114
  "violence": violence,
115
  "sensitive_content": sensitive,
116
+ "content_description": description + debug_info
117
  }
118
 
119
  demo = gr.Interface(