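"""Gradio Space that flags potentially sensitive images.

The app runs two Hugging Face image-classification pipelines (a general
ConvNeXt classifier and an NSFW detector), matches the predicted labels
against keyword lists for humans, dogs, violence, suffering, abuse and
death, and returns a JSON summary with a Portuguese description.
"""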
import gradio as gr
from transformers import pipeline

# General-purpose image classifier (ImageNet labels) and a dedicated NSFW detector.
content_model = pipeline("image-classification", model="facebook/convnext-base-224")
nsfw_model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
def analyze_image(image):
    # Run the general-purpose classifier and keep its top prediction.
    content_preds = content_model(image)
    top_content = max(content_preds, key=lambda x: x["score"])

    # Run the NSFW classifier and keep its top prediction.
    nsfw_preds = nsfw_model(image)
    top_nsfw = max(nsfw_preds, key=lambda x: x["score"])

    # Collect every predicted label (lower-cased) into one string so the
    # checks below can use simple substring matching.
    all_labels = []
    for pred in content_preds:
        all_labels.append(pred["label"].lower())
    for pred in nsfw_preds:
        all_labels.append(pred["label"].lower())
    combined_labels = " ".join(all_labels)

    # Keyword lists used to classify the subject and flag sensitive traits.
    human_keywords = [
        "human", "person", "people", "man", "woman", "child", "baby", "boy", "girl",
        "face", "portrait", "selfie", "crowd", "family", "couple", "teenager"
    ]
    is_human = any(keyword in combined_labels for keyword in human_keywords)

    dog_keywords = [
        "dog", "puppy", "retriever", "labrador", "golden", "beagle", "bulldog",
        "poodle", "german shepherd", "chihuahua", "terrier", "hound", "mastiff",
        "canine", "pet", "animal", "malamute", "malemute", "alaskan"
    ]
    is_dog = any(keyword in combined_labels for keyword in dog_keywords)

    violence_keywords = [
        "blood", "wound", "injury", "hurt", "pain", "fight", "violence", "weapon",
        "knife", "gun", "attack", "aggression", "conflict", "battle", "war"
    ]
    suffering_keywords = [
        "sad", "crying", "tears", "depressed", "miserable", "suffering", "pain",
        "distress", "anguish", "grief", "mourning", "funeral", "death", "dead",
        "dying", "illness", "sick", "injured", "abandoned", "neglected"
    ]
    abuse_keywords = [
        "abuse", "mistreatment", "cruelty", "torture", "beaten", "chained",
        "caged", "starving", "malnourished", "neglected", "abandoned"
    ]
    death_keywords = [
        "death", "dead", "dying", "corpse", "carcass", "deceased", "lifeless",
        "motionless", "still", "rigid", "pale", "cold"
    ]
    suspicious_keywords = [
        "lying", "laying", "ground", "floor", "side", "horizontal", "flat",
        "unconscious", "sleeping", "resting", "still", "motionless", "quiet"
    ]

    # Sensitivity flags from the NSFW model and the keyword matches above.
    adult_content = top_nsfw["label"].lower() == "nsfw"
    violence = any(keyword in combined_labels for keyword in violence_keywords)
    suffering = any(keyword in combined_labels for keyword in suffering_keywords)
    abuse = any(keyword in combined_labels for keyword in abuse_keywords)
    death = any(keyword in combined_labels for keyword in death_keywords)
    suspicious_animal = is_dog and any(keyword in combined_labels for keyword in suspicious_keywords)
    sensitive = adult_content or violence or suffering or abuse or death or suspicious_animal

    # Decide the main content type.
    if is_human:
        content_type = "human"
    elif is_dog:
        content_type = "dog"
    else:
        content_type = "unknown"

    # Build a human-readable description (in Portuguese) from the flags.
    description_parts = []
    if content_type == "human":
        description_parts.append("Imagem contendo pessoa(s)")
    elif content_type == "dog":
        description_parts.append("Imagem contendo cão/cachorro")
    else:
        description_parts.append(f"Imagem contendo {top_content['label']}")
    if adult_content:
        description_parts.append("com conteúdo adulto")
    if violence:
        description_parts.append("com violência")
    if suffering:
        description_parts.append("mostrando sofrimento")
    if abuse:
        description_parts.append("com maus tratos")
    if death:
        description_parts.append("mostrando morte")
    if suspicious_animal:
        description_parts.append("com características suspeitas")
    if sensitive:
        description_parts.append("- CONTEÚDO SENSÍVEL")
    else:
        description_parts.append("- conteúdo seguro")
    description = " ".join(description_parts) + "."
    debug_info = f" [Debug: Labels detectadas: {combined_labels}]"

    return {
        "content": content_type,
        "adult_content": adult_content,
        "violence": violence,
        "sensitive_content": sensitive,
        "content_description": description + debug_info,
    }
# Gradio UI: takes a PIL image and returns the analysis as JSON.
demo = gr.Interface(
    fn=analyze_image,
    inputs=gr.Image(type="pil"),
    outputs="json",
    title="Dog / Human Safety Detector",
)

if __name__ == "__main__":
    demo.launch()