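# Gradio moderation app: classifies an uploaded (or base64-encoded) image as
# human / dog / unknown and flags sensitive content (NSFW, violence, suffering,
# abuse, death). Primary tagger: SmilingWolf/wd-eva02-large-tagger-v3; falls
# back to ConvNeXt + Falconsai NSFW pipelines if the tagger fails to load.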
import gradio as gr
from transformers import pipeline
import torch
import timm
import requests
from PIL import Image
import numpy as np
import base64
from io import BytesIO

# Load the WD EVA02 tagger and its tag vocabulary from the Hugging Face Hub.
try:
    eva02_model = timm.create_model('hf_hub:SmilingWolf/wd-eva02-large-tagger-v3', pretrained=True)
    eva02_model.eval()
    tags_response = requests.get('https://huggingface.co/SmilingWolf/wd-eva02-large-tagger-v3/resolve/main/selected_tags.csv')
    tags_lines = tags_response.text.strip().split('\n')
    # selected_tags.csv has a header row; the tag name is the second column.
    eva02_tags = [line.split(',')[1] for line in tags_lines[1:]]
    print(f"EVA02 model loaded with {len(eva02_tags)} tags")
except Exception as e:
    print(f"Failed to load EVA02: {e}")
    eva02_model = None
    eva02_tags = []

# Fallback classifiers, used only when the EVA02 tagger is unavailable.
content_model = pipeline("image-classification", model="facebook/convnext-base-224")
nsfw_model = pipeline("image-classification", model="Falconsai/nsfw_image_detection")


def decode_base64_image(base64_string):
    # Strip a data-URI prefix such as "data:image/jpeg;base64," if present.
    if ',' in base64_string:
        base64_string = base64_string.split(',')[1]
    try:
        image_data = base64.b64decode(base64_string)
        image = Image.open(BytesIO(image_data))
        return image
    except Exception as e:
        print(f"Failed to decode base64: {e}")
        return None
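
# Usage sketch ("iVBORw0KGgo..." stands in for a real PNG payload):
#   img = decode_base64_image("data:image/png;base64,iVBORw0KGgo...")
#   if img is not None:
#       img.save("decoded.png")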


def analyze_with_eva02(image):
    if eva02_model is None:
        return [], []
    # Force 3 channels; a grayscale or RGBA input would break the permute below.
    image = image.convert('RGB')
    image_tensor = torch.from_numpy(np.array(image)).permute(2, 0, 1).float() / 255.0
    image_tensor = torch.nn.functional.interpolate(
        image_tensor.unsqueeze(0),
        size=(448, 448),
        mode='bilinear',
        align_corners=False
    )
    with torch.no_grad():
        features = eva02_model(image_tensor)
        probs = torch.sigmoid(features[0])
    # Keep every tag whose sigmoid score clears the 0.5 threshold.
    detected_tags = []
    tag_scores = []
    for tag, prob in zip(eva02_tags, probs):
        if prob > 0.5:
            detected_tags.append(tag)
            tag_scores.append(float(prob))
    return detected_tags, tag_scores
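
# Note: the resize + 0-1 scaling above is a simplified preprocessing path. If
# tag quality looks off, one option (an assumption, not a tested part of this
# app) is to build the model's own eval transform from its pretrained config:
#   config = timm.data.resolve_data_config({}, model=eva02_model)
#   transform = timm.data.create_transform(**config)
#   image_tensor = transform(image.convert('RGB')).unsqueeze(0)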


def analyze_image(image_input, base64_input=None):
    image = None
    # The base64 field takes priority when filled in; otherwise use the upload.
    if base64_input and isinstance(base64_input, str) and base64_input.strip():
        image = decode_base64_image(base64_input)
        if image is None:
            return {
                "content": "unknown",
                "adult_content": False,
                "violence": False,
                "sensitive_content": False,
                "content_description": "Failed to process the base64 image."
            }
    elif image_input is not None:
        image = image_input
    else:
        return {
            "content": "unknown",
            "adult_content": False,
            "violence": False,
            "sensitive_content": False,
            "content_description": "No image provided."
        }
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    if eva02_model is not None:
        eva02_tags_detected, eva02_scores = analyze_with_eva02(image)
        combined_labels = " ".join(eva02_tags_detected).lower()
        print(f"EVA02 detected: {eva02_tags_detected}")
    else:
        # Fallback path: pool labels from both classifiers into one search string.
        content_preds = content_model(image)
        top_content = max(content_preds, key=lambda x: x["score"])
        nsfw_preds = nsfw_model(image)
        top_nsfw = max(nsfw_preds, key=lambda x: x["score"])
        all_labels = [pred["label"].lower() for pred in content_preds + nsfw_preds]
        combined_labels = " ".join(all_labels)
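    # The checks below are plain substring searches over the pooled label
    # string, so short keywords can match inside longer labels (e.g. "pet"
    # matches "carpet"); treat the resulting counts as heuristics.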
    human_keywords = [
        "1boy", "1girl", "2boys", "2girls", "multiple boys", "multiple girls",
        "human", "person", "people", "man", "woman", "child", "baby", "boy", "girl",
        "face", "portrait", "selfie", "crowd", "family", "couple", "teenager",
        "male", "female", "adult", "teen", "kid", "toddler", "infant"
    ]
    dog_keywords = [
        "1dog", "2dogs", "multiple dogs", "dog", "puppy", "retriever", "labrador",
        "golden", "beagle", "bulldog", "poodle", "german shepherd", "chihuahua",
        "terrier", "hound", "mastiff", "canine", "pet", "animal", "malamute",
        "malemute", "alaskan", "doggy", "doggie", "pup", "husky", "border collie",
        "dachshund", "boxer", "rottweiler", "siberian husky", "australian shepherd"
    ]
    human_count = sum(1 for keyword in human_keywords if keyword in combined_labels)
    dog_count = sum(1 for keyword in dog_keywords if keyword in combined_labels)
    # Whichever class has more keyword hits wins; ties favor the dog class.
    is_dog = dog_count > 0 and dog_count >= human_count
    is_human = human_count > 0 and not is_dog
    violence_keywords = [
        "blood", "wound", "injury", "hurt", "pain", "fight", "violence", "weapon",
        "knife", "gun", "attack", "aggression", "conflict", "battle", "war",
        "bloody", "injured", "wounded", "bleeding", "scar", "bruise", "cut"
    ]
    suffering_keywords = [
        "sad", "crying", "tears", "depressed", "miserable", "suffering", "pain",
        "distress", "anguish", "grief", "mourning", "funeral", "death", "dead",
        "dying", "illness", "sick", "injured", "abandoned", "neglected"
    ]
    abuse_keywords = [
        "abuse", "abused", "mistreatment", "mistreated", "cruelty", "torture",
        "beaten", "chained", "caged", "starving", "malnourished", "neglected",
        "abandoned"
    ]
    death_keywords = [
        "death", "dead", "dying", "corpse", "carcass", "deceased", "lifeless",
        "motionless", "still", "rigid", "pale", "cold", "skull", "bones",
        "grave", "tombstone", "funeral", "coffin", "burial"
    ]
    suspicious_keywords = [
        "unconscious", "motionless", "lifeless", "rigid", "cold", "pale",
        "injured", "wounded", "bleeding", "hurt", "pain", "distress",
        "abandoned", "neglected", "starving", "malnourished", "chained", "caged"
    ]
    normal_dog_behavior = [
        "sleeping", "resting", "lying", "sitting", "playing", "running", "walking",
        "happy", "excited", "alert", "awake", "active", "energetic", "playful"
    ]
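    # Each flag fires when any of its keywords appears in the pooled label
    # string; for dogs, normal-behavior tags can veto the "suspicious" flag.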
    if eva02_model is not None:
        # EVA02 emits no single NSFW score, so look for explicit tags instead.
        adult_keywords = ["nsfw", "explicit", "nude", "naked", "sexual", "adult", "mature"]
        adult_content = any(keyword in combined_labels for keyword in adult_keywords)
    else:
        adult_content = top_nsfw["label"].lower() == "nsfw"
    violence = any(keyword in combined_labels for keyword in violence_keywords)
    suffering = any(keyword in combined_labels for keyword in suffering_keywords)
    abuse = any(keyword in combined_labels for keyword in abuse_keywords)
    death = any(keyword in combined_labels for keyword in death_keywords)
    has_suspicious_behavior = any(keyword in combined_labels for keyword in suspicious_keywords)
    has_normal_behavior = any(keyword in combined_labels for keyword in normal_dog_behavior)
    has_death_indicators = death  # identical check to death_keywords above
    if is_dog:
        suspicious_animal = has_death_indicators or (has_suspicious_behavior and not has_normal_behavior)
        dead_dog = has_death_indicators
    else:
        suspicious_animal = has_suspicious_behavior
        dead_dog = False
    sensitive = adult_content or violence or suffering or abuse or death or suspicious_animal or dead_dog
    if is_human:
        content_type = "human"
    elif is_dog:
        content_type = "dog"
    else:
        content_type = "unknown"
    # Build a human-readable summary of everything that was flagged.
    description_parts = []
    if content_type == "human":
        description_parts.append("Image containing person(s)")
    elif content_type == "dog":
        description_parts.append("Image containing a dog")
    else:
        # The fallback path exposes a top classifier label; the EVA02 path does not.
        fallback_label = top_content["label"] if eva02_model is None else "unrecognized content"
        description_parts.append(f"Image containing {fallback_label}")
    if adult_content:
        description_parts.append("with adult content")
    if violence:
        description_parts.append("with violence")
    if suffering:
        description_parts.append("showing suffering")
    if abuse:
        description_parts.append("with mistreatment")
    if death:
        description_parts.append("showing death")
    if suspicious_animal:
        description_parts.append("with suspicious characteristics")
    if dead_dog:
        description_parts.append("dead")
    if sensitive:
        description_parts.append("- SENSITIVE CONTENT")
    else:
        description_parts.append("- safe content")
    description = " ".join(description_parts) + "."
    # Collect the matched keywords so the response shows why each flag fired.
    detected_human_tags = [tag for tag in human_keywords if tag in combined_labels]
    detected_dog_tags = [tag for tag in dog_keywords if tag in combined_labels]
    detected_suspicious = [tag for tag in suspicious_keywords if tag in combined_labels]
    detected_normal = [tag for tag in normal_dog_behavior if tag in combined_labels]
    detected_death = [tag for tag in death_keywords if tag in combined_labels]
    debug_info = f" [Debug: Human({human_count}): {detected_human_tags}, Dog({dog_count}): {detected_dog_tags}, Suspicious: {detected_suspicious}, Normal: {detected_normal}, Death: {detected_death}]"
    return {
        "content": content_type,
        "adult_content": adult_content,
        "violence": violence,
        "sensitive_content": sensitive,
        "content_description": description + debug_info
    }
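
# Response shape returned to the Gradio JSON output (illustrative values):
#   {
#       "content": "dog",
#       "adult_content": False,
#       "violence": False,
#       "sensitive_content": False,
#       "content_description": "Image containing a dog - safe content. [Debug: ...]"
#   }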

demo = gr.Interface(
    fn=analyze_image,
    inputs=[
        gr.Image(type="pil", label="Upload an Image or Paste Base64"),
        gr.Textbox(label="Or Paste a Base64 String Here", lines=3, placeholder="data:image/jpeg;base64,/9j/4AAQ...")
    ],
    outputs="json",
    title="Dog / Human Safety Detector",
    description="Upload an image or paste a base64 string for moderation analysis"
)
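
# Sketch of how a caller could build the base64 payload for the textbox input
# ("photo.jpg" is a hypothetical local file):
#   import base64
#   with open("photo.jpg", "rb") as f:
#       payload = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()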

if __name__ == "__main__":
    demo.launch()