# scripts/hf_job_nsfw.py
# Uploaded by vlordier with huggingface_hub (commit db65255, verified)
#!/usr/bin/env python3
"""
NSFW Classification Job - Process human crops from SAM3D bboxes with EraX YOLO
Requires: SAM 3D Body outputs for human bboxes
Outputs: Per-human NSFW detections with bboxes and confidence scores
"""
import argparse
import os
from pathlib import Path
import warnings

# Silence library warnings globally (ultralytics / datasets are noisy in logs).
warnings.filterwarnings('ignore')

import logging
import sys

# Configure logging before the heavy library imports below so their loggers
# inherit this setup; force=True replaces any handlers already installed.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,
    force=True
)
logger = logging.getLogger(__name__)

import numpy as np
import torch
from datasets import load_dataset, Dataset as HFDataset, Features, Value
from PIL import Image
import json
from huggingface_hub import snapshot_download
from ultralytics import YOLO
def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.1):
    """Expand a bbox to a padded square, clipped to the image bounds.

    The square is centered on the input box, its side is the larger of the
    box's width/height grown by ``padding`` (fractional), and the result is
    clamped to ``[0, img_width] x [0, img_height]`` — so a box near an edge
    may come back non-square after clipping.

    Args:
        bbox: [x1, y1, x2, y2] in pixel coordinates.
        img_width: image width used as the right clip bound.
        img_height: image height used as the bottom clip bound.
        padding: fractional growth applied to the square side (0.1 = +10%).

    Returns:
        [x1, y1, x2, y2] of the padded, clipped square (floats).
    """
    left, top, right, bottom = bbox
    # Side of the enclosing square, grown by the padding fraction.
    side = max(right - left, bottom - top) * (1 + padding)
    half = side / 2
    center_x = (left + right) / 2
    center_y = (top + bottom) / 2
    return [
        max(0, center_x - half),
        max(0, center_y - half),
        min(img_width, center_x + half),
        min(img_height, center_y + half),
    ]
def process_batch(batch, sam3d_dataset):
    """Run per-human NSFW detection on one batch of images.

    For each image, look up its SAM3D row (matched on ``image_id``), crop each
    human bbox as a padded square, run the global YOLO ``model`` once over all
    crops in the batch, and map crop-space detections back to full-image
    coordinates.

    Args:
        batch: HF datasets batch dict with an 'image' column (PIL images) and
            an optional 'image_path' column.
        sam3d_dataset: dataset with 'image_id' and JSON-encoded 'sam3d_data'
            columns; each sam3d_data entry is a list of humans with a 'bbox'.

    Returns:
        dict with parallel 'image_id' and 'nsfw_detections' lists — exactly
        one entry per input image (JSON string of per-human detections, or
        None when no SAM3D data / no successful detection pass exists).
    """
    images = batch['image']
    image_paths = batch.get('image_path', [f'img_{i:06d}' for i in range(len(images))])

    # EraX-NSFW class ids; hoisted out of the per-box loop (was rebuilt per box).
    class_names = ['anus', 'make_love', 'nipple', 'penis', 'vagina']

    # Collect every human crop across the batch so YOLO runs a single time.
    crops = []
    crop_info = []  # (image_idx, human_idx, square_bbox, original_bbox)

    for idx, image_pil in enumerate(images):
        image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'
        # NOTE(review): a linear filter per image is O(rows) on a (possibly
        # streaming) dataset — pre-indexing sam3d rows by image_id would be
        # far faster for large datasets.
        sam3d_row = list(sam3d_dataset.filter(lambda x: x['image_id'] == image_id).take(1))
        if not sam3d_row or not sam3d_row[0]['sam3d_data']:
            # No SAM3D data: reported as None in the single output pass below.
            continue
        humans_data = json.loads(sam3d_row[0]['sam3d_data'])
        image_rgb = np.array(image_pil.convert('RGB'))
        img_width, img_height = image_pil.size

        for human_idx, human in enumerate(humans_data):
            bbox = human.get('bbox')
            if bbox is None:
                continue
            square_bbox = make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.15)
            x1, y1, x2, y2 = map(int, square_bbox)
            crop = image_rgb[y1:y2, x1:x2]
            if crop.size > 0:  # skip degenerate (empty) crops
                crops.append(crop)
                crop_info.append((idx, human_idx, square_bbox, bbox))

    # Single batched NSFW inference over every collected crop.
    human_results = {}  # {image_idx: {human_idx: [detection, ...]}}
    if crops:
        try:
            yolo_results = model(crops, conf=0.2, iou=0.3, verbose=False)
            for crop_idx, result in enumerate(yolo_results):
                img_idx, human_idx, square_bbox, orig_bbox = crop_info[crop_idx]
                x1_sq, y1_sq, _, _ = square_bbox
                detections = []
                if result.boxes:
                    for box in result.boxes:
                        class_id = int(box.cls.item())
                        class_name = class_names[class_id] if class_id < len(class_names) else f'class_{class_id}'
                        # Translate crop-local coords back to full-image coords
                        # (crop origin is the square bbox's top-left corner).
                        dx1, dy1, dx2, dy2 = box.xyxy[0].tolist()
                        detections.append({
                            'class': class_name,
                            'confidence': box.conf.item(),
                            'bbox': [x1_sq + dx1, y1_sq + dy1, x1_sq + dx2, y1_sq + dy2]
                        })
                if not detections:
                    # No NSFW hit on this human -> explicit 'safe' record.
                    detections = [{'class': 'safe', 'confidence': 1.0, 'bbox': orig_bbox}]
                human_results.setdefault(img_idx, {})[human_idx] = detections
        except Exception as e:
            # Best-effort: a failed inference pass leaves all images as None.
            logger.error(f"NSFW batch failed: {e}")

    # BUGFIX: the original appended "no SAM3D data" rows in the first loop AND
    # re-appended every image here, duplicating entries and misaligning the
    # output columns. Emit exactly one result per input image in one pass.
    image_ids = []
    nsfw_detections = []
    for idx, image_path in enumerate(image_paths):
        image_ids.append(Path(image_path).stem if image_path else f'img_{idx:06d}')
        if idx in human_results:
            # Dense list ordered by human_idx; humans skipped during cropping
            # (missing bbox / empty crop) default to a 'safe' record.
            max_human_idx = max(human_results[idx].keys())
            detections_list = [
                human_results[idx].get(h_idx, [{'class': 'safe', 'confidence': 1.0}])
                for h_idx in range(max_human_idx + 1)
            ]
            nsfw_detections.append(json.dumps(detections_list))
        else:
            nsfw_detections.append(None)

    return {
        'image_id': image_ids,
        'nsfw_detections': nsfw_detections
    }
def main():
    """CLI entry point: download the EraX-NSFW YOLO model, join the image
    dataset with SAM3D bbox outputs, run per-human NSFW detection, and push
    the results dataset to the Hub.

    Side effects: sets the module-level ``model`` global used by
    ``process_batch``, downloads model weights into the working directory,
    and uploads a private dataset (auth via the HF_TOKEN env var).
    """
    global model

    logger.info("="*60)
    logger.info("NSFW Classification with EraX YOLO (Per-Human)")
    logger.info("="*60)

    parser = argparse.ArgumentParser()
    parser.add_argument('--input-dataset', type=str, required=True, help='Original images')
    parser.add_argument('--sam3d-dataset', type=str, required=True, help='SAM3D outputs with bboxes')
    parser.add_argument('--output-dataset', type=str, required=True)
    parser.add_argument('--split', type=str, default='train')
    parser.add_argument('--batch-size', type=int, default=4)
    parser.add_argument('--shard-index', type=int, default=0)
    parser.add_argument('--num-shards', type=int, default=1)
    args = parser.parse_args()
    logger.info(f"Arguments: {vars(args)}")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"Using device: {device}")

    # Fetch model weights into the working directory, then load them.
    logger.info("Downloading EraX-NSFW model...")
    snapshot_download(repo_id="erax-ai/EraX-NSFW-V1.0", local_dir="./", force_download=False)
    logger.info("Loading YOLO model...")
    model = YOLO('erax_nsfw_yolo11m.pt')
    logger.info("✓ Model loaded")

    # Stream both datasets: SAM3D bbox results and the original images.
    logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
    sam3d_ds = load_dataset(args.sam3d_dataset, split=args.split, streaming=True)
    logger.info(f"Loading images from {args.input_dataset}...")
    ds = load_dataset(args.input_dataset, split=args.split, streaming=True)

    # NOTE(review): sharding both streams identically assumes the two
    # datasets are row-aligned — verify against how sam3d outputs were built.
    if args.num_shards > 1:
        ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
        sam3d_ds = sam3d_ds.shard(num_shards=args.num_shards, index=args.shard_index)
        logger.info(f"Using shard {args.shard_index+1}/{args.num_shards}")

    logger.info(f"Processing with batch_size={args.batch_size}")
    from functools import partial
    mapped = ds.map(
        partial(process_batch, sam3d_dataset=sam3d_ds),
        batched=True,
        batch_size=args.batch_size,
        remove_columns=ds.column_names
    )

    # Drain the lazy stream, logging progress every 100 rows.
    results = []
    for row_idx, row in enumerate(mapped):
        results.append(row)
        if (row_idx + 1) % 100 == 0:
            logger.info(f"Processed {row_idx + 1} images")
    logger.info(f"✓ Processed {len(results)} images")

    # Materialize the collected rows into a typed HF dataset.
    features = Features({
        'image_id': Value('string'),
        'nsfw_detections': Value('string')
    })
    output_ds = HFDataset.from_dict({
        'image_id': [row['image_id'] for row in results],
        'nsfw_detections': [row['nsfw_detections'] for row in results]
    }, features=features)

    logger.info(f"Uploading to {args.output_dataset}...")
    output_ds.push_to_hub(
        args.output_dataset,
        split=args.split,
        token=os.environ.get('HF_TOKEN'),
        private=True
    )
    logger.info("✓ Upload complete")
# Script entry point.
if __name__ == '__main__':
    main()