# antai-detector / .env.example
# Author: RaidenIppen
# Commit 5d6b029 (verified): Add two-stage router + specialist architecture with CLIP routing
# ─── Antai Space β€” Environment Variables ─────────────────────────────────────
# For local development: copy to .env and adjust as needed.
# In production (HuggingFace Spaces): set these in Space Settings β†’ Variables.
# Router model: zero-shot image classification to determine content bucket.
# Must be a model that supports the zero-shot-image-classification task.
ROUTER_MODEL_ID=openai/clip-vit-base-patch32
# Specialist models β€” one per content bucket.
# All default to the same general-purpose AI detector; override individual
# buckets with a more accurate specialist as you benchmark alternatives.
SPECIALIST_GENERAL_MODEL_ID=Ateeqq/ai-vs-human-image-detector
SPECIALIST_FACE_MODEL_ID=Ateeqq/ai-vs-human-image-detector
SPECIALIST_DOCUMENT_MODEL_ID=Ateeqq/ai-vs-human-image-detector
SPECIALIST_ART_MODEL_ID=Ateeqq/ai-vs-human-image-detector
SPECIALIST_NATURE_MODEL_ID=Ateeqq/ai-vs-human-image-detector
# Confidence threshold: scores >= AI_THRESHOLD β†’ isAI: true
AI_THRESHOLD=0.5
# Debug mode: when true, response includes rawRouter and rawSpecialist arrays.
# Keep false in production to avoid large payloads.
DEBUG=false