# HF Space deploy snapshot (minimal allow-list) — commit 557ee65 (avfranco)
import os
import uuid
# Application Constants
APP_NAME = "agents"
USER_ID = "default_user"
# Sentinel runner id used when no real runner is associated with a session.
DEFAULT_RUNNER_ID = "00000000-0000-0000-0000-000000000000"

# Environment Flags
# True when the HF_SPACE environment variable is the string "true" (any casing).
_hf_space_raw = os.getenv("HF_SPACE", "false")
HF_SPACE = _hf_space_raw.lower() == "true"
def is_hf_space() -> bool:
    """Return True when the app is running inside a HuggingFace Space deployment."""
    return bool(HF_SPACE)
# Ingestion Limits
# Tighter defaults apply on the public HF Space preview to protect shared
# resources; self-hosted deployments get the larger limits. All overridable
# via environment variables.
_DEFAULT_MAX_FILES = "12" if is_hf_space() else "100"
_DEFAULT_MAX_FILE_SIZE = "10" if is_hf_space() else "50"    # MB per file
_DEFAULT_MAX_TOTAL_SIZE = "20" if is_hf_space() else "250"  # MB per upload batch
MAX_UPLOAD_FILES = int(os.getenv("MAX_UPLOAD_FILES", _DEFAULT_MAX_FILES))
MAX_UPLOAD_FILE_SIZE_MB = int(os.getenv("MAX_UPLOAD_FILE_SIZE_MB", _DEFAULT_MAX_FILE_SIZE))
MAX_UPLOAD_TOTAL_SIZE_MB = int(os.getenv("MAX_UPLOAD_TOTAL_SIZE_MB", _DEFAULT_MAX_TOTAL_SIZE))
MAX_GZIP_DECOMPRESSED_SIZE_MB = int(os.getenv("MAX_GZIP_DECOMPRESSED_SIZE_MB", "200"))
# Normalize the allow-list: a user-supplied env value such as ".gpx, .fit"
# or one with a trailing comma previously produced entries with stray
# whitespace (or empty strings) that could never match a real extension.
ALLOWED_UPLOAD_EXTENSIONS = [
    ext.strip()
    for ext in os.getenv(
        "ALLOWED_UPLOAD_EXTENSIONS", ".gpx,.tcx.gz,.tcx,.fit,.fit.gz"
    ).split(",")
    if ext.strip()
]
# Storage Configuration
# Persistence is disabled by default on the public HF Space preview and
# enabled by default everywhere else; STORAGE_ENABLED overrides either way.
if is_hf_space():
    _DEFAULT_STORAGE_ENABLED = "false"
else:
    _DEFAULT_STORAGE_ENABLED = "true"
STORAGE_ENABLED = os.getenv("STORAGE_ENABLED", _DEFAULT_STORAGE_ENABLED).lower() == "true"
STORAGE_DB_PATH = os.getenv("STORAGE_DB_PATH", ".data/runner_agentic_intelligence.db")
def is_storage_enabled() -> bool:
    """Report whether the persistence layer has been switched on."""
    return bool(STORAGE_ENABLED)
def is_persistence_enabled() -> bool:
    """
    Return True only when storage is enabled AND we are not running in the
    HF public preview (where persistence is deliberately disabled).
    """
    # Guard clause: the public preview never persists, regardless of storage.
    if is_hf_space():
        return False
    return is_storage_enabled()
def generate_session_id() -> str:
    """Return a fresh random session identifier (UUID4, canonical string form)."""
    return f"{uuid.uuid4()}"
# LLM Configuration
# Supported providers: "gemini", "mock", "litellm".
LLM_PROVIDER = os.getenv("LLM_PROVIDER", "gemini")
LLM_MODEL_ID = os.getenv("LLM_MODEL", "")
LLM_API_BASE = os.getenv("LLM_API_BASE")  # getenv already defaults to None
LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "1.0"))
LLM_MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "8192"))
LLM_DROP_PARAMS = True


def _flag(var: str, default: str) -> bool:
    """Read a boolean env flag: the string 'true' (any casing) means True."""
    return os.getenv(var, default).lower() == "true"


LLM_SCHEMA_GUARD_STRICT = _flag("LLM_SCHEMA_GUARD_STRICT", "false")
AI_INSIGHTS_ENABLED = _flag("AI_INSIGHTS_ENABLED", "true")
# LiteLLM / OpenAI compatible settings
# All specific providers are now handled by the LiteLLM client layer.
# This branch mutates the module-level LLM_MODEL_ID / LLM_TEMPERATURE /
# LLM_API_BASE defaults set above, based on the selected provider.
if LLM_PROVIDER.lower() == "litellm":
    # Ensure we use LLM_MODEL env var if provided first
    # NOTE(review): LLM_MODEL_ID above is already initialized from the
    # LLM_MODEL env var, so this re-read looks redundant — confirm before
    # removing.
    ENV_MODEL = os.getenv("LLM_MODEL")
    if ENV_MODEL:
        LLM_MODEL_ID = ENV_MODEL
    if not LLM_MODEL_ID:
        # Default model if not provided
        LLM_MODEL_ID = "openai/gpt-4o-mini"
    # Simple heuristic to distinguish between OpenAI proper and others (like HF Router):
    # an "openai/" prefix without the community "gpt-oss" marker is treated as real OpenAI.
    is_real_openai = LLM_MODEL_ID.startswith("openai/") and "gpt-oss" not in LLM_MODEL_ID
    if is_real_openai:
        if "gpt-5" in LLM_MODEL_ID:
            # Pin temperature for the gpt-5 family — presumably those models
            # only accept the default temperature; verify against the client.
            LLM_TEMPERATURE = 1.0
    else:
        # Default to HuggingFace Router for other LiteLLM models unless api_base is set
        if not LLM_API_BASE:
            LLM_API_BASE = "https://router.huggingface.co/v1"
else:
    # Default fallback for Gemini or other non-explicit LiteLLM provider
    # (also applies to "mock"): LLM_MODEL wins, then GEMINI_MODEL, then the
    # hard-coded preview model.
    LLM_MODEL_ID = os.getenv("LLM_MODEL") or os.getenv("GEMINI_MODEL", "gemini-3-flash-preview")

# Deprecated: do not use global LLM_API_KEY for multi-provider setup.
# LLM clients now resolve their own credentials from environment variables.
LLM_API_KEY = os.getenv("LLM_API_KEY")