Spaces:
Sleeping
Sleeping
# Hugging Face Inference API Configuration
# ============================================
# Mode: API (recommended) or Local
# ============================================
# Use HF Inference API (true) or load model locally (false)
HF_USE_API=true
# HF API token (get it from https://huggingface.co/settings/tokens)
# Required if HF_USE_API=true
HF_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxx
# ============================================
# Model Configuration
# ============================================
# Model to use (any Hugging Face model ID)
HF_MODEL_NAME=distilbert-base-uncased-finetuned-sst-2-english
# Task type (text-classification, text-generation, summarization, etc.)
HF_TASK=text-classification
# ============================================
# Server Configuration
# ============================================
HF_HOST=0.0.0.0
HF_PORT=8000
# ============================================
# Local Mode Only (ignored if HF_USE_API=true)
# ============================================
# Device (cpu, cuda, cuda:0, etc.)
HF_DEVICE=cpu
# Maximum batch size for inference
HF_MAX_BATCH_SIZE=32