# FDAM AI Pipeline Environment Configuration

# Set to true for local development with mock models (RTX 4090)
# Set to false for production with real models (HuggingFace 4xL4)
MOCK_MODELS=true

# Server configuration (0.0.0.0 required for WSL)
SERVER_HOST=0.0.0.0
SERVER_PORT=7860

# Optional: Override model paths (FP8 + 2B architecture)
# VISION_MODEL=Qwen/Qwen3-VL-30B-A3B-Thinking-FP8
# EMBEDDING_MODEL=Qwen/Qwen3-VL-Embedding-2B
# RERANKER_MODEL=Qwen/Qwen3-VL-Reranker-2B