# .env.example — template environment file for QAgents-Workflows
# QAgents-Workflows Environment Configuration
# Copy this file to .env and fill in your actual values
# For Hugging Face Spaces: add these as Repository Secrets or Space Variables
# =============================================================================
# LLM Configuration (Model-Agnostic)
# =============================================================================

# LLM provider: gemini (default), openai, anthropic, groq, ollama, etc.
# Leave empty to use the default: "gemini"
LLM_PROVIDER=gemini

# LLM model identifier
# For Gemini: gemini-2.5-flash-lite, gemini-2.5-flash, gemini-2.5-pro, gemini-2.0-flash
# For OpenAI: gpt-4o, gpt-4o-mini, gpt-4-turbo
# For Anthropic: claude-3-opus, claude-3-sonnet
# For Groq: llama-3.1-70b-versatile, mixtral-8x7b-32768
# For Ollama: mistral, neural-chat, starling-lm (local models)
# Leave empty to use the default: "gemini-2.5-flash-lite"
LLM_MODEL=gemini-2.5-flash-lite
# =============================================================================
# API Keys (Provider-Specific)
# =============================================================================

# Google Gemini API key (required when LLM_PROVIDER=gemini)
# Get one from: https://aistudio.google.com/app/apikey
GOOGLE_API_KEY=your-gemini-api-key-here

# Alternative Gemini API key (used as a fallback if GOOGLE_API_KEY is not set)
GENAI_API_KEY=

# OpenAI API key (required when LLM_PROVIDER=openai)
OPENAI_API_KEY=sk-...

# Anthropic API key (required when LLM_PROVIDER=anthropic)
ANTHROPIC_API_KEY=sk-ant-...

# Groq API key (required when LLM_PROVIDER=groq)
GROQ_API_KEY=gsk_...

# Note: Ollama (LLM_PROVIDER=ollama) requires no API key - it runs locally
# =============================================================================
# MCP Server Configuration (QuantumArchitect-MCP)
# =============================================================================

# MCP server base URL
# Local:                  http://127.0.0.1:7861
# Remote (ngrok example): https://your-ngrok-url.ngrok.io
# Leave empty to use the default: http://127.0.0.1:7861
MCP_SERVER_URL=http://127.0.0.1:7861
# =============================================================================
# Optional: Cost Tracking and Evaluation
# =============================================================================

# Set to true or false to enable/disable cost tracking
# TRACK_COSTS=true
# =============================================================================
# Notes for Hugging Face Spaces
# =============================================================================
# 1. Do NOT upload a .env file containing real keys to a public Space;
#    configure the values through the Space Settings UI instead
# 2. Go to Space Settings > Secrets > Add Secret
# 3. Add each variable:
#    - Name: LLM_PROVIDER,   Value: gemini
#    - Name: LLM_MODEL,      Value: gemini-2.5-flash-lite
#    - Name: GOOGLE_API_KEY, Value: your-key
#    - Name: MCP_SERVER_URL, Value: https://your-backend-url.ngrok.io
#
# 4. Restart the Space for the changes to take effect
#
# Alternative: use Space Variables (visible in the Space info) instead of Secrets
# for non-sensitive settings such as LLM_PROVIDER and MCP_SERVER_URL