# QAgents-Workflows Environment Configuration
# Copy this file to .env and fill in your actual values
# For Hugging Face Spaces: Add these as Repository Secrets or Space Variables

# =============================================================================
# LLM Configuration (Model-Agnostic)
# =============================================================================

# LLM Provider: gemini (default), openai, anthropic, groq, ollama, etc.
# Leave empty to use default: "gemini"
LLM_PROVIDER=gemini

# LLM Model identifier
# For Gemini: gemini-2.5-flash-lite, gemini-2.5-flash, gemini-2.5-pro, gemini-2.0-flash
# For OpenAI: gpt-4o, gpt-4o-mini, gpt-4-turbo
# For Anthropic: claude-3-opus, claude-3-sonnet
# For Groq: llama-3.3-70b-versatile, mixtral-8x7b-32768
# For Ollama: mistral, neural-chat, starling-lm (local models)
# Leave empty to use default: "gemini-2.5-flash-lite"
LLM_MODEL=gemini-2.5-flash-lite

# =============================================================================
# API Keys (Provider-Specific)
# =============================================================================

# Google Gemini API Key (required for LLM_PROVIDER=gemini)
# Get from: https://aistudio.google.com/app/apikey
GOOGLE_API_KEY=your-gemini-api-key-here

# Alternative Gemini API Key (fallback if GOOGLE_API_KEY not set)
GENAI_API_KEY=

# OpenAI API Key (required for LLM_PROVIDER=openai)
OPENAI_API_KEY=sk-...

# Anthropic API Key (required for LLM_PROVIDER=anthropic)
ANTHROPIC_API_KEY=sk-ant-...

# Groq API Key (required for LLM_PROVIDER=groq)
GROQ_API_KEY=gsk_...

# Note: Ollama (LLM_PROVIDER=ollama) requires no API key - runs locally

# =============================================================================
# MCP Server Configuration (QuantumArchitect-MCP)
# =============================================================================

# MCP Server Base URL
# Local: http://127.0.0.1:7861
# Remote (ngrok example): https://your-ngrok-url.ngrok.io
# Leave empty to use default: http://127.0.0.1:7861
MCP_SERVER_URL=http://127.0.0.1:7861

# =============================================================================
# Optional: Cost Tracking and Evaluation
# =============================================================================

# Cost tracking can be enabled/disabled
# TRACK_COSTS=true

# =============================================================================
# Notes for Hugging Face Spaces
# =============================================================================

# 1. Either upload this file as .env to your Space, or (recommended for API
#    keys) configure the values through the Space Settings UI:
# 2. Go to Space Settings > Secrets > Add Secret
# 3. Add each variable:
#    - Name: LLM_PROVIDER, Value: gemini
#    - Name: LLM_MODEL, Value: gemini-2.5-flash-lite
#    - Name: GOOGLE_API_KEY, Value: your-key
#    - Name: MCP_SERVER_URL, Value: https://your-backend-url.ngrok.io
#
# 4. Restart the Space for changes to take effect
#
# Alternative: Use Space Variables (visible in Space info) instead of Secrets
# This is useful for non-sensitive settings like LLM_PROVIDER and MCP_SERVER_URL