# ============================================================================
# SPARKNET - Streamlit Secrets Configuration
# ============================================================================
# Copy this file to secrets.toml (DO NOT commit secrets.toml!)
# For Streamlit Cloud: Add these via the Streamlit Cloud dashboard
#
# VISTA/Horizon EU Project - Technology Transfer Office Automation
# ============================================================================
# ============================================================================
# LLM Provider API Keys
# ============================================================================
# NOTE: These keys MUST stay at the top level (before any [table] header) so
# the app can read them as st.secrets["GROQ_API_KEY"], etc. If they were
# placed after [auth], TOML would nest them inside the auth table.
#
# Add only the providers you want to use - system auto-selects best available
# Priority: Groq > Gemini > OpenRouter > GitHub > Together > Mistral > HuggingFace > Offline

# Groq - Fastest inference, 14,400 requests/day free
# Get key: https://console.groq.com/keys
GROQ_API_KEY = ""

# Google Gemini/AI Studio - 15 requests/min free
# Get key: https://aistudio.google.com/apikey
GOOGLE_API_KEY = ""

# OpenRouter - Access to many free models with single API
# Get key: https://openrouter.ai/keys
OPENROUTER_API_KEY = ""

# GitHub Models - Free GPT-4o, Llama 3.1 access
# Get token: https://github.com/settings/tokens (enable 'models' scope)
GITHUB_TOKEN = ""

# HuggingFace - Thousands of free models, embeddings
# Get token: https://huggingface.co/settings/tokens
HF_TOKEN = ""

# Together AI - $25 free credits
# Get key: https://www.together.ai/
TOGETHER_API_KEY = ""

# Mistral AI - Free experiment plan
# Get key: https://console.mistral.ai/
MISTRAL_API_KEY = ""

# ============================================================================
# Premium/Paid Providers (Optional)
# ============================================================================
# OpenAI - GPT-4, embeddings (paid)
# Get key: https://platform.openai.com/api-keys
OPENAI_API_KEY = ""

# Anthropic Claude (paid)
# Get key: https://console.anthropic.com/
ANTHROPIC_API_KEY = ""

# ============================================================================
# Backend Server (Optional - for GPU processing)
# ============================================================================
# If you have a GPU server (e.g., Lytos), configure the backend URL here.
# The backend provides GPU-accelerated OCR, embeddings, and RAG processing.
# See DEPLOYMENT.md for setup instructions.
# BACKEND_URL = "https://your-gpu-server.com:8000"
# Or for local testing:
# BACKEND_URL = "http://localhost:8000"

# ============================================================================
# Authentication (Required)
# ============================================================================
[auth]
# Single user mode
password = "your-secure-password"

# Multi-user mode (uncomment to use):
# [auth.users]
# admin = "admin-password-here"
# viewer = "viewer-password-here"
# analyst = "analyst-password-here"
# ============================================================================
# Database Configuration (Optional - for production)
# ============================================================================
[database]
# PostgreSQL connection (uncomment for production)
# url = "postgresql://user:password@host:5432/sparknet"

# ChromaDB persistence directory (relative to the app's working directory)
chroma_persist_dir = "./data/chroma"
# ============================================================================
# Security Configuration
# ============================================================================
[security]
# Secret key for session management. Generate a strong random value with:
#   python -c "import secrets; print(secrets.token_hex(32))"
secret_key = ""

# Enable audit logging
audit_logging = false
# ============================================================================
# GDPR & Data Privacy
# ============================================================================
# IMPORTANT: For EU/VISTA deployments, configure these settings
[privacy]
# Data retention in days (0 = indefinite)
data_retention_days = 0

# Enable PII detection and masking
pii_detection = false

# Enable data anonymization for exports
anonymize_exports = false
# ============================================================================
# Feature Flags
# ============================================================================
[features]
# Enable experimental scenarios
experimental_scenarios = false

# Enable GPU acceleration (requires CUDA)
gpu_enabled = true

# Enable response caching; cache_ttl_seconds is the cache entry lifetime
caching_enabled = true
cache_ttl_seconds = 3600  # seconds (1 hour)
# ============================================================================
# Private Deployment Notes
# ============================================================================
# For enterprise/private deployments:
#
# 1. LOCAL INFERENCE (Maximum Privacy):
#    - Use Ollama for 100% on-premise inference
#    - No data leaves your network
#    - Set OLLAMA_HOST = "http://localhost:11434"
#
# 2. HYBRID DEPLOYMENT:
#    - Use local Ollama for sensitive documents
#    - Use cloud LLMs for non-sensitive queries
#    - Configure document classification rules
#
# 3. CLOUD DEPLOYMENT (Streamlit Cloud):
#    - Use secrets management (this file)
#    - Enable audit logging
#    - Configure data retention policies
#    - Review GDPR compliance checklist
#
# See DEPLOYMENT.md for detailed instructions
# ============================================================================