# ============================================================================
# SPARKNET - Streamlit Secrets Configuration
# ============================================================================
# Copy this file to secrets.toml (DO NOT commit secrets.toml!)
# For Streamlit Cloud: Add these via the Streamlit Cloud dashboard
#
# VISTA/Horizon EU Project - Technology Transfer Office Automation
# ============================================================================
# ============================================================================
# LLM Provider API Keys
# ============================================================================
# NOTE: These are ROOT-LEVEL keys and must appear BEFORE any [table] header.
# In TOML, every key after a header such as [auth] belongs to that table, so
# placing these below [auth] would make them auth.GROQ_API_KEY etc. and the
# app's root-level secret lookups would come back empty.
#
# Add only the providers you want to use - system auto-selects best available
# Priority: Groq > Gemini > OpenRouter > GitHub > Together > Mistral > HuggingFace > Offline

# Groq - Fastest inference, 14,400 requests/day free
# Get key: https://console.groq.com/keys
GROQ_API_KEY = ""

# Google Gemini/AI Studio - 15 requests/min free
# Get key: https://aistudio.google.com/apikey
GOOGLE_API_KEY = ""

# OpenRouter - Access to many free models with single API
# Get key: https://openrouter.ai/keys
OPENROUTER_API_KEY = ""

# GitHub Models - Free GPT-4o, Llama 3.1 access
# Get token: https://github.com/settings/tokens (enable 'models' scope)
GITHUB_TOKEN = ""

# HuggingFace - Thousands of free models, embeddings
# Get token: https://huggingface.co/settings/tokens
HF_TOKEN = ""

# Together AI - $25 free credits
# Get key: https://www.together.ai/
TOGETHER_API_KEY = ""

# Mistral AI - Free experiment plan
# Get key: https://console.mistral.ai/
MISTRAL_API_KEY = ""

# ============================================================================
# Premium/Paid Providers (Optional)
# ============================================================================
# OpenAI - GPT-4, embeddings (paid)
# Get key: https://platform.openai.com/api-keys
OPENAI_API_KEY = ""

# Anthropic Claude (paid)
# Get key: https://console.anthropic.com/
ANTHROPIC_API_KEY = ""

# ============================================================================
# Backend Server (Optional - for GPU processing)
# ============================================================================
# If you have a GPU server (e.g., Lytos), configure the backend URL here.
# The backend provides GPU-accelerated OCR, embeddings, and RAG processing.
# See DEPLOYMENT.md for setup instructions.
# Keep this ABOVE the first [table] header when uncommenting, so it stays a
# root-level key.
# BACKEND_URL = "https://your-gpu-server.com:8000"
# Or for local testing:
# BACKEND_URL = "http://localhost:8000"

# ============================================================================
# Authentication (Required)
# ============================================================================
[auth]
# Single user mode
password = "your-secure-password"

# Multi-user mode (uncomment to use):
# [auth.users]
# admin = "admin-password-here"
# viewer = "viewer-password-here"
# analyst = "analyst-password-here"
# ============================================================================
# Database Configuration (Optional - for production)
# ============================================================================
[database]
# PostgreSQL connection (uncomment for production)
# url = "postgresql://user:password@host:5432/sparknet"
# ChromaDB persistence directory. NOTE(review): a relative path resolves
# against the process working directory — use an absolute path if the app
# may be launched from different locations; confirm against the loader.
chroma_persist_dir = "./data/chroma"
# ============================================================================
# Security Configuration
# ============================================================================
[security]
# Secret key for session management. Leave empty only for local development;
# set a unique value per deployment. Generate with:
#   python -c "import secrets; print(secrets.token_hex(32))"
secret_key = ""
# Enable audit logging (recommended for cloud / EU deployments)
audit_logging = false
# ============================================================================
# GDPR & Data Privacy
# ============================================================================
# IMPORTANT: For EU/VISTA deployments, configure these settings
[privacy]
# Data retention in days (0 = indefinite; GDPR-scoped deployments should
# set a finite retention period)
data_retention_days = 0
# Enable PII detection and masking
pii_detection = false
# Enable data anonymization for exports
anonymize_exports = false
# ============================================================================
# Feature Flags
# ============================================================================
[features]
# Enable experimental scenarios
experimental_scenarios = false
# Enable GPU acceleration (requires CUDA).
# NOTE(review): defaults to true, but Streamlit Cloud instances have no GPU —
# confirm this is the intended template default.
gpu_enabled = true
# Enable response caching
caching_enabled = true
# Cache entry time-to-live in seconds (3600 = 1 hour)
cache_ttl_seconds = 3600
# ============================================================================
# Private Deployment Notes
# ============================================================================
# For enterprise/private deployments:
#
# 1. LOCAL INFERENCE (Maximum Privacy):
#    - Use Ollama for 100% on-premise inference
#    - No data leaves your network
#    - Set OLLAMA_HOST = "http://localhost:11434"
#
# 2. HYBRID DEPLOYMENT:
#    - Use local Ollama for sensitive documents
#    - Use cloud LLMs for non-sensitive queries
#    - Configure document classification rules
#
# 3. CLOUD DEPLOYMENT (Streamlit Cloud):
#    - Use secrets management (this file)
#    - Enable audit logging
#    - Configure data retention policies
#    - Review GDPR compliance checklist
#
# See DEPLOYMENT.md for detailed instructions
# ============================================================================