# RepoReaper — .env.example
# (committed by GitHub Actions Bot, "deploy: auto-inject hf config & sync", 4e98fb0)
# ======================================
# GitHub Agent Demo - environment variable configuration
# ======================================
# --- LLM provider selection ---
# Supported: openai, deepseek, anthropic, gemini
# Default: deepseek
LLM_PROVIDER=
# --- API keys (set the key matching the chosen provider) ---
# OpenAI (when LLM_PROVIDER=openai)
OPENAI_API_KEY=
# OPENAI_BASE_URL= # optional: custom endpoint (e.g. Azure OpenAI)
# DeepSeek (when LLM_PROVIDER=deepseek)
DEEPSEEK_API_KEY=
# DEEPSEEK_BASE_URL=https://api.deepseek.com # optional: this is the default
# Anthropic Claude (when LLM_PROVIDER=anthropic)
ANTHROPIC_API_KEY=
# Google Gemini (when LLM_PROVIDER=gemini)
GEMINI_API_KEY=
# GEMINI_BASE_URL= # optional: OpenAI-compatible endpoint
# --- Model selection ---
# If unset, each provider's default model is used:
# - openai: gpt-4o-mini
# - deepseek: deepseek-chat
# - anthropic: claude-3-5-sonnet-20241022
# - gemini: gemini-1.5-flash
# MODEL_NAME=deepseek-chat
# --- GitHub token ---
# Used to access the GitHub API; raises the request rate limit
GITHUB_TOKEN=
# --- Embedding service ---
# SiliconFlow API key (used for BGE-M3 embeddings)
SILICON_API_KEY=
# --- Qdrant vector database ---
# Mode: "local" | "server" | "cloud"
# - local: embedded local storage (development, single worker)
# - server: Qdrant Server in Docker (production, multiple workers)
# - cloud: Qdrant Cloud managed service
QDRANT_MODE=local
QDRANT_LOCAL_PATH=data/qdrant_db
# Server mode: connect to a Qdrant Server (Docker)
# QDRANT_MODE=server
# QDRANT_URL=http://localhost:6333
# ...or configure host and port separately:
# QDRANT_HOST=localhost
# QDRANT_PORT=6333
# Cloud mode: connect to Qdrant Cloud
# QDRANT_MODE=cloud
# QDRANT_URL=https://xxx.qdrant.tech
# QDRANT_API_KEY=your-api-key
# Vector dimension (BGE-M3 = 1024)
# QDRANT_VECTOR_SIZE=1024
# --- Langfuse tracing (optional) ---
# NOTE(review): these same four LANGFUSE_* variables are set again, uncommented,
# at the bottom of this file; keep only one of the two sections to avoid
# confusion about which values take effect.
# LANGFUSE_ENABLED=true
# LANGFUSE_HOST=http://localhost:3000
# LANGFUSE_PUBLIC_KEY=
# LANGFUSE_SECRET_KEY=
# --- Distributed lock ---
# Lock backend: "memory" | "file" | "redis"
# - memory: in-process lock (single process)
# - file: file lock (multiple workers on a single node)
# - redis: Redis distributed lock (multiple nodes)
LOCK_BACKEND=file
LOCK_DIR=data/locks
# REDIS_URL=redis://localhost:6379/0
# --- Service binding ---
# HOST=127.0.0.1
# PORT=8000
# --- LLM parameters (optional) ---
# LLM_TEMPERATURE=0.1
# LLM_MAX_TOKENS=4096
# LLM_TIMEOUT=600
# --- Automatic evaluation ---
AUTO_EVAL_ENABLED=true
AUTO_EVAL_ASYNC=true
AUTO_EVAL_QUEUE_ENABLED=true
AUTO_EVAL_USE_RAGAS=true
# Presumably the fraction of requests sampled for RAGAS scoring (0.0-1.0)
# — TODO confirm against the consuming code
AUTO_EVAL_RAGAS_SAMPLE_RATE=0.3
AUTO_EVAL_RAGAS_TIMEOUT_SEC=8
# "CB" likely means circuit breaker — TODO confirm against the consuming code
AUTO_EVAL_RAGAS_CB_ENABLED=true
# --- Langfuse tracing ---
# NOTE(review): duplicates the commented-out Langfuse section earlier in this
# file; these uncommented values are the ones that take effect.
LANGFUSE_ENABLED=true
LANGFUSE_HOST=http://localhost:3000
LANGFUSE_PUBLIC_KEY=
LANGFUSE_SECRET_KEY=