# reachy_mini_minder / .env.example
# (refactor: drop Gemini Live support, standardizing on OpenAI Realtime — commit d8a2aab)
# OpenAI settings
OPENAI_API_KEY=
MODEL_NAME="gpt-realtime"
# Local vision model (only used with --local-vision CLI flag)
# By default, vision is handled by gpt-realtime when the camera tool is used
LOCAL_VISION_MODEL=HuggingFaceTB/SmolVLM2-2.2B-Instruct
# Cache for local VLM (only used with --local-vision CLI flag)
HF_HOME=./cache
# Hugging Face token for accessing datasets/models
HF_TOKEN=
# Selects a profile with custom instructions and tools, defined in profiles/<myprofile>/__init__.py
REACHY_MINI_CUSTOM_PROFILE="example"
# Cost monitoring: pricing per million tokens for the gpt-realtime model
# Override these if using gpt-realtime-mini or custom pricing
# REALTIME_PRICE_TEXT_INPUT=4.00
# REALTIME_PRICE_TEXT_OUTPUT=16.00
# REALTIME_PRICE_AUDIO_INPUT=32.00
# REALTIME_PRICE_AUDIO_OUTPUT=64.00
# Security: API server host binding (default: 127.0.0.1, loopback only)
# Set to 0.0.0.0 for LAN deployments (e.g. tablet UI connecting to robot)
# API_HOST=0.0.0.0
# Security: additional CORS origins (comma-separated)
# Required when frontend runs on a different device (e.g. tablet on LAN)
# ALLOWED_ORIGINS=http://192.168.1.100:3000
# Security: API bearer token for LAN deployments
# When set, all API requests must include 'Authorization: Bearer <token>'
# API_TOKEN=
# Tracing to monitor LangGraph Agent runs
# Sign up/log in at https://smith.langchain.com to get a free API key
# LANGSMITH_API_KEY=
# Optional: set to true to enable LangSmith tracing
# SMITH_TRACING=true