# anima_conversation_app — .env.example
# (Synced from GitHub via hub-sync; commit 621c9ef)
OPENAI_API_KEY=
MODEL_NAME="gpt-realtime"
# Local vision model (only used with --local-vision CLI flag)
# By default, vision is handled by gpt-realtime when the camera tool is used
LOCAL_VISION_MODEL=HuggingFaceTB/SmolVLM2-2.2B-Instruct
# Cache for local VLM (only used with --local-vision CLI flag)
HF_HOME=./cache
# Hugging Face token for accessing datasets/models
HF_TOKEN=
# Profile selection (defaults to "default" when unset)
REACHY_MINI_CUSTOM_PROFILE="example"
# Optional external profile/tool directories
# REACHY_MINI_EXTERNAL_PROFILES_DIRECTORY=external_content/external_profiles
# REACHY_MINI_EXTERNAL_TOOLS_DIRECTORY=external_content/external_tools
# Optional: discover and auto-load all tools found in REACHY_MINI_EXTERNAL_TOOLS_DIRECTORY,
# even if they are not listed in the selected profile's tools.txt.
# This is convenient for downloaded tools used with built-in/default profiles.
# AUTOLOAD_EXTERNAL_TOOLS=1