# Docker Compose stack: an LLM-backed API (`backend`) plus a Vite-served
# frontend (`frontend`). The source was collapsed onto a single line, which
# is invalid YAML ("mapping values are not allowed in this context");
# this restores the intended block structure with identical values.
services:
  backend:
    build:
      context: ./backend
    environment:
      # LLM_API_KEY has no default: Compose substitutes an empty string and
      # warns if it is unset in the host environment / .env file.
      - LLM_API_KEY=${LLM_API_KEY}
      - LLM_MODEL=${LLM_MODEL:-gpt-4o-mini}
      - LLM_API_BASE=${LLM_API_BASE:-https://api.openai.com}
      - PORT=8080
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
      - "8080:8080"
    restart: unless-stopped

  frontend:
    build:
      context: ./frontend
    environment:
      # NOTE(review): Vite inlines VITE_* variables at *build* time; setting
      # this as a runtime container env var only works if the frontend image
      # reads it at startup — confirm against ./frontend's Dockerfile.
      # Also assumes the browser (not the container) resolves localhost:8080.
      - VITE_API_BASE=http://localhost:8080
    ports:
      - "4173:4173"
    depends_on:
      # Ordering only: waits for backend to *start*, not to be healthy.
      - backend
    restart: unless-stopped