---
# Docker Compose stack for the local GPT app.
# Two services share the same image build and model caches:
#   - gpt-local:     interactive chat in the terminal (attach via tty/stdin)
#   - gpt-local-web: web interface published on host port 7860
version: '3.8'

services:
  # Terminal chat service. No ports are published here: the original file
  # mapped "7860:7860" on BOTH services, which makes `docker compose up`
  # fail with a host-port bind conflict; the terminal REPL needs no port.
  gpt-local:
    build: .
    container_name: gpt-local
    volumes:
      # Persist the model cache between container runs
      - ./models_cache:/app/models_cache
      - ~/.cache/huggingface:/root/.cache/huggingface
    environment:
      - PYTHONPATH=/app
      - HF_HOME=/root/.cache/huggingface
      - HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}
      - GITHUB_TOKEN=${GITHUB_TOKEN}
      - DEFAULT_MODEL=${DEFAULT_MODEL:-microsoft/DialoGPT-small}
      - DEVICE=${DEVICE:-auto}
    # Keep a TTY and stdin open so the chat loop is usable via `docker attach`
    tty: true
    stdin_open: true
    command: python3 chat_terminal.py

  # Web interface service (port 7860 — presumably a Gradio app, since that
  # is Gradio's default port; confirm against main.py).
  gpt-local-web:
    build: .
    container_name: gpt-local-web
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits
      - "7860:7860"
    volumes:
      # Persist the model cache between container runs
      - ./models_cache:/app/models_cache
      - ~/.cache/huggingface:/root/.cache/huggingface
    environment:
      - PYTHONPATH=/app
      - HF_HOME=/root/.cache/huggingface
      - HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}
      - GITHUB_TOKEN=${GITHUB_TOKEN}
      - DEFAULT_MODEL=${DEFAULT_MODEL:-microsoft/DialoGPT-small}
      - DEVICE=${DEVICE:-auto}
    command: python3 main.py