# docker-compose.yml — ResearchRadar stack (API + Ollama + one-shot workers)
| services: | |
| # ββ ResearchRadar API ββββββββββββββββββββββββββββββββββββββββββββββ | |
| api: | |
| build: . | |
| ports: | |
| - "8000:8000" | |
| env_file: .env | |
| environment: | |
| SQLITE_DB_PATH: /app/data/researchradar.db | |
| CHROMA_DB_PATH: /app/data/chroma_db | |
| OLLAMA_HOST: http://ollama:11434 | |
| volumes: | |
| - app-data:/app/data | |
| - model-cache:/app/.cache/huggingface | |
| depends_on: | |
| ollama: | |
| condition: service_started | |
| restart: unless-stopped | |
| # ββ Ollama (local LLM) ββββββββββββββββββββββββββββββββββββββββββββ | |
| ollama: | |
| image: ollama/ollama:latest | |
| ports: | |
| - "11434:11434" | |
| volumes: | |
| - ollama-models:/root/.ollama | |
| restart: unless-stopped | |
| # Pull Qwen2.5-14B on first startup (or use a smaller model) | |
| # Run manually: docker compose exec ollama ollama pull qwen2.5:14b | |
| # ββ Ingestion worker (one-shot) ββββββββββββββββββββββββββββββββββββ | |
| ingest: | |
| build: . | |
| env_file: .env | |
| environment: | |
| SQLITE_DB_PATH: /app/data/researchradar.db | |
| CHROMA_DB_PATH: /app/data/chroma_db | |
| volumes: | |
| - app-data:/app/data | |
| - model-cache:/app/.cache/huggingface | |
| entrypoint: ["python", "scripts/setup_db.py"] | |
| profiles: | |
| - setup | |
| # ββ Enrichment worker (one-shot) βββββββββββββββββββββββββββββββββββ | |
| enrich: | |
| build: . | |
| env_file: .env | |
| environment: | |
| SQLITE_DB_PATH: /app/data/researchradar.db | |
| CHROMA_DB_PATH: /app/data/chroma_db | |
| OLLAMA_HOST: http://ollama:11434 | |
| volumes: | |
| - app-data:/app/data | |
| - model-cache:/app/.cache/huggingface | |
| entrypoint: ["python", "scripts/enrich.py", "--mode", "llm", "--backend", "groq", "--show-stats"] | |
| depends_on: | |
| - ollama | |
| profiles: | |
| - tools | |
| volumes: | |
| app-data: | |
| model-cache: | |
| ollama-models: | |