diff --git a/.gitattributes b/.gitattributes index b0b27ac4c1487f9d1280dd8002baf1c4116168a3..a3925ca6071e8689d091ac37573bc6ff061ae744 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4087,3 +4087,65 @@ platform/dbops/binaries/go/go/bin/go filter=lfs diff=lfs merge=lfs -text platform/dbops/binaries/go/go/bin/gofmt filter=lfs diff=lfs merge=lfs -text platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/el.dat filter=lfs diff=lfs merge=lfs -text platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/en.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/eo.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/es.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/et.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/eu.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fa.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ff_Adlm.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fi.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fil.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fo.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fr.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/frr.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/fy.dat filter=lfs diff=lfs 
merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ga.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/gd.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/gl.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/gu.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ha.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/he.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/hi.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/hr.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/hsb.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/hu.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/hy.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ia.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/id.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/is.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/it.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ja.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/jv.dat filter=lfs 
diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ka.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kab.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kgp.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kk.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kk_Arab.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/km.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kn.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ko.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/kok.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ks.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ku.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ky.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/lb.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/lij.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/lo.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/lt.dat filter=lfs diff=lfs merge=lfs -text 
+platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/lv.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/mk.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ml.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/mn.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/mr.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ms.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/my.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ne.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/nl.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/no.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/nqo.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/or.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/pa.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/pcm.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/pl.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/ps.dat filter=lfs diff=lfs merge=lfs -text +platform/dataops/dto/.venv/lib/python3.12/site-packages/babel/locale-data/pt.dat filter=lfs diff=lfs 
merge=lfs -text diff --git a/novas/novacore-QuantumGate/.env b/novas/novacore-QuantumGate/.env new file mode 100644 index 0000000000000000000000000000000000000000..0d58ac17cde322d45d1a7417385d0e31af580382 --- /dev/null +++ b/novas/novacore-QuantumGate/.env @@ -0,0 +1,83 @@ +# Vast.ai Credentials for India-2xH200 Server +VAST_AI_API_KEY=a236f77abde627d0a71658261fdf290c5fcae708d0f5ecc9f0caf2a9529b7547 + +# Server Configuration +SERVER_NAME=India-2xH200 +SERVER_TYPE=H200 +SERVER_LOCATION=India +INTERNAL_IP_RANGE=172.16.0.0/12 + +# SSH Connection Details +SSH_HOST=ssh8.vast.ai +SSH_PORT=26907 +SSH_USER=root + +# Internal Services (Memory Services) +MEMORY_SERVICE_1=localhost:18000 +MEMORY_SERVICE_2=localhost:18010 +MEMORY_SERVICE_3=localhost:18011 +MEMORY_SERVICE_4=localhost:18012 +MEMORY_SERVICE_5=localhost:17000 + +# Bandwidth Optimization (for cost control) +BANDWIDTH_COST_PER_GB=0.66 +MAX_BANDWIDTH_MB=1000 + +# Repository Configuration +REPO_NAME=novacore-QuantumGate +REPO_OWNER=adaptnova +REPO_BRANCH_MAIN=main +REPO_BRANCH_DEV=dev +REPO_BRANCH_FEATURE=feature +REPO_BRANCH_ENV=env +REPO_BRANCH_SERVER=server + +# Infrastructure Settings +INFRA_OPS_ROLE=Head of InfraOps +DEPLOYMENT_TYPE=bare-metal +CONTAINER_RUNTIME=dockerless +NETWORK_MODE=internal-only + +# Security Settings +SSH_KEY_ALGORITHM=ed25519 +FIREWALL_ENABLED=true +AUTO_UPDATE=false + +# Monitoring +HEALTH_CHECK_INTERVAL=300 +BANDWIDTH_MONITOR_ENABLED=true +LOG_RETENTION_DAYS=30 + +# ------------------------------------------------------------ +# Registry & InfraOps Settings (QuantumGate) +# ------------------------------------------------------------ +# Personal Nova workspace for this repo +PERSONAL_NOVA_DIR=/data/adaptai/novas/novacore-QuantumGate + +# Accepted InfraOps location on platform +# InfraOps image home (platform) +INFRAOPS_DIR=/data/adaptai/platform/infraops/docker/images + +# Platform InfraOps repo (private) and branching model +PLATFORM_INFRAOPS_REPO=adaptnova/platform-infraops 
+PLATFORM_INFRAOPS_REPO_VISIBILITY=private +PLATFORM_INFRAOPS_BRANCHES=main,server/* + +# Docker registry access (Docker Hub) +DOCKER_REGISTRY=docker.io +DOCKER_NAMESPACE=adaptchase +DOCKER_USERNAME=adaptchase +DOCKER_REGISTRY_PERMS=read,write,delete + +# Personal access token for Docker Hub CLI login +# Usage: +# echo "$DOCKER_HUB_PAT" | docker login -u "$DOCKER_USERNAME" --password-stdin +DOCKER_HUB_PAT=dckr_pat_r9rW9wyc6KQpo3C4h0Ha9x7X3Tw + +# GHCR settings (secondary registry) +GHCR_REGISTRY=ghcr.io +GHCR_NAMESPACE=adaptnova + +# Canonical image names (tags resolved in CI) +ELIZABETH_SERVE_IMAGE=${DOCKER_REGISTRY}/${DOCKER_NAMESPACE}/elizabeth-serve +ELIZABETH_TRAIN_IMAGE=${DOCKER_REGISTRY}/${DOCKER_NAMESPACE}/elizabeth-train diff --git a/novas/novacore-prime/.env.example b/novas/novacore-prime/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..d629024bad015d3c28e03315c83eeec5c218d31b --- /dev/null +++ b/novas/novacore-prime/.env.example @@ -0,0 +1,31 @@ +# Copy to .env and adjust values as needed + +# API server +HOST=0.0.0.0 +PORT=8000 +API_KEY=change-me-please +METRICS_KEY= + +# Model loading +# Prefer local path if available, else HF model id +# MODEL_PATH=/abs/path/to/qwen3-8b-elizabeth-simple +HF_MODEL_ID=LevelUp2x/qwen3-8b-elizabeth-simple + +# CORS allowlist (comma-separated) +ALLOW_ORIGINS=http://localhost,http://127.0.0.1 + +# Redis / Dragonfly +REDIS_HOST=localhost +REDIS_PORT=18000 +# REDIS_PASSWORD= + +# Profiles root +NOVA_PROFILES_ROOT=/nfs/novas/profiles + +# Client settings +API_BASE=http://localhost:8000 + +# Evaluation +EVAL_ROOT=/data/adaptai/projects/elizabeth/evaluation +EVAL_LIMIT=0 +EVAL_STRICT=0 diff --git a/novas/novacore-promethius/.env.example b/novas/novacore-promethius/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..4e35ca207ce91c5876a9224acaab187cab2d306d --- /dev/null +++ b/novas/novacore-promethius/.env.example @@ -0,0 +1,45 @@ +### NovaCore Prometheus environment 
(copy to .env and edit) + +# Canonical DTO repo path (do not duplicate) +DTO_PATH=/data/adaptai/platform/dataops/dto + +# DTO environment overlay to validate/generate +# Options: development | staging | production +DTO_ENVIRONMENT=production + +# Server identity for inventory tracking +SERVER_NAME=vast1-2 + +# Secrets directory (env files only, not committed) +SECRETS_DIR=/data/adaptai/secrets + +# Optional Python settings +# VENV_DIR=/data/adaptai/platform/dataops/dto/.venv +# PYTHON_BIN=python3 + +# Non-secret service endpoints (examples) +JANUSGRAPH_HOST=127.0.0.1 +JANUSGRAPH_PORT=8182 +SCYLLA_HOSTS=127.0.0.1 +SCYLLA_PORT=9042 +SCYLLA_KEYSPACE=dto +NATS_URL=nats://127.0.0.1:4222 +DRAGONFLY_ENDPOINTS=127.0.0.1:18000 + +# ------------------------------------------------------------ +# Registry settings (no secrets here; use GitHub Secrets) +# ------------------------------------------------------------ +DOCKER_REGISTRY=docker.io +DOCKER_NAMESPACE=adaptchase +# DOCKERHUB_USERNAME=adaptchase # set in GitHub Secrets +# DOCKERHUB_TOKEN= # set in GitHub Secrets + +GHCR_REGISTRY=ghcr.io +GHCR_NAMESPACE=adaptnova + +# ------------------------------------------------------------ +# Vault addresses (configure at runtime, not committed) +# ------------------------------------------------------------ +# VAULT_ADDR=https:// +# VAULT_TOKEN= + diff --git a/novas/novacore-quartz-glm45v/.env.cloudflare b/novas/novacore-quartz-glm45v/.env.cloudflare new file mode 100644 index 0000000000000000000000000000000000000000..f1624e5b668c2326cc73937975dc326251c2adf6 --- /dev/null +++ b/novas/novacore-quartz-glm45v/.env.cloudflare @@ -0,0 +1,25 @@ +# Cloudflare Configuration - KEEP SECURE +# Domain: adaptdev.ai + +# API Credentials +CLOUDFLARE_GLOBAL_API_KEY=a37d2db4459a2123f98ab635a2ac9a85c0380 
+CLOUDFLARE_ORIGIN_CA_KEY=v1.0-1d99fdecccc8b700e7bc44b4-0ba5f156f123c87a36e036b63cc1709194bb2c70a8cb5e0a98d13402f805a947227065152d4a6c7fd22ae40f0773fe617f8f6fa9ea06d5802c69b7cac4a1c0afb38f4d02129fd39c97 +CLOUDFLARE_ADMIN_API_TOKEN=cH-8tuZdztKZyYvc2JlJRk78_TDksULXJ2WesbcC +CLOUDFLARE_R2_API_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY +CLOUDFLARE_WORKERS_R2_TOKEN=O-SGjpen4e9NdYJso4LCZPYpMPb_R9N-nZ6QGopY +CLOUDFLARE_WORKERS_FULL_TOKEN=uEhieo_hNeJ-yR3L8LZK2qKg5kjSkAqKOnAl5rob +CLOUDFLARE_WORKERS_AI_TOKEN=YOUR_NEW_WORKERS_AI_TOKEN_HERE + +# Zone Configuration +CLOUDFLARE_ZONE_ID=7981a8217e9e9fc828a6ed793d81ad6c +CLOUDFLARE_ACCOUNT_ID=9bd70e8eb28637e723c8984b8c85c81e +CLOUDFLARE_DOMAIN=adaptdev.ai + +# R2 S3 API Credentials +CLOUDFLARE_R2_ACCESS_KEY=e5c4452f8acdd362720e38d8b75707cd +CLOUDFLARE_R2_SECRET_KEY=b67d1ba6b2bfad98837a912eb012061b023c73524c1d29afde8a10d16a3f7554 + +# Service Endpoints +CLOUDFLARE_R2_ENDPOINT=https://9bd70e8eb28637e723c8984b8c85c81e.r2.cloudflarestorage.com +CLOUDFLARE_WORKERS_SUBDOMAIN=adaptdev +CLOUDFLARE_AI_GATEWAY=https://gateway.ai.cloudflare.com/v1/9bd70e8eb28637e723c8984b8c85c81e \ No newline at end of file diff --git a/novas/novacore-zephyr/claude-code-router/.env b/novas/novacore-zephyr/claude-code-router/.env new file mode 100644 index 0000000000000000000000000000000000000000..47db21b15736a87440a336e6183160c482eb817d --- /dev/null +++ b/novas/novacore-zephyr/claude-code-router/.env @@ -0,0 +1,48 @@ +# Kimi (Moonshot) Configuration (Primary - 128K context) +ANTHROPIC_BASE_URL=https://api.moonshot.cn/v1/chat/completions +ANTHROPIC_MODEL=kimi-k2-turbo-preview +ANTHROPIC_AUTH_TOKEN=sk-nTLc2WGLFgpXRZ76IPZPxTVJM7mhR8XKUDRd0GovsnliNbJ9 +ANTHROPIC_API_KEY=sk-nTLc2WGLFgpXRZ76IPZPxTVJM7mhR8XKUDRd0GovsnliNbJ9 + +# Direct Kimi +MOONSHOT_BASE_URL=https://api.moonshot.cn/v1/chat/completions +MOONSHOT_API_KEY=sk-nTLc2WGLFgpXRZ76IPZPxTVJM7mhR8XKUDRd0GovsnliNbJ9 + +# CCR Configuration - Universal Kimi +CCR_DEFAULT_PROVIDER=moonshot 
+CCR_DEFAULT_MODEL=kimi-k2-turbo-preview +CCR_MAX_TOKENS=131072 + +# OpenAI (Fallback) +OPENAI_API_KEY=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 +OPENAI_API_KEY_2=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 + +# Groq +GROQ_API_KEY=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 +LM_GROQ_API_KEY=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 + +# Moonshot (Kimi) +MOONSHOT_AI_API_KEY=sk-nTLc2WGLFgpXRZ76IPZPxTVJM7mhR8XKUDRd0GovsnliNbJ9 + +# Replicate +REPLICATE_API_KEY=r8_4EeWRtznI4zF6RTFaQKdGwzWQbCPCTI4gA0tb + +# Mistral +MISTRAL_API_KEY=F63TOQT71mRALyMXE2sLRXXsV4pr8Xtu +MISTRAL_MODEL=open-mixtral-8x22b + +# Gemini AI Studio +GEMEINI_AI_STUDIO_API_KEY=AIzaSyBl3dMJ6WkapvZmtt_ai2r-9Ly0Tw7UZSk + +# DeepSeek +DEEPSEEK_base_url=https://api.deepseek.com +DEEPSEEK_API_KEY=sk-a24089cabe414abbad6ca32aa15a6841 + +# z.ai +Z_AI_USER_ID=78661754977918738 +Z_AI_BASE_URL=https://api.z.ai/api/paas/v4/ +z_ai_api_key=4e020facea084849a3a27ba7eaba07e6.oWmgUFrwNL1wQdoP + +# Grok (x.ai) +Grok_PERSONAL_API_Key=xai-0ajh9dOLn3UIYHJ7sZdv5VWaxcyKru1Xv7IXpSA0oG4ivIZJJZHuwubHC9uDY1OibQYuBDs5GcyppCqh +GROK_API_KEY=xai-3L1KlS5PJM1m7CrPTv2uEORlhrA0r8Y2BIxM7iDFVZZfkxtzYqAoLNZmKjzI7z8n2zMDkugwd1nr8jLW diff --git a/projects/nova-z/.env.example b/projects/nova-z/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..d629024bad015d3c28e03315c83eeec5c218d31b --- /dev/null +++ b/projects/nova-z/.env.example @@ -0,0 +1,31 @@ +# Copy to .env and adjust values as needed + +# API server +HOST=0.0.0.0 +PORT=8000 +API_KEY=change-me-please +METRICS_KEY= + +# Model loading +# Prefer local path if available, else HF model id +# MODEL_PATH=/abs/path/to/qwen3-8b-elizabeth-simple +HF_MODEL_ID=LevelUp2x/qwen3-8b-elizabeth-simple + +# 
CORS allowlist (comma-separated) +ALLOW_ORIGINS=http://localhost,http://127.0.0.1 + +# Redis / Dragonfly +REDIS_HOST=localhost +REDIS_PORT=18000 +# REDIS_PASSWORD= + +# Profiles root +NOVA_PROFILES_ROOT=/nfs/novas/profiles + +# Client settings +API_BASE=http://localhost:8000 + +# Evaluation +EVAL_ROOT=/data/adaptai/projects/elizabeth/evaluation +EVAL_LIMIT=0 +EVAL_STRICT=0 diff --git a/projects/oui-max/.env b/projects/oui-max/.env new file mode 100644 index 0000000000000000000000000000000000000000..a5a7d07c80a7ccf4a664a87d9f9a19f8e353a119 --- /dev/null +++ b/projects/oui-max/.env @@ -0,0 +1,27 @@ +WEBUI_ROOT=/data/adaptai/migrate/vast/workspace-vast1-2/webui +WEBUI_DB=${WEBUI_ROOT}/webui.db +OUI_MAX_ROOT=/data/adaptai/projects/oui-max + +# Data directories rooted under WEBUI_ROOT for portability +OUI_ARTIFACTS_DIR=${WEBUI_ROOT}/artifacts +OUI_WEB_DIR=${OUI_WEB_DIR}/web +OUI_PAGES_DIR=${OUI_WEB_DIR}/pages +OUI_PDF_DIR=${OUI_WEB_DIR}/pdf +OUI_TMP_DIR=${OUI_WEB_DIR}/tmp +OUI_RAG_DIR=${WEBUI_ROOT}/rag +OUI_MLFLOW_LITE_DIR=${WEBUI_ROOT}/mlflow_lite +OUI_INPUT_DIR=${WEBUI_ROOT}/input + +# Optional: enable DB writes for db_admin.exec +# OUI_DB_ADMIN_WRITE=1 + +# Research mode: disable path guardrails (dangerous). Set to 1 for no constraints. 
+OUI_DISABLE_GUARDS=1 + +# Default LLM model for local coordination (Ollama) +OUI_DEFAULT_MODEL=qwen3:8b + +# Disable authentication and persistent config for no-auth setup +WEBUI_AUTH=False +ENABLE_PERSISTENT_CONFIG=False +OLLAMA_URL=http://127.0.0.1:21434 diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.test.ts b/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..fc1ffb64cdafd360fd1a0aac6185f6ebfe2f4ce3 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.test.ts @@ -0,0 +1,122 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import yargs from 'yargs'; +import { addCommand } from './add.js'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +vi.mock('fs/promises', () => ({ + readFile: vi.fn(), + writeFile: vi.fn(), +})); + +vi.mock('../../config/settings.js', async () => { + const actual = await vi.importActual('../../config/settings.js'); + return { + ...actual, + loadSettings: vi.fn(), + }; +}); + +const mockedLoadSettings = loadSettings as vi.Mock; + +describe('mcp add command', () => { + let parser: yargs.Argv; + let mockSetValue: vi.Mock; + + beforeEach(() => { + vi.resetAllMocks(); + const yargsInstance = yargs([]).command(addCommand); + parser = yargsInstance; + mockSetValue = vi.fn(); + mockedLoadSettings.mockReturnValue({ + forScope: () => ({ settings: {} }), + setValue: mockSetValue, + }); + }); + + it('should add a stdio server to project settings', async () => { + await parser.parseAsync( + 'add my-server /path/to/server arg1 arg2 -e FOO=bar', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'my-server': { + command: '/path/to/server', + args: ['arg1', 'arg2'], + env: { FOO: 'bar' }, + }, + }, + ); + }); + + it('should add an sse server to user settings', async () => { + await parser.parseAsync( + 'add 
--transport sse sse-server https://example.com/sse-endpoint --scope user -H "X-API-Key: your-key"', + ); + + expect(mockSetValue).toHaveBeenCalledWith(SettingScope.User, 'mcpServers', { + 'sse-server': { + url: 'https://example.com/sse-endpoint', + headers: { 'X-API-Key': 'your-key' }, + }, + }); + }); + + it('should add an http server to project settings', async () => { + await parser.parseAsync( + 'add --transport http http-server https://example.com/mcp -H "Authorization: Bearer your-token"', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'http-server': { + httpUrl: 'https://example.com/mcp', + headers: { Authorization: 'Bearer your-token' }, + }, + }, + ); + }); + + it('should handle MCP server args with -- separator', async () => { + await parser.parseAsync( + 'add my-server npx -- -y http://example.com/some-package', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'my-server': { + command: 'npx', + args: ['-y', 'http://example.com/some-package'], + }, + }, + ); + }); + + it('should handle unknown options as MCP server args', async () => { + await parser.parseAsync( + 'add test-server npx -y http://example.com/some-package', + ); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + { + 'test-server': { + command: 'npx', + args: ['-y', 'http://example.com/some-package'], + }, + }, + ); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.ts b/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.ts new file mode 100644 index 0000000000000000000000000000000000000000..9523d7a4607207ed22010f56aacf4d25341a8354 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/add.ts @@ -0,0 +1,222 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp add' command +import type { CommandModule } from 'yargs'; +import { 
loadSettings, SettingScope } from '../../config/settings.js'; +import { MCPServerConfig } from '@qwen-code/qwen-code-core'; + +async function addMcpServer( + name: string, + commandOrUrl: string, + args: Array | undefined, + options: { + scope: string; + transport: string; + env: string[] | undefined; + header: string[] | undefined; + timeout?: number; + trust?: boolean; + description?: string; + includeTools?: string[]; + excludeTools?: string[]; + }, +) { + const { + scope, + transport, + env, + header, + timeout, + trust, + description, + includeTools, + excludeTools, + } = options; + const settingsScope = + scope === 'user' ? SettingScope.User : SettingScope.Workspace; + const settings = loadSettings(process.cwd()); + + let newServer: Partial = {}; + + const headers = header?.reduce( + (acc, curr) => { + const [key, ...valueParts] = curr.split(':'); + const value = valueParts.join(':').trim(); + if (key.trim() && value) { + acc[key.trim()] = value; + } + return acc; + }, + {} as Record, + ); + + switch (transport) { + case 'sse': + newServer = { + url: commandOrUrl, + headers, + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + case 'http': + newServer = { + httpUrl: commandOrUrl, + headers, + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + case 'stdio': + default: + newServer = { + command: commandOrUrl, + args: args?.map(String), + env: env?.reduce( + (acc, curr) => { + const [key, value] = curr.split('='); + if (key && value) { + acc[key] = value; + } + return acc; + }, + {} as Record, + ), + timeout, + trust, + description, + includeTools, + excludeTools, + }; + break; + } + + const existingSettings = settings.forScope(settingsScope).settings; + const mcpServers = existingSettings.mcpServers || {}; + + const isExistingServer = !!mcpServers[name]; + if (isExistingServer) { + console.log( + `MCP server "${name}" is already configured within ${scope} settings.`, + ); + } + + mcpServers[name] = 
newServer as MCPServerConfig; + + settings.setValue(settingsScope, 'mcpServers', mcpServers); + + if (isExistingServer) { + console.log(`MCP server "${name}" updated in ${scope} settings.`); + } else { + console.log( + `MCP server "${name}" added to ${scope} settings. (${transport})`, + ); + } +} + +export const addCommand: CommandModule = { + command: 'add [args...]', + describe: 'Add a server', + builder: (yargs) => + yargs + .usage('Usage: gemini mcp add [options] [args...]') + .parserConfiguration({ + 'unknown-options-as-args': true, // Pass unknown options as server args + 'populate--': true, // Populate server args after -- separator + }) + .positional('name', { + describe: 'Name of the server', + type: 'string', + demandOption: true, + }) + .positional('commandOrUrl', { + describe: 'Command (stdio) or URL (sse, http)', + type: 'string', + demandOption: true, + }) + .option('scope', { + alias: 's', + describe: 'Configuration scope (user or project)', + type: 'string', + default: 'project', + choices: ['user', 'project'], + }) + .option('transport', { + alias: 't', + describe: 'Transport type (stdio, sse, http)', + type: 'string', + default: 'stdio', + choices: ['stdio', 'sse', 'http'], + }) + .option('env', { + alias: 'e', + describe: 'Set environment variables (e.g. -e KEY=value)', + type: 'array', + string: true, + }) + .option('header', { + alias: 'H', + describe: + 'Set HTTP headers for SSE and HTTP transports (e.g. 
-H "X-Api-Key: abc123" -H "Authorization: Bearer abc123")', + type: 'array', + string: true, + }) + .option('timeout', { + describe: 'Set connection timeout in milliseconds', + type: 'number', + }) + .option('trust', { + describe: + 'Trust the server (bypass all tool call confirmation prompts)', + type: 'boolean', + }) + .option('description', { + describe: 'Set the description for the server', + type: 'string', + }) + .option('include-tools', { + describe: 'A comma-separated list of tools to include', + type: 'array', + string: true, + }) + .option('exclude-tools', { + describe: 'A comma-separated list of tools to exclude', + type: 'array', + string: true, + }) + .middleware((argv) => { + // Handle -- separator args as server args if present + if (argv['--']) { + const existingArgs = (argv['args'] as Array) || []; + argv['args'] = [...existingArgs, ...(argv['--'] as string[])]; + } + }), + handler: async (argv) => { + await addMcpServer( + argv['name'] as string, + argv['commandOrUrl'] as string, + argv['args'] as Array, + { + scope: argv['scope'] as string, + transport: argv['transport'] as string, + env: argv['env'] as string[], + header: argv['header'] as string[], + timeout: argv['timeout'] as number | undefined, + trust: argv['trust'] as boolean | undefined, + description: argv['description'] as string | undefined, + includeTools: argv['includeTools'] as string[] | undefined, + excludeTools: argv['excludeTools'] as string[] | undefined, + }, + ); + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.test.ts b/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..c268fdbd70c81be34846f732df1c437cefbe2672 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.test.ts @@ -0,0 +1,154 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } 
from 'vitest'; +import { listMcpServers } from './list.js'; +import { loadSettings } from '../../config/settings.js'; +import { loadExtensions } from '../../config/extension.js'; +import { createTransport } from '@qwen-code/qwen-code-core'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; + +vi.mock('../../config/settings.js'); +vi.mock('../../config/extension.js'); +vi.mock('@qwen-code/qwen-code-core'); +vi.mock('@modelcontextprotocol/sdk/client/index.js'); + +const mockedLoadSettings = loadSettings as vi.Mock; +const mockedLoadExtensions = loadExtensions as vi.Mock; +const mockedCreateTransport = createTransport as vi.Mock; +const MockedClient = Client as vi.Mock; + +interface MockClient { + connect: vi.Mock; + ping: vi.Mock; + close: vi.Mock; +} + +interface MockTransport { + close: vi.Mock; +} + +describe('mcp list command', () => { + let consoleSpy: vi.SpyInstance; + let mockClient: MockClient; + let mockTransport: MockTransport; + + beforeEach(() => { + vi.resetAllMocks(); + + consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + + mockTransport = { close: vi.fn() }; + mockClient = { + connect: vi.fn(), + ping: vi.fn(), + close: vi.fn(), + }; + + MockedClient.mockImplementation(() => mockClient); + mockedCreateTransport.mockResolvedValue(mockTransport); + mockedLoadExtensions.mockReturnValue([]); + }); + + afterEach(() => { + consoleSpy.mockRestore(); + }); + + it('should display message when no servers configured', async () => { + mockedLoadSettings.mockReturnValue({ merged: { mcpServers: {} } }); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith('No MCP servers configured.'); + }); + + it('should display different server types with connected status', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { + 'stdio-server': { command: '/path/to/server', args: ['arg1'] }, + 'sse-server': { url: 'https://example.com/sse' }, + 'http-server': { httpUrl: 'https://example.com/http' 
}, + }, + }, + }); + + mockClient.connect.mockResolvedValue(undefined); + mockClient.ping.mockResolvedValue(undefined); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith('Configured MCP servers:\n'); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'stdio-server: /path/to/server arg1 (stdio) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'sse-server: https://example.com/sse (sse) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'http-server: https://example.com/http (http) - Connected', + ), + ); + }); + + it('should display disconnected status when connection fails', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { + 'test-server': { command: '/test/server' }, + }, + }, + }); + + mockClient.connect.mockRejectedValue(new Error('Connection failed')); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'test-server: /test/server (stdio) - Disconnected', + ), + ); + }); + + it('should merge extension servers with config servers', async () => { + mockedLoadSettings.mockReturnValue({ + merged: { + mcpServers: { 'config-server': { command: '/config/server' } }, + }, + }); + + mockedLoadExtensions.mockReturnValue([ + { + config: { + name: 'test-extension', + mcpServers: { 'extension-server': { command: '/ext/server' } }, + }, + }, + ]); + + mockClient.connect.mockResolvedValue(undefined); + mockClient.ping.mockResolvedValue(undefined); + + await listMcpServers(); + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'config-server: /config/server (stdio) - Connected', + ), + ); + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining( + 'extension-server: /ext/server (stdio) - Connected', + ), + ); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.ts 
b/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.ts new file mode 100644 index 0000000000000000000000000000000000000000..57bed6d81989466aab904af1b98caece7fac8aa8 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/list.ts @@ -0,0 +1,139 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp list' command +import type { CommandModule } from 'yargs'; +import { loadSettings } from '../../config/settings.js'; +import { + MCPServerConfig, + MCPServerStatus, + createTransport, +} from '@qwen-code/qwen-code-core'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { loadExtensions } from '../../config/extension.js'; + +const COLOR_GREEN = '\u001b[32m'; +const COLOR_YELLOW = '\u001b[33m'; +const COLOR_RED = '\u001b[31m'; +const RESET_COLOR = '\u001b[0m'; + +async function getMcpServersFromConfig(): Promise< + Record +> { + const settings = loadSettings(process.cwd()); + const extensions = loadExtensions(process.cwd()); + const mcpServers = { ...(settings.merged.mcpServers || {}) }; + for (const extension of extensions) { + Object.entries(extension.config.mcpServers || {}).forEach( + ([key, server]) => { + if (mcpServers[key]) { + return; + } + mcpServers[key] = { + ...server, + extensionName: extension.config.name, + }; + }, + ); + } + return mcpServers; +} + +async function testMCPConnection( + serverName: string, + config: MCPServerConfig, +): Promise { + const client = new Client({ + name: 'mcp-test-client', + version: '0.0.1', + }); + + let transport; + try { + // Use the same transport creation logic as core + transport = await createTransport(serverName, config, false); + } catch (_error) { + await client.close(); + return MCPServerStatus.DISCONNECTED; + } + + try { + // Attempt actual MCP connection with short timeout + await client.connect(transport, { timeout: 5000 }); // 5s timeout + + // Test basic MCP protocol by pinging the server + await 
client.ping(); + + await client.close(); + return MCPServerStatus.CONNECTED; + } catch (_error) { + await transport.close(); + return MCPServerStatus.DISCONNECTED; + } +} + +async function getServerStatus( + serverName: string, + server: MCPServerConfig, +): Promise { + // Test all server types by attempting actual connection + return await testMCPConnection(serverName, server); +} + +export async function listMcpServers(): Promise { + const mcpServers = await getMcpServersFromConfig(); + const serverNames = Object.keys(mcpServers); + + if (serverNames.length === 0) { + console.log('No MCP servers configured.'); + return; + } + + console.log('Configured MCP servers:\n'); + + for (const serverName of serverNames) { + const server = mcpServers[serverName]; + + const status = await getServerStatus(serverName, server); + + let statusIndicator = ''; + let statusText = ''; + switch (status) { + case MCPServerStatus.CONNECTED: + statusIndicator = COLOR_GREEN + '✓' + RESET_COLOR; + statusText = 'Connected'; + break; + case MCPServerStatus.CONNECTING: + statusIndicator = COLOR_YELLOW + '…' + RESET_COLOR; + statusText = 'Connecting'; + break; + case MCPServerStatus.DISCONNECTED: + default: + statusIndicator = COLOR_RED + '✗' + RESET_COLOR; + statusText = 'Disconnected'; + break; + } + + let serverInfo = `${serverName}: `; + if (server.httpUrl) { + serverInfo += `${server.httpUrl} (http)`; + } else if (server.url) { + serverInfo += `${server.url} (sse)`; + } else if (server.command) { + serverInfo += `${server.command} ${server.args?.join(' ') || ''} (stdio)`; + } + + console.log(`${statusIndicator} ${serverInfo} - ${statusText}`); + } +} + +export const listCommand: CommandModule = { + command: 'list', + describe: 'List all configured MCP servers', + handler: async () => { + await listMcpServers(); + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.test.ts b/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.test.ts new file mode 100644 
index 0000000000000000000000000000000000000000..eb7dedce50e1d62661f94baf1f20eab26a0bd00a --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.test.ts @@ -0,0 +1,69 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import yargs from 'yargs'; +import { loadSettings, SettingScope } from '../../config/settings.js'; +import { removeCommand } from './remove.js'; + +vi.mock('fs/promises', () => ({ + readFile: vi.fn(), + writeFile: vi.fn(), +})); + +vi.mock('../../config/settings.js', async () => { + const actual = await vi.importActual('../../config/settings.js'); + return { + ...actual, + loadSettings: vi.fn(), + }; +}); + +const mockedLoadSettings = loadSettings as vi.Mock; + +describe('mcp remove command', () => { + let parser: yargs.Argv; + let mockSetValue: vi.Mock; + let mockSettings: Record; + + beforeEach(() => { + vi.resetAllMocks(); + const yargsInstance = yargs([]).command(removeCommand); + parser = yargsInstance; + mockSetValue = vi.fn(); + mockSettings = { + mcpServers: { + 'test-server': { + command: 'echo "hello"', + }, + }, + }; + mockedLoadSettings.mockReturnValue({ + forScope: () => ({ settings: mockSettings }), + setValue: mockSetValue, + }); + }); + + it('should remove a server from project settings', async () => { + await parser.parseAsync('remove test-server'); + + expect(mockSetValue).toHaveBeenCalledWith( + SettingScope.Workspace, + 'mcpServers', + {}, + ); + }); + + it('should show a message if server not found', async () => { + const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); + await parser.parseAsync('remove non-existent-server'); + + expect(mockSetValue).not.toHaveBeenCalled(); + expect(consoleSpy).toHaveBeenCalledWith( + 'Server "non-existent-server" not found in project settings.', + ); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.ts 
b/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.ts new file mode 100644 index 0000000000000000000000000000000000000000..e05478e37ec7dae31286646c73f1526f333522ab --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/commands/mcp/remove.ts @@ -0,0 +1,60 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// File for 'gemini mcp remove' command +import type { CommandModule } from 'yargs'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +async function removeMcpServer( + name: string, + options: { + scope: string; + }, +) { + const { scope } = options; + const settingsScope = + scope === 'user' ? SettingScope.User : SettingScope.Workspace; + const settings = loadSettings(process.cwd()); + + const existingSettings = settings.forScope(settingsScope).settings; + const mcpServers = existingSettings.mcpServers || {}; + + if (!mcpServers[name]) { + console.log(`Server "${name}" not found in ${scope} settings.`); + return; + } + + delete mcpServers[name]; + + settings.setValue(settingsScope, 'mcpServers', mcpServers); + + console.log(`Server "${name}" removed from ${scope} settings.`); +} + +export const removeCommand: CommandModule = { + command: 'remove ', + describe: 'Remove a server', + builder: (yargs) => + yargs + .usage('Usage: gemini mcp remove [options] ') + .positional('name', { + describe: 'Name of the server', + type: 'string', + demandOption: true, + }) + .option('scope', { + alias: 's', + describe: 'Configuration scope (user or project)', + type: 'string', + default: 'project', + choices: ['user', 'project'], + }), + handler: async (argv) => { + await removeMcpServer(argv['name'] as string, { + scope: argv['scope'] as string, + }); + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts new file mode 100644 index 
0000000000000000000000000000000000000000..1a4c0c6b75bffc6b13608c1341c1f77a872f8b5d --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts @@ -0,0 +1,41 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { DefaultArgumentProcessor } from './argumentProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { describe, it, expect } from 'vitest'; + +describe('Argument Processors', () => { + describe('DefaultArgumentProcessor', () => { + const processor = new DefaultArgumentProcessor(); + + it('should append the full command if args are provided', async () => { + const prompt = 'Parse the command.'; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand arg1 "arg two"', + name: 'mycommand', + args: 'arg1 "arg two"', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('Parse the command.\n\n/mycommand arg1 "arg two"'); + }); + + it('should NOT append the full command if no args are provided', async () => { + const prompt = 'Parse the command.'; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand', + name: 'mycommand', + args: '', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('Parse the command.'); + }); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.ts b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.ts new file mode 100644 index 0000000000000000000000000000000000000000..9d5fc36910f38146dca0fd530754059c14d3b2ab --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/argumentProcessor.ts @@ -0,0 +1,23 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { IPromptProcessor } from './types.js'; 
+import { CommandContext } from '../../ui/commands/types.js'; + +/** + * Appends the user's full command invocation to the prompt if arguments are + * provided, allowing the model to perform its own argument parsing. + * + * This processor is only used if the prompt does NOT contain {{args}}. + */ +export class DefaultArgumentProcessor implements IPromptProcessor { + async process(prompt: string, context: CommandContext): Promise { + if (context.invocation!.args) { + return `${prompt}\n\n${context.invocation!.raw}`; + } + return prompt; + } +} diff --git a/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/shellProcessor.test.ts b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/shellProcessor.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..f557c795a1b8f55fdcf6f099381fa57666549ce0 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/services/prompt-processors/shellProcessor.test.ts @@ -0,0 +1,709 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest'; +import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { CommandContext } from '../../ui/commands/types.js'; +import { ApprovalMode, Config } from '@qwen-code/qwen-code-core'; +import os from 'os'; +import { quote } from 'shell-quote'; + +// Helper function to determine the expected escaped string based on the current OS, +// mirroring the logic in the actual `escapeShellArg` implementation. This makes +// our tests robust and platform-agnostic. 
+function getExpectedEscapedArgForPlatform(arg: string): string { + if (os.platform() === 'win32') { + const comSpec = (process.env['ComSpec'] || 'cmd.exe').toLowerCase(); + const isPowerShell = + comSpec.endsWith('powershell.exe') || comSpec.endsWith('pwsh.exe'); + + if (isPowerShell) { + return `'${arg.replace(/'/g, "''")}'`; + } else { + return `"${arg.replace(/"/g, '""')}"`; + } + } else { + return quote([arg]); + } +} + +const mockCheckCommandPermissions = vi.hoisted(() => vi.fn()); +const mockShellExecute = vi.hoisted(() => vi.fn()); + +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + checkCommandPermissions: mockCheckCommandPermissions, + ShellExecutionService: { + execute: mockShellExecute, + }, + }; +}); + +const SUCCESS_RESULT = { + output: 'default shell output', + exitCode: 0, + error: null, + aborted: false, + signal: null, +}; + +describe('ShellProcessor', () => { + let context: CommandContext; + let mockConfig: Partial; + + beforeEach(() => { + vi.clearAllMocks(); + + mockConfig = { + getTargetDir: vi.fn().mockReturnValue('/test/dir'), + getApprovalMode: vi.fn().mockReturnValue(ApprovalMode.DEFAULT), + getShouldUseNodePtyShell: vi.fn().mockReturnValue(false), + }; + + context = createMockCommandContext({ + invocation: { + raw: '/cmd default args', + name: 'cmd', + args: 'default args', + }, + services: { + config: mockConfig as Config, + }, + session: { + sessionShellAllowlist: new Set(), + }, + }); + + mockShellExecute.mockReturnValue({ + result: Promise.resolve(SUCCESS_RESULT), + }); + + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + }); + + it('should throw an error if config is missing', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{ls}'; + const contextWithoutConfig = createMockCommandContext({ + services: { + config: null, + }, + }); + + await expect( + 
processor.process(prompt, contextWithoutConfig), + ).rejects.toThrow(/Security configuration not loaded/); + }); + + it('should not change the prompt if no shell injections are present', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'This is a simple prompt with no injections.'; + const result = await processor.process(prompt, context); + expect(result).toBe(prompt); + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should process a single valid shell injection if allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'The current status is: !{git status}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'On branch main' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'git status', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + 'git status', + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toBe('The current status is: On branch main'); + }); + + it('should process multiple valid shell injections if all are allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{git status} in !{pwd}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'On branch main', + }), + }) + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: '/usr/home' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledTimes(2); + 
expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toBe('On branch main in /usr/home'); + }); + + it('should throw ConfirmationRequiredError if a command is not allowed in default mode', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something dangerous: !{rm -rf /}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + }); + + it('should NOT throw ConfirmationRequiredError if a command is not allowed but approval mode is YOLO', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something dangerous: !{rm -rf /}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + // Override the approval mode for this test + (mockConfig.getApprovalMode as Mock).mockReturnValue(ApprovalMode.YOLO); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'deleted' }), + }); + + const result = await processor.process(prompt, context); + + // It should proceed with execution + expect(mockShellExecute).toHaveBeenCalledWith( + 'rm -rf /', + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toBe('Do something dangerous: deleted'); + }); + + it('should still throw an error for a hard-denied command even in YOLO mode', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something forbidden: !{reboot}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['reboot'], + isHardDenial: true, // This is the key difference + blockReason: 'System commands are blocked', + }); + // Set approval mode to YOLO + (mockConfig.getApprovalMode as Mock).mockReturnValue(ApprovalMode.YOLO); + + await expect(processor.process(prompt, 
context)).rejects.toThrow( + /Blocked command: "reboot". Reason: System commands are blocked/, + ); + + // Ensure it never tried to execute + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should throw ConfirmationRequiredError with the correct command', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something dangerous: !{rm -rf /}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should throw ConfirmationRequiredError with multiple commands if multiple are disallowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{cmd1} and !{cmd2}'; + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd === 'cmd1') { + return { allAllowed: false, disallowedCommands: ['cmd1'] }; + } + if (cmd === 'cmd2') { + return { allAllowed: false, disallowedCommands: ['cmd2'] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['cmd1', 'cmd2']); + } + } + }); + + it('should not execute any commands if at least one requires confirmation', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'First: !{echo "hello"}, Second: !{rm -rf /}'; + + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd.includes('rm')) { + return { allAllowed: 
false, disallowedCommands: [cmd] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + + // Ensure no commands were executed because the pipeline was halted. + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should only request confirmation for disallowed commands in a mixed prompt', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Allowed: !{ls -l}, Disallowed: !{rm -rf /}'; + + mockCheckCommandPermissions.mockImplementation((cmd) => ({ + allAllowed: !cmd.includes('rm'), + disallowedCommands: cmd.includes('rm') ? [cmd] : [], + })); + + try { + await processor.process(prompt, context); + expect.fail('Should have thrown ConfirmationRequiredError'); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + }); + + it('should execute all commands if they are on the session allowlist', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Run !{cmd1} and !{cmd2}'; + + // Add commands to the session allowlist + context.session.sessionShellAllowlist = new Set(['cmd1', 'cmd2']); + + // checkCommandPermissions should now pass for these + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'output1' }), + }) + .mockReturnValueOnce({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'output2' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd1', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd2', + expect.any(Object), + 
context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toBe('Run output1 and output2'); + }); + + it('should trim whitespace from the command inside the injection before interpolation', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Files: !{ ls {{args}} -l }'; + + const rawArgs = context.invocation!.args; + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + + const expectedCommand = `ls ${expectedEscapedArgs} -l`; + + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'total 0' }), + }); + + await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + expectedCommand, + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + }); + + it('should handle an empty command inside the injection gracefully (skips execution)', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'This is weird: !{}'; + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).not.toHaveBeenCalled(); + expect(mockShellExecute).not.toHaveBeenCalled(); + + // It replaces !{} with an empty string. 
+ expect(result).toBe('This is weird: '); + }); + + describe('Robust Parsing (Balanced Braces)', () => { + it('should correctly parse commands containing nested braces (e.g., awk)', async () => { + const processor = new ShellProcessor('test-command'); + const command = "awk '{print $1}' file.txt"; + const prompt = `Output: !{${command}}`; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'result' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + command, + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + command, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toBe('Output: result'); + }); + + it('should handle deeply nested braces correctly', async () => { + const processor = new ShellProcessor('test-command'); + const command = "echo '{{a},{b}}'"; + const prompt = `!{${command}}`; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: '{{a},{b}}' }), + }); + + const result = await processor.process(prompt, context); + expect(mockShellExecute).toHaveBeenCalledWith( + command, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + expect(result).toBe('{{a},{b}}'); + }); + + it('should throw an error for unclosed shell injections', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'This prompt is broken: !{ls -l'; + + await expect(processor.process(prompt, context)).rejects.toThrow( + /Unclosed shell injection/, + ); + }); + + it('should throw an error for unclosed nested braces', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Broken: !{echo {a}'; + + await expect(processor.process(prompt, context)).rejects.toThrow( + /Unclosed shell injection/, + ); + }); + }); + + describe('Error 
Reporting', () => { + it('should append exit code and command name on failure', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{cmd}'; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'some error output', + stderr: '', + exitCode: 1, + }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toBe( + "some error output\n[Shell command 'cmd' exited with code 1]", + ); + }); + + it('should append signal info and command name if terminated by signal', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{cmd}'; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'output', + stderr: '', + exitCode: null, + signal: 'SIGTERM', + }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toBe( + "output\n[Shell command 'cmd' terminated by signal SIGTERM]", + ); + }); + + it('should throw a detailed error if the shell fails to spawn', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{bad-command}'; + const spawnError = new Error('spawn EACCES'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + stdout: '', + stderr: '', + exitCode: null, + error: spawnError, + aborted: false, + }), + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + "Failed to start shell command in 'test-command': spawn EACCES. 
Command: bad-command", + ); + }); + + it('should report abort status with command name if aborted', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{long-running-command}'; + const spawnError = new Error('Aborted'); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + ...SUCCESS_RESULT, + output: 'partial output', + stderr: '', + exitCode: null, + error: spawnError, + aborted: true, // Key difference + }), + }); + + const result = await processor.process(prompt, context); + expect(result).toBe( + "partial output\n[Shell command 'long-running-command' aborted]", + ); + }); + }); + + describe('Context-Aware Argument Interpolation ({{args}})', () => { + const rawArgs = 'user input'; + + beforeEach(() => { + // Update context for these tests to use specific arguments + context.invocation!.args = rawArgs; + }); + + it('should perform raw replacement if no shell injections are present (optimization path)', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'The user said: {{args}}'; + + const result = await processor.process(prompt, context); + + expect(result).toBe(`The user said: ${rawArgs}`); + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should perform raw replacement outside !{} blocks', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Outside: {{args}}. Inside: !{echo "hello"}'; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'hello' }), + }); + + const result = await processor.process(prompt, context); + + expect(result).toBe(`Outside: ${rawArgs}. 
Inside: hello`); + }); + + it('should perform escaped replacement inside !{} blocks', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Command: !{grep {{args}} file.txt}'; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'match found' }), + }); + + const result = await processor.process(prompt, context); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedCommand = `grep ${expectedEscapedArgs} file.txt`; + + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + + expect(result).toBe('Command: match found'); + }); + + it('should handle both raw (outside) and escaped (inside) injection simultaneously', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'User "({{args}})" requested search: !{search {{args}}}'; + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ ...SUCCESS_RESULT, output: 'results' }), + }); + + const result = await processor.process(prompt, context); + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedCommand = `search ${expectedEscapedArgs}`; + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + + expect(result).toBe(`User "(${rawArgs})" requested search: results`); + }); + + it('should perform security checks on the final, resolved (escaped) command', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{rm {{args}}}'; + + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedResolvedCommand = `rm ${expectedEscapedArgs}`; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: [expectedResolvedCommand], + isHardDenial: false, + }); + + await 
expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + expectedResolvedCommand, + expect.any(Object), + context.session.sessionShellAllowlist, + ); + }); + + it('should report the resolved command if a hard denial occurs', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{rm {{args}}}'; + const expectedEscapedArgs = getExpectedEscapedArgForPlatform(rawArgs); + const expectedResolvedCommand = `rm ${expectedEscapedArgs}`; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: [expectedResolvedCommand], + isHardDenial: true, + blockReason: 'It is forbidden.', + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + `Blocked command: "${expectedResolvedCommand}". Reason: It is forbidden.`, + ); + }); + }); + describe('Real-World Escaping Scenarios', () => { + it('should correctly handle multiline arguments', async () => { + const processor = new ShellProcessor('test-command'); + const multilineArgs = 'first line\nsecond line'; + context.invocation!.args = multilineArgs; + const prompt = 'Commit message: !{git commit -m {{args}}}'; + + const expectedEscapedArgs = + getExpectedEscapedArgForPlatform(multilineArgs); + const expectedCommand = `git commit -m ${expectedEscapedArgs}`; + + await processor.process(prompt, context); + + expect(mockShellExecute).toHaveBeenCalledWith( + expectedCommand, + expect.any(String), + expect.any(Function), + expect.any(Object), + false, + ); + }); + + it.each([ + { name: 'spaces', input: 'file with spaces.txt' }, + { name: 'double quotes', input: 'a "quoted" string' }, + { name: 'single quotes', input: "it's a string" }, + { name: 'command substitution (backticks)', input: '`reboot`' }, + { name: 'command substitution (dollar)', input: '$(reboot)' }, + { name: 'variable expansion', input: '$HOME' }, + { name: 'command chaining (semicolon)', 
/**
 * Thrown when one or more shell commands embedded in a custom-command prompt
 * are not covered by the current permission configuration and therefore need
 * explicit user confirmation before they may run.
 *
 * Callers catch this error, surface `commandsToConfirm` to the user, and
 * re-run processing once the commands have been added to the session
 * allowlist.
 */
export class ConfirmationRequiredError extends Error {
  constructor(
    message: string,
    // The exact resolved command strings (with {{args}} already substituted)
    // that require confirmation.
    public commandsToConfirm: string[],
  ) {
    super(message);
    this.name = 'ConfirmationRequiredError';
  }
}
/**
 * Represents a single detected shell injection site in the prompt.
 */
interface ShellInjection {
  /** The shell command extracted from within !{...}, trimmed. */
  command: string;
  /** The starting index of the injection (inclusive, points to '!'). */
  startIndex: number;
  /** The ending index of the injection (exclusive, points after '}'). */
  endIndex: number;
  /** The command after {{args}} has been escaped and substituted. */
  resolvedCommand?: string;
}

/**
 * Handles prompt interpolation, including shell command execution (`!{...}`)
 * and context-aware argument injection (`{{args}}`).
 *
 * This processor ensures that:
 * 1. `{{args}}` outside `!{...}` are replaced with raw input.
 * 2. `{{args}}` inside `!{...}` are replaced with shell-escaped input.
 * 3. Shell commands are executed securely after argument substitution.
 * 4. Parsing correctly handles nested braces.
 */
export class ShellProcessor implements IPromptProcessor {
  /**
   * @param commandName Name of the custom command being processed; used only
   *   in error messages so failures can be attributed to a command.
   */
  constructor(private readonly commandName: string) {}

  /**
   * Interpolates `{{args}}` and executes `!{...}` blocks in `prompt`.
   *
   * All security checks are performed up front on every resolved command;
   * execution only begins once the entire prompt has passed permission
   * checks, so a confirmation requirement never leaves a command half-run.
   *
   * @param prompt The prompt template to process.
   * @param context Command context supplying raw args, config, and session
   *   allowlist.
   * @returns The prompt with args substituted and shell output spliced in.
   * @throws Error if config is missing, a command is hard-denied, or a shell
   *   fails to spawn.
   * @throws ConfirmationRequiredError if any command needs user approval.
   */
  async process(prompt: string, context: CommandContext): Promise<string> {
    const userArgsRaw = context.invocation?.args || '';

    // Fast path: no shell blocks at all, so only raw {{args}} substitution
    // is needed and no security checks apply.
    if (!prompt.includes(SHELL_INJECTION_TRIGGER)) {
      return prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw);
    }

    const config = context.services.config;
    if (!config) {
      throw new Error(
        `Security configuration not loaded. Cannot verify shell command permissions for '${this.commandName}'. Aborting.`,
      );
    }
    const { sessionShellAllowlist } = context.session;

    const injections = this.extractInjections(prompt);
    // If extractInjections found no closed blocks (and didn't throw), treat as raw.
    if (injections.length === 0) {
      return prompt.replaceAll(SHORTHAND_ARGS_PLACEHOLDER, userArgsRaw);
    }

    const { shell } = getShellConfiguration();
    const userArgsEscaped = escapeShellArg(userArgsRaw, shell);

    // Substitute {{args}} INSIDE each !{...} block with the escaped form so
    // user input cannot break out of the command string.
    const resolvedInjections = injections.map((injection) => {
      if (injection.command === '') {
        return injection;
      }
      const resolvedCommand = injection.command.replaceAll(
        SHORTHAND_ARGS_PLACEHOLDER,
        userArgsEscaped,
      );
      return { ...injection, resolvedCommand };
    });

    const commandsToConfirm = new Set<string>();
    for (const injection of resolvedInjections) {
      const command = injection.resolvedCommand;

      if (!command) continue;

      // Security check on the final, escaped command string.
      const { allAllowed, disallowedCommands, blockReason, isHardDenial } =
        checkCommandPermissions(command, config, sessionShellAllowlist);

      if (!allAllowed) {
        if (isHardDenial) {
          throw new Error(
            `${this.commandName} cannot be run. Blocked command: "${command}". Reason: ${blockReason || 'Blocked by configuration.'}`,
          );
        }

        // If not a hard denial, respect YOLO mode and auto-approve.
        if (config.getApprovalMode() !== ApprovalMode.YOLO) {
          disallowedCommands.forEach((uc) => commandsToConfirm.add(uc));
        }
      }
    }

    // Raise confirmation for ALL pending commands at once, before any
    // execution, so approval is a single round-trip.
    if (commandsToConfirm.size > 0) {
      throw new ConfirmationRequiredError(
        'Shell command confirmation required',
        Array.from(commandsToConfirm),
      );
    }

    let processedPrompt = '';
    let lastIndex = 0;

    for (const injection of resolvedInjections) {
      // Append the text segment BEFORE the injection, substituting {{args}} with RAW input.
      const segment = prompt.substring(lastIndex, injection.startIndex);
      processedPrompt += segment.replaceAll(
        SHORTHAND_ARGS_PLACEHOLDER,
        userArgsRaw,
      );

      // Execute the resolved command (which already has ESCAPED input).
      // An empty !{} block has no resolvedCommand and is simply dropped.
      if (injection.resolvedCommand) {
        const { result } = await ShellExecutionService.execute(
          injection.resolvedCommand,
          config.getTargetDir(),
          () => {},
          new AbortController().signal,
          config.getShouldUseNodePtyShell(),
        );

        const executionResult = await result;

        // Handle spawn errors: a populated `error` with aborted=false means
        // the shell never started (e.g. EACCES), which is fatal.
        if (executionResult.error && !executionResult.aborted) {
          throw new Error(
            `Failed to start shell command in '${this.commandName}': ${executionResult.error.message}. Command: ${injection.resolvedCommand}`,
          );
        }

        // Append the output, making stderr explicit for the model.
        processedPrompt += executionResult.output;

        // Append a status message if the command did not succeed.
        if (executionResult.aborted) {
          processedPrompt += `\n[Shell command '${injection.resolvedCommand}' aborted]`;
        } else if (
          executionResult.exitCode !== 0 &&
          executionResult.exitCode !== null
        ) {
          processedPrompt += `\n[Shell command '${injection.resolvedCommand}' exited with code ${executionResult.exitCode}]`;
        } else if (executionResult.signal !== null) {
          // NOTE(review): this branch assumes `signal` is always present
          // (number/string or null) on a success result. If it can be
          // `undefined`, `undefined !== null` would append a spurious
          // "terminated by signal undefined" — confirm the
          // ShellExecutionResult contract.
          processedPrompt += `\n[Shell command '${injection.resolvedCommand}' terminated by signal ${executionResult.signal}]`;
        }
      }

      lastIndex = injection.endIndex;
    }

    // Append the remaining text AFTER the last injection, substituting {{args}} with RAW input.
    const finalSegment = prompt.substring(lastIndex);
    processedPrompt += finalSegment.replaceAll(
      SHORTHAND_ARGS_PLACEHOLDER,
      userArgsRaw,
    );

    return processedPrompt;
  }

  /**
   * Iteratively parses the prompt string to extract shell injections (!{...}),
   * correctly handling nested braces within the command.
   *
   * @param prompt The prompt string to parse.
   * @returns An array of extracted ShellInjection objects.
   * @throws Error if an unclosed injection (`!{`) is found.
   */
  private extractInjections(prompt: string): ShellInjection[] {
    const injections: ShellInjection[] = [];
    let index = 0;

    while (index < prompt.length) {
      const startIndex = prompt.indexOf(SHELL_INJECTION_TRIGGER, index);

      if (startIndex === -1) {
        break;
      }

      let currentIndex = startIndex + SHELL_INJECTION_TRIGGER.length;
      let braceCount = 1;
      let foundEnd = false;

      while (currentIndex < prompt.length) {
        const char = prompt[currentIndex];

        // We count literal braces. This parser does not interpret shell quoting/escaping.
        if (char === '{') {
          braceCount++;
        } else if (char === '}') {
          braceCount--;
          if (braceCount === 0) {
            const commandContent = prompt.substring(
              startIndex + SHELL_INJECTION_TRIGGER.length,
              currentIndex,
            );
            const endIndex = currentIndex + 1;

            injections.push({
              command: commandContent.trim(),
              startIndex,
              endIndex,
            });

            index = endIndex;
            foundEnd = true;
            break;
          }
        }
        currentIndex++;
      }

      // Check if the inner loop finished without finding the closing brace.
      if (!foundEnd) {
        throw new Error(
          `Invalid syntax in command '${this.commandName}': Unclosed shell injection starting at index ${startIndex} ('!{'). Ensure braces are balanced.`,
        );
      }
    }

    return injections;
  }
}
/**
 * Defines the interface for a prompt processor, a module that can transform
 * a prompt string before it is sent to the model. Processors are chained
 * together to create a processing pipeline.
 */
export interface IPromptProcessor {
  /**
   * Processes a prompt string, applying a specific transformation as part of a pipeline.
   *
   * Each processor in a command's pipeline receives the output of the previous
   * processor. This method provides the full command context, allowing for
   * complex transformations that may require access to invocation details,
   * application services, or UI state.
   *
   * @param prompt The current state of the prompt string. This may have been
   *   modified by previous processors in the pipeline.
   * @param context The full command context, providing access to invocation
   *   details (like `context.invocation.raw` and `context.invocation.args`),
   *   application services, and UI handlers.
   * @returns A promise that resolves to the transformed prompt string, which
   *   will be passed to the next processor or, if it's the last one, sent to
   *   the model.
   */
  process(prompt: string, context: CommandContext): Promise<string>;
}

/**
 * The placeholder string for shorthand argument injection in custom commands.
 * When used outside of !{...}, arguments are injected raw.
 * When used inside !{...}, arguments are shell-escaped.
 */
export const SHORTHAND_ARGS_PLACEHOLDER = '{{args}}';
+ */ +export const SHELL_INJECTION_TRIGGER = '!{'; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/__snapshots__/App.test.tsx.snap b/projects/ui/qwen-code/packages/cli/src/ui/__snapshots__/App.test.tsx.snap new file mode 100644 index 0000000000000000000000000000000000000000..f5425dba5859b99a55f3d552b7f3a6ff9ccf37d6 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/__snapshots__/App.test.tsx.snap @@ -0,0 +1,31 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`App UI > should render correctly with the prompt input box 1`] = ` +" + +╭────────────────────────────────────────────────────────────────────────────────────────╮ +│ > Type your message or @path/to/file │ +╰────────────────────────────────────────────────────────────────────────────────────────╯ +/test/dir no sandbox (see /docs) model (100% context left)" +`; + +exports[`App UI > should render the initial UI correctly 1`] = ` +" I'm Feeling Lucky (esc to cancel, 0s) + + +/test/dir no sandbox (see /docs) model (100% context left)" +`; + +exports[`App UI > when in a narrow terminal > should render with a column layout 1`] = ` +" + + +╭────────────────────────────────────────────────────────────────────────────────────────╮ +│ > Type your message or @path/to/file │ +╰────────────────────────────────────────────────────────────────────────────────────────╯ +dir + +no sandbox (see /docs) + +model (100% context left)| ✖ 5 errors (ctrl+o for details)" +`; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..53510e757f85620fe2bad24dd29577b155289df8 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.test.ts @@ -0,0 +1,153 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi, 
afterEach } from 'vitest'; +import { aboutCommand } from './aboutCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import * as versionUtils from '../../utils/version.js'; +import { MessageType } from '../types.js'; + +import { IdeClient } from '../../../../core/src/ide/ide-client.js'; + +vi.mock('../../utils/version.js', () => ({ + getCliVersion: vi.fn(), +})); + +describe('aboutCommand', () => { + let mockContext: CommandContext; + const originalPlatform = process.platform; + const originalEnv = { ...process.env }; + + beforeEach(() => { + mockContext = createMockCommandContext({ + services: { + config: { + getModel: vi.fn(), + getIdeClient: vi.fn(), + getIdeMode: vi.fn().mockReturnValue(true), + }, + settings: { + merged: { + selectedAuthType: 'test-auth', + }, + }, + }, + ui: { + addItem: vi.fn(), + }, + } as unknown as CommandContext); + + vi.mocked(versionUtils.getCliVersion).mockResolvedValue('test-version'); + vi.spyOn(mockContext.services.config!, 'getModel').mockReturnValue( + 'test-model', + ); + process.env['GOOGLE_CLOUD_PROJECT'] = 'test-gcp-project'; + Object.defineProperty(process, 'platform', { + value: 'test-os', + }); + vi.spyOn(mockContext.services.config!, 'getIdeClient').mockReturnValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue('test-ide'), + } as Partial as IdeClient); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + Object.defineProperty(process, 'platform', { + value: originalPlatform, + }); + process.env = originalEnv; + vi.clearAllMocks(); + }); + + it('should have the correct name and description', () => { + expect(aboutCommand.name).toBe('about'); + expect(aboutCommand.description).toBe('show version info'); + }); + + it('should call addItem with all version info', async () => { + process.env['SANDBOX'] = ''; + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await 
aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.ABOUT, + cliVersion: 'test-version', + osVersion: 'test-os', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + gcpProject: 'test-gcp-project', + ideClient: 'test-ide', + }, + expect.any(Number), + ); + }); + + it('should show the correct sandbox environment variable', async () => { + process.env['SANDBOX'] = 'gemini-sandbox'; + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + sandboxEnv: 'gemini-sandbox', + }), + expect.any(Number), + ); + }); + + it('should show sandbox-exec profile when applicable', async () => { + process.env['SANDBOX'] = 'sandbox-exec'; + process.env['SEATBELT_PROFILE'] = 'test-profile'; + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + sandboxEnv: 'sandbox-exec (test-profile)', + }), + expect.any(Number), + ); + }); + + it('should not show ide client when it is not detected', async () => { + vi.spyOn(mockContext.services.config!, 'getIdeClient').mockReturnValue({ + getDetectedIdeDisplayName: vi.fn().mockReturnValue(undefined), + } as Partial as IdeClient); + + process.env['SANDBOX'] = ''; + if (!aboutCommand.action) { + throw new Error('The about command must have an action.'); + } + + await aboutCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ABOUT, + cliVersion: 'test-version', + osVersion: 'test-os', + sandboxEnv: 'no sandbox', + modelVersion: 'test-model', + selectedAuthType: 'test-auth', + gcpProject: 'test-gcp-project', + ideClient: '', + 
}), + expect.any(Number), + ); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..44bf00ddfe2ffe2fa1c18c481aa7d0dec26c9191 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/aboutCommand.ts @@ -0,0 +1,49 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { getCliVersion } from '../../utils/version.js'; +import { CommandKind, SlashCommand } from './types.js'; +import process from 'node:process'; +import { MessageType, type HistoryItemAbout } from '../types.js'; + +export const aboutCommand: SlashCommand = { + name: 'about', + description: 'show version info', + kind: CommandKind.BUILT_IN, + action: async (context) => { + const osVersion = process.platform; + let sandboxEnv = 'no sandbox'; + if (process.env['SANDBOX'] && process.env['SANDBOX'] !== 'sandbox-exec') { + sandboxEnv = process.env['SANDBOX']; + } else if (process.env['SANDBOX'] === 'sandbox-exec') { + sandboxEnv = `sandbox-exec (${ + process.env['SEATBELT_PROFILE'] || 'unknown' + })`; + } + const modelVersion = context.services.config?.getModel() || 'Unknown'; + const cliVersion = await getCliVersion(); + const selectedAuthType = + context.services.settings.merged.selectedAuthType || ''; + const gcpProject = process.env['GOOGLE_CLOUD_PROJECT'] || ''; + const ideClient = + (context.services.config?.getIdeMode() && + context.services.config?.getIdeClient()?.getDetectedIdeDisplayName()) || + ''; + + const aboutItem: Omit = { + type: MessageType.ABOUT, + cliVersion, + osVersion, + sandboxEnv, + modelVersion, + selectedAuthType, + gcpProject, + ideClient, + }; + + context.ui.addItem(aboutItem, Date.now()); + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.test.ts 
b/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d6d925dbd3d3901ee03c6296f802387ed6fbdb8e --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.test.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach } from 'vitest'; +import { authCommand } from './authCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +describe('authCommand', () => { + let mockContext: CommandContext; + + beforeEach(() => { + mockContext = createMockCommandContext(); + }); + + it('should return a dialog action to open the auth dialog', () => { + if (!authCommand.action) { + throw new Error('The auth command must have an action.'); + } + + const result = authCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'dialog', + dialog: 'auth', + }); + }); + + it('should have the correct name and description', () => { + expect(authCommand.name).toBe('auth'); + expect(authCommand.description).toBe('change the auth method'); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..8e78cf862d4a9f7c7d6d1949bfd0e51e87116da7 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/authCommand.ts @@ -0,0 +1,17 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js'; + +export const authCommand: SlashCommand = { + name: 'auth', + description: 'change the auth method', + kind: CommandKind.BUILT_IN, + action: (_context, _args): OpenDialogActionReturn => ({ + type: 'dialog', + 
dialog: 'auth', + }), +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/bugCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/bugCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..4c2f53b65faebee50cb6a8ec2527c83e714b23a4 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/bugCommand.test.ts @@ -0,0 +1,114 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import open from 'open'; +import { bugCommand } from './bugCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { getCliVersion } from '../../utils/version.js'; +import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; +import { formatMemoryUsage } from '../utils/formatters.js'; + +// Mock dependencies +vi.mock('open'); +vi.mock('../../utils/version.js'); +vi.mock('../utils/formatters.js'); +vi.mock('@qwen-code/qwen-code-core'); +vi.mock('node:process', () => ({ + default: { + platform: 'test-platform', + version: 'v20.0.0', + // Keep other necessary process properties if needed by other parts of the code + env: process.env, + memoryUsage: () => ({ rss: 0 }), + }, +})); + +describe('bugCommand', () => { + beforeEach(() => { + vi.mocked(getCliVersion).mockResolvedValue('0.1.0'); + vi.mocked(formatMemoryUsage).mockReturnValue('100 MB'); + vi.mock('@qwen-code/qwen-code-core', () => ({ + sessionId: 'test-session-id', + })); + vi.stubEnv('SANDBOX', 'qwen-test'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + vi.clearAllMocks(); + }); + + it('should generate the default GitHub issue URL', async () => { + const mockContext = createMockCommandContext({ + services: { + config: { + getModel: () => 'qwen3-coder-plus', + getBugCommand: () => undefined, + getIdeClient: () => ({ + getDetectedIdeDisplayName: () => 'VSCode', + }), + getIdeMode: () => true, + 
}, + }, + }); + + if (!bugCommand.action) throw new Error('Action is not defined'); + await bugCommand.action(mockContext, 'A test bug'); + + const expectedInfo = ` +* **CLI Version:** 0.1.0 +* **Git Commit:** ${GIT_COMMIT_INFO} +* **Session ID:** test-session-id +* **Operating System:** test-platform v20.0.0 +* **Sandbox Environment:** test +* **Model Version:** qwen3-coder-plus +* **Memory Usage:** 100 MB +* **IDE Client:** VSCode +`; + const expectedUrl = + 'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title=A%20test%20bug&info=' + + encodeURIComponent(expectedInfo); + + expect(open).toHaveBeenCalledWith(expectedUrl); + }); + + it('should use a custom URL template from config if provided', async () => { + const customTemplate = + 'https://internal.bug-tracker.com/new?desc={title}&details={info}'; + const mockContext = createMockCommandContext({ + services: { + config: { + getModel: () => 'qwen3-coder-plus', + getBugCommand: () => ({ urlTemplate: customTemplate }), + getIdeClient: () => ({ + getDetectedIdeDisplayName: () => 'VSCode', + }), + getIdeMode: () => true, + }, + }, + }); + + if (!bugCommand.action) throw new Error('Action is not defined'); + await bugCommand.action(mockContext, 'A custom bug'); + + const expectedInfo = ` +* **CLI Version:** 0.1.0 +* **Git Commit:** ${GIT_COMMIT_INFO} +* **Session ID:** test-session-id +* **Operating System:** test-platform v20.0.0 +* **Sandbox Environment:** test +* **Model Version:** qwen3-coder-plus +* **Memory Usage:** 100 MB +* **IDE Client:** VSCode +`; + const expectedUrl = customTemplate + .replace('{title}', encodeURIComponent('A custom bug')) + .replace('{info}', encodeURIComponent(expectedInfo)); + + expect(open).toHaveBeenCalledWith(expectedUrl); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/bugCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/bugCommand.ts new file mode 100644 index 
/**
 * `/bug` built-in command: assembles a Markdown block of diagnostic info
 * (CLI version, git commit, session id, OS, sandbox, model, memory, IDE)
 * and opens a pre-filled GitHub issue (or a custom tracker URL from config)
 * in the user's browser.
 */
export const bugCommand: SlashCommand = {
  name: 'bug',
  description: 'submit a bug report',
  kind: CommandKind.BUILT_IN,
  action: async (context: CommandContext, args?: string): Promise<void> => {
    // The free-text after "/bug" becomes the issue title.
    const bugDescription = (args || '').trim();
    const { config } = context.services;

    const osVersion = `${process.platform} ${process.version}`;
    let sandboxEnv = 'no sandbox';
    if (process.env['SANDBOX'] && process.env['SANDBOX'] !== 'sandbox-exec') {
      // Strip the tool's own image prefix (e.g. "qwen-test" -> "test").
      sandboxEnv = process.env['SANDBOX'].replace(/^qwen-(?:code-)?/, '');
    } else if (process.env['SANDBOX'] === 'sandbox-exec') {
      sandboxEnv = `sandbox-exec (${
        process.env['SEATBELT_PROFILE'] || 'unknown'
      })`;
    }
    const modelVersion = config?.getModel() || 'Unknown';
    const cliVersion = await getCliVersion();
    const memoryUsage = formatMemoryUsage(process.memoryUsage().rss);
    // Only reported when IDE mode is on AND an IDE was detected.
    const ideClient =
      (context.services.config?.getIdeMode() &&
        context.services.config?.getIdeClient()?.getDetectedIdeDisplayName()) ||
      '';

    // Markdown diagnostic block inserted into the issue template; the
    // leading newline and per-line format are asserted by tests.
    let info = `
*  **CLI Version:** ${cliVersion}
*  **Git Commit:** ${GIT_COMMIT_INFO}
*  **Session ID:** ${sessionId}
*  **Operating System:** ${osVersion}
*  **Sandbox Environment:** ${sandboxEnv}
*  **Model Version:** ${modelVersion}
*  **Memory Usage:** ${memoryUsage}
`;
    if (ideClient) {
      info += `*  **IDE Client:** ${ideClient}\n`;
    }

    // Default target; may be overridden by a configured urlTemplate that
    // uses the same {title}/{info} placeholders.
    let bugReportUrl =
      'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title={title}&info={info}';

    const bugCommandSettings = config?.getBugCommand();
    if (bugCommandSettings?.urlTemplate) {
      bugReportUrl = bugCommandSettings.urlTemplate;
    }

    // NOTE(review): a custom template lacking {title}/{info} placeholders is
    // used as-is with no warning — confirm that is intended.
    bugReportUrl = bugReportUrl
      .replace('{title}', encodeURIComponent(bugDescription))
      .replace('{info}', encodeURIComponent(info));

    // Always show the URL in the history so the user can copy it even if
    // opening the browser fails.
    context.ui.addItem(
      {
        type: MessageType.INFO,
        text: `To submit your bug report, please open the following URL in your browser:\n${bugReportUrl}`,
      },
      Date.now(),
    );

    try {
      await open(bugReportUrl);
    } catch (error) {
      // Browser launch is best-effort: report the failure but do not throw,
      // since the URL was already printed above.
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      context.ui.addItem(
        {
          type: MessageType.ERROR,
          text: `Could not open URL in browser: ${errorMessage}`,
        },
        Date.now(),
      );
    }
  },
};
() => ({ + stat: vi.fn(), + readdir: vi.fn().mockResolvedValue(['file1.txt', 'file2.txt'] as string[]), +})); + +describe('chatCommand', () => { + const mockFs = fsPromises as Mocked; + + let mockContext: CommandContext; + let mockGetChat: ReturnType; + let mockSaveCheckpoint: ReturnType; + let mockLoadCheckpoint: ReturnType; + let mockDeleteCheckpoint: ReturnType; + let mockGetHistory: ReturnType; + + const getSubCommand = ( + name: 'list' | 'save' | 'resume' | 'delete', + ): SlashCommand => { + const subCommand = chatCommand.subCommands?.find( + (cmd) => cmd.name === name, + ); + if (!subCommand) { + throw new Error(`/chat ${name} command not found.`); + } + return subCommand; + }; + + beforeEach(() => { + mockGetHistory = vi.fn().mockReturnValue([]); + mockGetChat = vi.fn().mockResolvedValue({ + getHistory: mockGetHistory, + }); + mockSaveCheckpoint = vi.fn().mockResolvedValue(undefined); + mockLoadCheckpoint = vi.fn().mockResolvedValue([]); + mockDeleteCheckpoint = vi.fn().mockResolvedValue(true); + + mockContext = createMockCommandContext({ + services: { + config: { + getProjectTempDir: () => '/tmp/gemini', + getGeminiClient: () => + ({ + getChat: mockGetChat, + }) as unknown as GeminiClient, + }, + logger: { + saveCheckpoint: mockSaveCheckpoint, + loadCheckpoint: mockLoadCheckpoint, + deleteCheckpoint: mockDeleteCheckpoint, + initialize: vi.fn().mockResolvedValue(undefined), + }, + }, + }); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should have the correct main command definition', () => { + expect(chatCommand.name).toBe('chat'); + expect(chatCommand.description).toBe('Manage conversation history.'); + expect(chatCommand.subCommands).toHaveLength(4); + }); + + describe('list subcommand', () => { + let listCommand: SlashCommand; + + beforeEach(() => { + listCommand = getSubCommand('list'); + }); + + it('should inform when no checkpoints are found', async () => { + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + [] 
as string[]) as unknown as typeof fsPromises.readdir, + ); + const result = await listCommand?.action?.(mockContext, ''); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No saved conversation checkpoints found.', + }); + }); + + it('should list found checkpoints', async () => { + const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json']; + const date = new Date(); + + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + mockFs.stat.mockImplementation((async (path: string): Promise => { + if (path.endsWith('test1.json')) { + return { mtime: date } as Stats; + } + return { mtime: new Date(date.getTime() + 1000) } as Stats; + }) as unknown as typeof fsPromises.stat); + + const result = (await listCommand?.action?.( + mockContext, + '', + )) as MessageActionReturn; + + const content = result?.content ?? ''; + expect(result?.type).toBe('message'); + expect(content).toContain('List of saved conversations:'); + const isoDate = date + .toISOString() + .match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/); + const formattedDate = isoDate ? `${isoDate[1]} ${isoDate[2]}` : ''; + expect(content).toContain(formattedDate); + const index1 = content.indexOf('- \u001b[36mtest1\u001b[0m'); + const index2 = content.indexOf('- \u001b[36mtest2\u001b[0m'); + expect(index1).toBeGreaterThanOrEqual(0); + expect(index2).toBeGreaterThan(index1); + }); + + it('should handle invalid date formats gracefully', async () => { + const fakeFiles = ['checkpoint-baddate.json']; + const badDate = { + toISOString: () => 'an-invalid-date-string', + } as Date; + + mockFs.readdir.mockResolvedValue(fakeFiles); + mockFs.stat.mockResolvedValue({ mtime: badDate } as Stats); + + const result = (await listCommand?.action?.( + mockContext, + '', + )) as MessageActionReturn; + + const content = result?.content ?? 
''; + expect(content).toContain('(saved on Invalid Date)'); + }); + }); + describe('save subcommand', () => { + let saveCommand: SlashCommand; + const tag = 'my-tag'; + let mockCheckpointExists: ReturnType; + + beforeEach(() => { + saveCommand = getSubCommand('save'); + mockCheckpointExists = vi.fn().mockResolvedValue(false); + mockContext.services.logger.checkpointExists = mockCheckpointExists; + }); + + it('should return an error if tag is missing', async () => { + const result = await saveCommand?.action?.(mockContext, ' '); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Missing tag. Usage: /chat save ', + }); + }); + + it('should inform if conversation history is empty or only contains system context', async () => { + mockGetHistory.mockReturnValue([]); + let result = await saveCommand?.action?.(mockContext, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No conversation found to save.', + }); + + mockGetHistory.mockReturnValue([ + { role: 'user', parts: [{ text: 'context for our chat' }] }, + { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] }, + ]); + result = await saveCommand?.action?.(mockContext, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No conversation found to save.', + }); + + mockGetHistory.mockReturnValue([ + { role: 'user', parts: [{ text: 'context for our chat' }] }, + { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] }, + { role: 'user', parts: [{ text: 'Hello, how are you?' 
}] }, + ]); + result = await saveCommand?.action?.(mockContext, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `Conversation checkpoint saved with tag: ${tag}.`, + }); + }); + + it('should return confirm_action if checkpoint already exists', async () => { + mockCheckpointExists.mockResolvedValue(true); + mockContext.invocation = { + raw: `/chat save ${tag}`, + name: 'save', + args: tag, + }; + + const result = await saveCommand?.action?.(mockContext, tag); + + expect(mockCheckpointExists).toHaveBeenCalledWith(tag); + expect(mockSaveCheckpoint).not.toHaveBeenCalled(); + expect(result).toMatchObject({ + type: 'confirm_action', + originalInvocation: { raw: `/chat save ${tag}` }, + }); + // Check that prompt is a React element + expect(result).toHaveProperty('prompt'); + }); + + it('should save the conversation if overwrite is confirmed', async () => { + const history: Content[] = [ + { role: 'user', parts: [{ text: 'context for our chat' }] }, + { role: 'model', parts: [{ text: 'Got it. Thanks for the context!' }] }, + { role: 'user', parts: [{ text: 'hello' }] }, + { role: 'model', parts: [{ text: 'Hi there!' 
}] }, + ]; + mockGetHistory.mockReturnValue(history); + mockContext.overwriteConfirmed = true; + + const result = await saveCommand?.action?.(mockContext, tag); + + expect(mockCheckpointExists).not.toHaveBeenCalled(); // Should skip existence check + expect(mockSaveCheckpoint).toHaveBeenCalledWith(history, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `Conversation checkpoint saved with tag: ${tag}.`, + }); + }); + }); + + describe('resume subcommand', () => { + const goodTag = 'good-tag'; + const badTag = 'bad-tag'; + + let resumeCommand: SlashCommand; + beforeEach(() => { + resumeCommand = getSubCommand('resume'); + }); + + it('should return an error if tag is missing', async () => { + const result = await resumeCommand?.action?.(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Missing tag. Usage: /chat resume ', + }); + }); + + it('should inform if checkpoint is not found', async () => { + mockLoadCheckpoint.mockResolvedValue([]); + + const result = await resumeCommand?.action?.(mockContext, badTag); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `No saved checkpoint found with tag: ${badTag}.`, + }); + }); + + it('should resume a conversation', async () => { + const conversation: Content[] = [ + { role: 'user', parts: [{ text: 'hello gemini' }] }, + { role: 'model', parts: [{ text: 'hello world' }] }, + ]; + mockLoadCheckpoint.mockResolvedValue(conversation); + + const result = await resumeCommand?.action?.(mockContext, goodTag); + + expect(result).toEqual({ + type: 'load_history', + history: [ + { type: 'user', text: 'hello gemini' }, + { type: 'gemini', text: 'hello world' }, + ] as HistoryItemWithoutId[], + clientHistory: conversation, + }); + }); + + describe('completion', () => { + it('should provide completion suggestions', async () => { + const fakeFiles = ['checkpoint-alpha.json', 'checkpoint-beta.json']; + 
mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + + mockFs.stat.mockImplementation( + (async (_: string): Promise => + ({ + mtime: new Date(), + }) as Stats) as unknown as typeof fsPromises.stat, + ); + + const result = await resumeCommand?.completion?.(mockContext, 'a'); + + expect(result).toEqual(['alpha']); + }); + + it('should suggest filenames sorted by modified time (newest first)', async () => { + const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json']; + const date = new Date(); + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + mockFs.stat.mockImplementation((async ( + path: string, + ): Promise => { + if (path.endsWith('test1.json')) { + return { mtime: date } as Stats; + } + return { mtime: new Date(date.getTime() + 1000) } as Stats; + }) as unknown as typeof fsPromises.stat); + + const result = await resumeCommand?.completion?.(mockContext, ''); + // Sort items by last modified time (newest first) + expect(result).toEqual(['test2', 'test1']); + }); + }); + }); + + describe('delete subcommand', () => { + let deleteCommand: SlashCommand; + const tag = 'my-tag'; + beforeEach(() => { + deleteCommand = getSubCommand('delete'); + }); + + it('should return an error if tag is missing', async () => { + const result = await deleteCommand?.action?.(mockContext, ' '); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat delete ', + }); + }); + + it('should return an error if checkpoint is not found', async () => { + mockDeleteCheckpoint.mockResolvedValue(false); + const result = await deleteCommand?.action?.(mockContext, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: `Error: No checkpoint found with tag '${tag}'.`, + }); + }); + + it('should delete the conversation', async () => { + const result = await deleteCommand?.action?.(mockContext, tag); + + expect(mockDeleteCheckpoint).toHaveBeenCalledWith(tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `Conversation checkpoint '${tag}' has been deleted.`, + }); + }); + + describe('completion', () => { + it('should provide completion suggestions', async () => { + const fakeFiles = ['checkpoint-alpha.json', 'checkpoint-beta.json']; + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + + mockFs.stat.mockImplementation( + (async (_: string): Promise => + ({ + mtime: new Date(), + }) as Stats) as unknown as typeof fsPromises.stat, + ); + + const result = await deleteCommand?.completion?.(mockContext, 'a'); + + expect(result).toEqual(['alpha']); + }); + }); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/chatCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/chatCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..71b9cf78df7876b632c3e191dfb93d50345a4043 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/chatCommand.ts @@ -0,0 +1,280 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fsPromises from 'fs/promises'; +import React from 'react'; +import { Text } from 'ink'; +import { Colors } from '../colors.js'; +import { + CommandContext, + SlashCommand, + MessageActionReturn, + CommandKind, + SlashCommandActionReturn, +} 
from './types.js'; +import { decodeTagName } from '@qwen-code/qwen-code-core'; +import path from 'path'; +import { HistoryItemWithoutId, MessageType } from '../types.js'; + +interface ChatDetail { + name: string; + mtime: Date; +} + +const getSavedChatTags = async ( + context: CommandContext, + mtSortDesc: boolean, +): Promise => { + const geminiDir = context.services.config?.getProjectTempDir(); + if (!geminiDir) { + return []; + } + try { + const file_head = 'checkpoint-'; + const file_tail = '.json'; + const files = await fsPromises.readdir(geminiDir); + const chatDetails: Array<{ name: string; mtime: Date }> = []; + + for (const file of files) { + if (file.startsWith(file_head) && file.endsWith(file_tail)) { + const filePath = path.join(geminiDir, file); + const stats = await fsPromises.stat(filePath); + const tagName = file.slice(file_head.length, -file_tail.length); + chatDetails.push({ + name: decodeTagName(tagName), + mtime: stats.mtime, + }); + } + } + + chatDetails.sort((a, b) => + mtSortDesc + ? b.mtime.getTime() - a.mtime.getTime() + : a.mtime.getTime() - b.mtime.getTime(), + ); + + return chatDetails; + } catch (_err) { + return []; + } +}; + +const listCommand: SlashCommand = { + name: 'list', + description: 'List saved conversation checkpoints', + kind: CommandKind.BUILT_IN, + action: async (context): Promise => { + const chatDetails = await getSavedChatTags(context, false); + if (chatDetails.length === 0) { + return { + type: 'message', + messageType: 'info', + content: 'No saved conversation checkpoints found.', + }; + } + + const maxNameLength = Math.max( + ...chatDetails.map((chat) => chat.name.length), + ); + + let message = 'List of saved conversations:\n\n'; + for (const chat of chatDetails) { + const paddedName = chat.name.padEnd(maxNameLength, ' '); + const isoString = chat.mtime.toISOString(); + const match = isoString.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/); + const formattedDate = match ? 
`${match[1]} ${match[2]}` : 'Invalid Date'; + message += ` - \u001b[36m${paddedName}\u001b[0m \u001b[90m(saved on ${formattedDate})\u001b[0m\n`; + } + message += `\n\u001b[90mNote: Newest last, oldest first\u001b[0m`; + return { + type: 'message', + messageType: 'info', + content: message, + }; + }, +}; + +const saveCommand: SlashCommand = { + name: 'save', + description: + 'Save the current conversation as a checkpoint. Usage: /chat save ', + kind: CommandKind.BUILT_IN, + action: async (context, args): Promise => { + const tag = args.trim(); + if (!tag) { + return { + type: 'message', + messageType: 'error', + content: 'Missing tag. Usage: /chat save ', + }; + } + + const { logger, config } = context.services; + await logger.initialize(); + + if (!context.overwriteConfirmed) { + const exists = await logger.checkpointExists(tag); + if (exists) { + return { + type: 'confirm_action', + prompt: React.createElement( + Text, + null, + 'A checkpoint with the tag ', + React.createElement(Text, { color: Colors.AccentPurple }, tag), + ' already exists. Do you want to overwrite it?', + ), + originalInvocation: { + raw: context.invocation?.raw || `/chat save ${tag}`, + }, + }; + } + } + + const chat = await config?.getGeminiClient()?.getChat(); + if (!chat) { + return { + type: 'message', + messageType: 'error', + content: 'No chat client available to save conversation.', + }; + } + + const history = chat.getHistory(); + if (history.length > 2) { + await logger.saveCheckpoint(history, tag); + return { + type: 'message', + messageType: 'info', + content: `Conversation checkpoint saved with tag: ${decodeTagName(tag)}.`, + }; + } else { + return { + type: 'message', + messageType: 'info', + content: 'No conversation found to save.', + }; + } + }, +}; + +const resumeCommand: SlashCommand = { + name: 'resume', + altNames: ['load'], + description: + 'Resume a conversation from a checkpoint. 
Usage: /chat resume ', + kind: CommandKind.BUILT_IN, + action: async (context, args) => { + const tag = args.trim(); + if (!tag) { + return { + type: 'message', + messageType: 'error', + content: 'Missing tag. Usage: /chat resume ', + }; + } + + const { logger } = context.services; + await logger.initialize(); + const conversation = await logger.loadCheckpoint(tag); + + if (conversation.length === 0) { + return { + type: 'message', + messageType: 'info', + content: `No saved checkpoint found with tag: ${decodeTagName(tag)}.`, + }; + } + + const rolemap: { [key: string]: MessageType } = { + user: MessageType.USER, + model: MessageType.GEMINI, + }; + + const uiHistory: HistoryItemWithoutId[] = []; + let hasSystemPrompt = false; + let i = 0; + + for (const item of conversation) { + i += 1; + const text = + item.parts + ?.filter((m) => !!m.text) + .map((m) => m.text) + .join('') || ''; + if (!text) { + continue; + } + if (i === 1 && text.match(/context for our chat/)) { + hasSystemPrompt = true; + } + if (i > 2 || !hasSystemPrompt) { + uiHistory.push({ + type: (item.role && rolemap[item.role]) || MessageType.GEMINI, + text, + } as HistoryItemWithoutId); + } + } + return { + type: 'load_history', + history: uiHistory, + clientHistory: conversation, + }; + }, + completion: async (context, partialArg) => { + const chatDetails = await getSavedChatTags(context, true); + return chatDetails + .map((chat) => chat.name) + .filter((name) => name.startsWith(partialArg)); + }, +}; + +const deleteCommand: SlashCommand = { + name: 'delete', + description: 'Delete a conversation checkpoint. Usage: /chat delete ', + kind: CommandKind.BUILT_IN, + action: async (context, args): Promise => { + const tag = args.trim(); + if (!tag) { + return { + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat delete ', + }; + } + + const { logger } = context.services; + await logger.initialize(); + const deleted = await logger.deleteCheckpoint(tag); + + if (deleted) { + return { + type: 'message', + messageType: 'info', + content: `Conversation checkpoint '${decodeTagName(tag)}' has been deleted.`, + }; + } else { + return { + type: 'message', + messageType: 'error', + content: `Error: No checkpoint found with tag '${decodeTagName(tag)}'.`, + }; + } + }, + completion: async (context, partialArg) => { + const chatDetails = await getSavedChatTags(context, true); + return chatDetails + .map((chat) => chat.name) + .filter((name) => name.startsWith(partialArg)); + }, +}; + +export const chatCommand: SlashCommand = { + name: 'chat', + description: 'Manage conversation history.', + kind: CommandKind.BUILT_IN, + subCommands: [listCommand, saveCommand, resumeCommand, deleteCommand], +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..d34b3d59530cade82962fb767288ccb90a3b460c --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.test.ts @@ -0,0 +1,100 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, Mock } from 'vitest'; +import { clearCommand } from './clearCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +// Mock the telemetry service +vi.mock('@qwen-code/qwen-code-core', async () => { + const actual = await vi.importActual('@qwen-code/qwen-code-core'); + return { + ...actual, + uiTelemetryService: { + resetLastPromptTokenCount: vi.fn(), + }, + }; +}); + +import { GeminiClient, uiTelemetryService } from '@qwen-code/qwen-code-core'; + +describe('clearCommand', () => 
{ + let mockContext: CommandContext; + let mockResetChat: ReturnType; + + beforeEach(() => { + mockResetChat = vi.fn().mockResolvedValue(undefined); + vi.clearAllMocks(); + + mockContext = createMockCommandContext({ + services: { + config: { + getGeminiClient: () => + ({ + resetChat: mockResetChat, + }) as unknown as GeminiClient, + }, + }, + }); + }); + + it('should set debug message, reset chat, reset telemetry, and clear UI when config is available', async () => { + if (!clearCommand.action) { + throw new Error('clearCommand must have an action.'); + } + + await clearCommand.action(mockContext, ''); + + expect(mockContext.ui.setDebugMessage).toHaveBeenCalledWith( + 'Clearing terminal and resetting chat.', + ); + expect(mockContext.ui.setDebugMessage).toHaveBeenCalledTimes(1); + + expect(mockResetChat).toHaveBeenCalledTimes(1); + expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes( + 1, + ); + expect(mockContext.ui.clear).toHaveBeenCalledTimes(1); + + // Check the order of operations. 
+ const setDebugMessageOrder = (mockContext.ui.setDebugMessage as Mock).mock + .invocationCallOrder[0]; + const resetChatOrder = mockResetChat.mock.invocationCallOrder[0]; + const resetTelemetryOrder = ( + uiTelemetryService.resetLastPromptTokenCount as Mock + ).mock.invocationCallOrder[0]; + const clearOrder = (mockContext.ui.clear as Mock).mock + .invocationCallOrder[0]; + + expect(setDebugMessageOrder).toBeLessThan(resetChatOrder); + expect(resetChatOrder).toBeLessThan(resetTelemetryOrder); + expect(resetTelemetryOrder).toBeLessThan(clearOrder); + }); + + it('should not attempt to reset chat if config service is not available', async () => { + if (!clearCommand.action) { + throw new Error('clearCommand must have an action.'); + } + + const nullConfigContext = createMockCommandContext({ + services: { + config: null, + }, + }); + + await clearCommand.action(nullConfigContext, ''); + + expect(nullConfigContext.ui.setDebugMessage).toHaveBeenCalledWith( + 'Clearing terminal.', + ); + expect(mockResetChat).not.toHaveBeenCalled(); + expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes( + 1, + ); + expect(nullConfigContext.ui.clear).toHaveBeenCalledTimes(1); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..0bf46af1be30f56512c6894ab91ddaf6ad2b463b --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/clearCommand.ts @@ -0,0 +1,29 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { uiTelemetryService } from '@qwen-code/qwen-code-core'; +import { CommandKind, SlashCommand } from './types.js'; + +export const clearCommand: SlashCommand = { + name: 'clear', + description: 'clear the screen and conversation history', + kind: CommandKind.BUILT_IN, + action: async (context, _args) => { + const geminiClient = 
context.services.config?.getGeminiClient(); + + if (geminiClient) { + context.ui.setDebugMessage('Clearing terminal and resetting chat.'); + // If resetChat fails, the exception will propagate and halt the command, + // which is the correct behavior to signal a failure to the user. + await geminiClient.resetChat(); + } else { + context.ui.setDebugMessage('Clearing terminal.'); + } + + uiTelemetryService.resetLastPromptTokenCount(); + context.ui.clear(); + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..a6d3ab1b9b63aecefa1cae0d76e5c8cec5ddcc27 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.test.ts @@ -0,0 +1,129 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { GeminiClient } from '@qwen-code/qwen-code-core'; +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { compressCommand } from './compressCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; + +describe('compressCommand', () => { + let context: ReturnType; + let mockTryCompressChat: ReturnType; + + beforeEach(() => { + mockTryCompressChat = vi.fn(); + context = createMockCommandContext({ + services: { + config: { + getGeminiClient: () => + ({ + tryCompressChat: mockTryCompressChat, + }) as unknown as GeminiClient, + }, + }, + }); + }); + + it('should do nothing if a compression is already pending', async () => { + context.ui.pendingItem = { + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }; + await compressCommand.action!(context, ''); + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ERROR, + 
text: 'Already compressing, wait for previous request to complete', + }), + expect.any(Number), + ); + expect(context.ui.setPendingItem).not.toHaveBeenCalled(); + expect(mockTryCompressChat).not.toHaveBeenCalled(); + }); + + it('should set pending item, call tryCompressChat, and add result on success', async () => { + const compressedResult = { + originalTokenCount: 200, + newTokenCount: 100, + }; + mockTryCompressChat.mockResolvedValue(compressedResult); + + await compressCommand.action!(context, ''); + + expect(context.ui.setPendingItem).toHaveBeenNthCalledWith( + 1, + expect.objectContaining({ + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }), + ); + + expect(mockTryCompressChat).toHaveBeenCalledWith( + expect.stringMatching(/^compress-\d+$/), + true, + ); + + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.COMPRESSION, + compression: { + isPending: false, + originalTokenCount: 200, + newTokenCount: 100, + }, + }), + expect.any(Number), + ); + + expect(context.ui.setPendingItem).toHaveBeenNthCalledWith(2, null); + }); + + it('should add an error message if tryCompressChat returns falsy', async () => { + mockTryCompressChat.mockResolvedValue(null); + + await compressCommand.action!(context, ''); + + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ERROR, + text: 'Failed to compress chat history.', + }), + expect.any(Number), + ); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); + + it('should add an error message if tryCompressChat throws', async () => { + const error = new Error('Compression failed'); + mockTryCompressChat.mockRejectedValue(error); + + await compressCommand.action!(context, ''); + + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ERROR, + text: `Failed to compress chat history: ${error.message}`, + }), 
+ expect.any(Number), + ); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); + + it('should clear the pending item in a finally block', async () => { + mockTryCompressChat.mockRejectedValue(new Error('some error')); + await compressCommand.action!(context, ''); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..792e8b5b02e5d3970e18a332a3780c11e7f24fd6 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/compressCommand.ts @@ -0,0 +1,78 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { HistoryItemCompression, MessageType } from '../types.js'; +import { CommandKind, SlashCommand } from './types.js'; + +export const compressCommand: SlashCommand = { + name: 'compress', + altNames: ['summarize'], + description: 'Compresses the context by replacing it with a summary.', + kind: CommandKind.BUILT_IN, + action: async (context) => { + const { ui } = context; + if (ui.pendingItem) { + ui.addItem( + { + type: MessageType.ERROR, + text: 'Already compressing, wait for previous request to complete', + }, + Date.now(), + ); + return; + } + + const pendingMessage: HistoryItemCompression = { + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }; + + try { + ui.setPendingItem(pendingMessage); + const promptId = `compress-${Date.now()}`; + const compressed = await context.services.config + ?.getGeminiClient() + ?.tryCompressChat(promptId, true); + if (compressed) { + ui.addItem( + { + type: MessageType.COMPRESSION, + compression: { + isPending: false, + originalTokenCount: compressed.originalTokenCount, + newTokenCount: compressed.newTokenCount, + }, + } as 
HistoryItemCompression, + Date.now(), + ); + } else { + ui.addItem( + { + type: MessageType.ERROR, + text: 'Failed to compress chat history.', + }, + Date.now(), + ); + } + } catch (e) { + ui.addItem( + { + type: MessageType.ERROR, + text: `Failed to compress chat history: ${ + e instanceof Error ? e.message : String(e) + }`, + }, + Date.now(), + ); + } finally { + ui.setPendingItem(null); + } + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..b163b43f46de7bd36963654eb08271f1178c0d1c --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.test.ts @@ -0,0 +1,296 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, Mock } from 'vitest'; +import { copyCommand } from './copyCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { copyToClipboard } from '../utils/commandUtils.js'; + +vi.mock('../utils/commandUtils.js', () => ({ + copyToClipboard: vi.fn(), +})); + +describe('copyCommand', () => { + let mockContext: CommandContext; + let mockCopyToClipboard: Mock; + let mockGetChat: Mock; + let mockGetHistory: Mock; + + beforeEach(() => { + vi.clearAllMocks(); + + mockCopyToClipboard = vi.mocked(copyToClipboard); + mockGetChat = vi.fn(); + mockGetHistory = vi.fn(); + + mockContext = createMockCommandContext({ + services: { + config: { + getGeminiClient: () => ({ + getChat: mockGetChat, + }), + }, + }, + }); + + mockGetChat.mockReturnValue({ + getHistory: mockGetHistory, + }); + }); + + it('should return info message when no history is available', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + mockGetChat.mockReturnValue(undefined); + + 
const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should return info message when history is empty', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + mockGetHistory.mockReturnValue([]); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should return info message when no AI messages are found in history', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithUserOnly = [ + { + role: 'user', + parts: [{ text: 'Hello' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithUserOnly); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should copy last AI message to clipboard successfully', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'user', + parts: [{ text: 'Hello' }], + }, + { + role: 'model', + parts: [{ text: 'Hi there! How can I help you?' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + + expect(mockCopyToClipboard).toHaveBeenCalledWith( + 'Hi there! 
How can I help you?', + ); + }); + + it('should handle multiple text parts in AI message', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMultipleParts = [ + { + role: 'model', + parts: [{ text: 'Part 1: ' }, { text: 'Part 2: ' }, { text: 'Part 3' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMultipleParts); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(mockCopyToClipboard).toHaveBeenCalledWith('Part 1: Part 2: Part 3'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should filter out non-text parts', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMixedParts = [ + { + role: 'model', + parts: [ + { text: 'Text part' }, + { image: 'base64data' }, // Non-text part + { text: ' more text' }, + ], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMixedParts); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(mockCopyToClipboard).toHaveBeenCalledWith('Text part more text'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should get the last AI message when multiple AI messages exist', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMultipleAiMessages = [ + { + role: 'model', + parts: [{ text: 'First AI response' }], + }, + { + role: 'user', + parts: [{ text: 'User message' }], + }, + { + role: 'model', + parts: [{ text: 'Second AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMultipleAiMessages); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + 
expect(mockCopyToClipboard).toHaveBeenCalledWith('Second AI response'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should handle clipboard copy error', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'model', + parts: [{ text: 'AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + const clipboardError = new Error('Clipboard access denied'); + mockCopyToClipboard.mockRejectedValue(clipboardError); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }); + }); + + it('should handle non-Error clipboard errors', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'model', + parts: [{ text: 'AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + mockCopyToClipboard.mockRejectedValue('String error'); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }); + }); + + it('should return info message when no text parts found in AI message', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithEmptyParts = [ + { + role: 'model', + parts: [{ image: 'base64data' }], // No text parts + }, + ]; + + mockGetHistory.mockReturnValue(historyWithEmptyParts); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last AI output contains no text to copy.', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should handle unavailable config service', async 
() => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const nullConfigContext = createMockCommandContext({ + services: { config: null }, + }); + + const result = await copyCommand.action(nullConfigContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd330faaec7f20c6c9c196982ff23c89a7199893 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/copyCommand.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { copyToClipboard } from '../utils/commandUtils.js'; +import { + CommandKind, + SlashCommand, + SlashCommandActionReturn, +} from './types.js'; + +export const copyCommand: SlashCommand = { + name: 'copy', + description: 'Copy the last result or code snippet to clipboard', + kind: CommandKind.BUILT_IN, + action: async (context, _args): Promise => { + const chat = await context.services.config?.getGeminiClient()?.getChat(); + const history = chat?.getHistory(); + + // Get the last message from the AI (model role) + const lastAiMessage = history + ? 
history.filter((item) => item.role === 'model').pop() + : undefined; + + if (!lastAiMessage) { + return { + type: 'message', + messageType: 'info', + content: 'No output in history', + }; + } + // Extract text from the parts + const lastAiOutput = lastAiMessage.parts + ?.filter((part) => part.text) + .map((part) => part.text) + .join(''); + + if (lastAiOutput) { + try { + await copyToClipboard(lastAiOutput); + + return { + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + console.debug(message); + + return { + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }; + } + } else { + return { + type: 'message', + messageType: 'info', + content: 'Last AI output contains no text to copy.', + }; + } + }, +}; diff --git a/projects/ui/qwen-code/packages/cli/src/ui/commands/corgiCommand.test.ts b/projects/ui/qwen-code/packages/cli/src/ui/commands/corgiCommand.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..3c25e8cd0d9604938ad27cfb808fc72f6dda94ee --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/ui/commands/corgiCommand.test.ts @@ -0,0 +1,34 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { corgiCommand } from './corgiCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +describe('corgiCommand', () => { + let mockContext: CommandContext; + + beforeEach(() => { + mockContext = createMockCommandContext(); + vi.spyOn(mockContext.ui, 'toggleCorgiMode'); + }); + + it('should call the toggleCorgiMode function on the UI context', async () => { + if (!corgiCommand.action) { + throw new Error('The corgi command must have an action.'); + } + + await 
corgiCommand.action(mockContext, ''); + + expect(mockContext.ui.toggleCorgiMode).toHaveBeenCalledTimes(1); + }); + + it('should have the correct name and description', () => { + expect(corgiCommand.name).toBe('corgi'); + expect(corgiCommand.description).toBe('Toggles corgi mode.'); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-closed.sb b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-closed.sb new file mode 100644 index 0000000000000000000000000000000000000000..2069243ce117832398dd1ea6d944efe6dca48a53 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-closed.sb @@ -0,0 +1,93 @@ +(version 1) + +;; deny everything by default +(deny default) + +;; allow reading files from anywhere on host +(allow file-read*) + +;; allow exec/fork (children inherit policy) +(allow process-exec) +(allow process-fork) + +;; allow signals to self, e.g. SIGPIPE on write to closed pipe +(allow signal (target self)) + +;; allow read access to specific information about system +;; from https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd +(allow sysctl-read + (sysctl-name "hw.activecpu") + (sysctl-name "hw.busfrequency_compat") + (sysctl-name "hw.byteorder") + (sysctl-name "hw.cacheconfig") + (sysctl-name "hw.cachelinesize_compat") + (sysctl-name "hw.cpufamily") + (sysctl-name "hw.cpufrequency_compat") + (sysctl-name "hw.cputype") + (sysctl-name "hw.l1dcachesize_compat") + (sysctl-name "hw.l1icachesize_compat") + (sysctl-name "hw.l2cachesize_compat") + (sysctl-name "hw.l3cachesize_compat") + (sysctl-name "hw.logicalcpu_max") + (sysctl-name "hw.machine") + (sysctl-name "hw.ncpu") + (sysctl-name "hw.nperflevels") + (sysctl-name "hw.optional.arm.FEAT_BF16") + (sysctl-name "hw.optional.arm.FEAT_DotProd") + (sysctl-name "hw.optional.arm.FEAT_FCMA") + (sysctl-name "hw.optional.arm.FEAT_FHM") + 
(sysctl-name "hw.optional.arm.FEAT_FP16") + (sysctl-name "hw.optional.arm.FEAT_I8MM") + (sysctl-name "hw.optional.arm.FEAT_JSCVT") + (sysctl-name "hw.optional.arm.FEAT_LSE") + (sysctl-name "hw.optional.arm.FEAT_RDM") + (sysctl-name "hw.optional.arm.FEAT_SHA512") + (sysctl-name "hw.optional.armv8_2_sha512") + (sysctl-name "hw.packages") + (sysctl-name "hw.pagesize_compat") + (sysctl-name "hw.physicalcpu_max") + (sysctl-name "hw.tbfrequency_compat") + (sysctl-name "hw.vectorunit") + (sysctl-name "kern.hostname") + (sysctl-name "kern.maxfilesperproc") + (sysctl-name "kern.osproductversion") + (sysctl-name "kern.osrelease") + (sysctl-name "kern.ostype") + (sysctl-name "kern.osvariant_status") + (sysctl-name "kern.osversion") + (sysctl-name "kern.secure_kernel") + (sysctl-name "kern.usrstack64") + (sysctl-name "kern.version") + (sysctl-name "sysctl.proc_cputype") + (sysctl-name-prefix "hw.perflevel") +) + +;; allow writes to specific paths +(allow file-write* + (subpath (param "TARGET_DIR")) + (subpath (param "TMP_DIR")) + (subpath (param "CACHE_DIR")) + (subpath (string-append (param "HOME_DIR") "/.qwen")) + (subpath (string-append (param "HOME_DIR") "/.npm")) + (subpath (string-append (param "HOME_DIR") "/.cache")) + (subpath (string-append (param "HOME_DIR") "/.gitconfig")) + ;; Allow writes to included directories from --include-directories + (subpath (param "INCLUDE_DIR_0")) + (subpath (param "INCLUDE_DIR_1")) + (subpath (param "INCLUDE_DIR_2")) + (subpath (param "INCLUDE_DIR_3")) + (subpath (param "INCLUDE_DIR_4")) + (literal "/dev/stdout") + (literal "/dev/stderr") + (literal "/dev/null") +) + +;; allow communication with sysmond for process listing (e.g. 
for pgrep) +(allow mach-lookup (global-name "com.apple.sysmond")) + +;; enable terminal access required by ink +;; fixes setRawMode EPERM failure (at node:tty:81:24) +(allow file-ioctl (regex #"^/dev/tty.*")) + +;; allow inbound network traffic on debugger port +(allow network-inbound (local ip "localhost:9229")) \ No newline at end of file diff --git a/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-open.sb b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-open.sb new file mode 100644 index 0000000000000000000000000000000000000000..45a9cb3f9fe21cfaa266e7098be62a5077943f61 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-open.sb @@ -0,0 +1,96 @@ +(version 1) + +;; deny everything by default +(deny default) + +;; allow reading files from anywhere on host +(allow file-read*) + +;; allow exec/fork (children inherit policy) +(allow process-exec) +(allow process-fork) + +;; allow signals to self, e.g. SIGPIPE on write to closed pipe +(allow signal (target self)) + +;; allow read access to specific information about system +;; from https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd +(allow sysctl-read + (sysctl-name "hw.activecpu") + (sysctl-name "hw.busfrequency_compat") + (sysctl-name "hw.byteorder") + (sysctl-name "hw.cacheconfig") + (sysctl-name "hw.cachelinesize_compat") + (sysctl-name "hw.cpufamily") + (sysctl-name "hw.cpufrequency_compat") + (sysctl-name "hw.cputype") + (sysctl-name "hw.l1dcachesize_compat") + (sysctl-name "hw.l1icachesize_compat") + (sysctl-name "hw.l2cachesize_compat") + (sysctl-name "hw.l3cachesize_compat") + (sysctl-name "hw.logicalcpu_max") + (sysctl-name "hw.machine") + (sysctl-name "hw.ncpu") + (sysctl-name "hw.nperflevels") + (sysctl-name "hw.optional.arm.FEAT_BF16") + (sysctl-name "hw.optional.arm.FEAT_DotProd") + (sysctl-name "hw.optional.arm.FEAT_FCMA") + 
(sysctl-name "hw.optional.arm.FEAT_FHM") + (sysctl-name "hw.optional.arm.FEAT_FP16") + (sysctl-name "hw.optional.arm.FEAT_I8MM") + (sysctl-name "hw.optional.arm.FEAT_JSCVT") + (sysctl-name "hw.optional.arm.FEAT_LSE") + (sysctl-name "hw.optional.arm.FEAT_RDM") + (sysctl-name "hw.optional.arm.FEAT_SHA512") + (sysctl-name "hw.optional.armv8_2_sha512") + (sysctl-name "hw.packages") + (sysctl-name "hw.pagesize_compat") + (sysctl-name "hw.physicalcpu_max") + (sysctl-name "hw.tbfrequency_compat") + (sysctl-name "hw.vectorunit") + (sysctl-name "kern.hostname") + (sysctl-name "kern.maxfilesperproc") + (sysctl-name "kern.osproductversion") + (sysctl-name "kern.osrelease") + (sysctl-name "kern.ostype") + (sysctl-name "kern.osvariant_status") + (sysctl-name "kern.osversion") + (sysctl-name "kern.secure_kernel") + (sysctl-name "kern.usrstack64") + (sysctl-name "kern.version") + (sysctl-name "sysctl.proc_cputype") + (sysctl-name-prefix "hw.perflevel") +) + +;; allow writes to specific paths +(allow file-write* + (subpath (param "TARGET_DIR")) + (subpath (param "TMP_DIR")) + (subpath (param "CACHE_DIR")) + (subpath (string-append (param "HOME_DIR") "/.qwen")) + (subpath (string-append (param "HOME_DIR") "/.npm")) + (subpath (string-append (param "HOME_DIR") "/.cache")) + (subpath (string-append (param "HOME_DIR") "/.gitconfig")) + ;; Allow writes to included directories from --include-directories + (subpath (param "INCLUDE_DIR_0")) + (subpath (param "INCLUDE_DIR_1")) + (subpath (param "INCLUDE_DIR_2")) + (subpath (param "INCLUDE_DIR_3")) + (subpath (param "INCLUDE_DIR_4")) + (literal "/dev/stdout") + (literal "/dev/stderr") + (literal "/dev/null") +) + +;; allow communication with sysmond for process listing (e.g. 
for pgrep) +(allow mach-lookup (global-name "com.apple.sysmond")) + +;; enable terminal access required by ink +;; fixes setRawMode EPERM failure (at node:tty:81:24) +(allow file-ioctl (regex #"^/dev/tty.*")) + +;; allow inbound network traffic on debugger port +(allow network-inbound (local ip "localhost:9229")) + +;; allow all outbound network traffic +(allow network-outbound) \ No newline at end of file diff --git a/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-proxied.sb b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-proxied.sb new file mode 100644 index 0000000000000000000000000000000000000000..8affc94dc77f4f50529b0d51d28f1844099afffa --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/sandbox-macos-restrictive-proxied.sb @@ -0,0 +1,98 @@ +(version 1) + +;; deny everything by default +(deny default) + +;; allow reading files from anywhere on host +(allow file-read*) + +;; allow exec/fork (children inherit policy) +(allow process-exec) +(allow process-fork) + +;; allow signals to self, e.g. 
SIGPIPE on write to closed pipe +(allow signal (target self)) + +;; allow read access to specific information about system +;; from https://source.chromium.org/chromium/chromium/src/+/main:sandbox/policy/mac/common.sb;l=273-319;drc=7b3962fe2e5fc9e2ee58000dc8fbf3429d84d3bd +(allow sysctl-read + (sysctl-name "hw.activecpu") + (sysctl-name "hw.busfrequency_compat") + (sysctl-name "hw.byteorder") + (sysctl-name "hw.cacheconfig") + (sysctl-name "hw.cachelinesize_compat") + (sysctl-name "hw.cpufamily") + (sysctl-name "hw.cpufrequency_compat") + (sysctl-name "hw.cputype") + (sysctl-name "hw.l1dcachesize_compat") + (sysctl-name "hw.l1icachesize_compat") + (sysctl-name "hw.l2cachesize_compat") + (sysctl-name "hw.l3cachesize_compat") + (sysctl-name "hw.logicalcpu_max") + (sysctl-name "hw.machine") + (sysctl-name "hw.ncpu") + (sysctl-name "hw.nperflevels") + (sysctl-name "hw.optional.arm.FEAT_BF16") + (sysctl-name "hw.optional.arm.FEAT_DotProd") + (sysctl-name "hw.optional.arm.FEAT_FCMA") + (sysctl-name "hw.optional.arm.FEAT_FHM") + (sysctl-name "hw.optional.arm.FEAT_FP16") + (sysctl-name "hw.optional.arm.FEAT_I8MM") + (sysctl-name "hw.optional.arm.FEAT_JSCVT") + (sysctl-name "hw.optional.arm.FEAT_LSE") + (sysctl-name "hw.optional.arm.FEAT_RDM") + (sysctl-name "hw.optional.arm.FEAT_SHA512") + (sysctl-name "hw.optional.armv8_2_sha512") + (sysctl-name "hw.packages") + (sysctl-name "hw.pagesize_compat") + (sysctl-name "hw.physicalcpu_max") + (sysctl-name "hw.tbfrequency_compat") + (sysctl-name "hw.vectorunit") + (sysctl-name "kern.hostname") + (sysctl-name "kern.maxfilesperproc") + (sysctl-name "kern.osproductversion") + (sysctl-name "kern.osrelease") + (sysctl-name "kern.ostype") + (sysctl-name "kern.osvariant_status") + (sysctl-name "kern.osversion") + (sysctl-name "kern.secure_kernel") + (sysctl-name "kern.usrstack64") + (sysctl-name "kern.version") + (sysctl-name "sysctl.proc_cputype") + (sysctl-name-prefix "hw.perflevel") +) + +;; allow writes to specific paths +(allow 
file-write* + (subpath (param "TARGET_DIR")) + (subpath (param "TMP_DIR")) + (subpath (param "CACHE_DIR")) + (subpath (string-append (param "HOME_DIR") "/.qwen")) + (subpath (string-append (param "HOME_DIR") "/.npm")) + (subpath (string-append (param "HOME_DIR") "/.cache")) + (subpath (string-append (param "HOME_DIR") "/.gitconfig")) + ;; Allow writes to included directories from --include-directories + (subpath (param "INCLUDE_DIR_0")) + (subpath (param "INCLUDE_DIR_1")) + (subpath (param "INCLUDE_DIR_2")) + (subpath (param "INCLUDE_DIR_3")) + (subpath (param "INCLUDE_DIR_4")) + (literal "/dev/stdout") + (literal "/dev/stderr") + (literal "/dev/null") +) + +;; allow communication with sysmond for process listing (e.g. for pgrep) +(allow mach-lookup (global-name "com.apple.sysmond")) + +;; enable terminal access required by ink +;; fixes setRawMode EPERM failure (at node:tty:81:24) +(allow file-ioctl (regex #"^/dev/tty.*")) + +;; allow inbound network traffic on debugger port +(allow network-inbound (local ip "localhost:9229")) + +;; allow outbound network traffic through proxy on localhost:8877 +;; set `GEMINI_SANDBOX_PROXY_COMMAND=` to run proxy alongside sandbox +;; proxy must listen on :::8877 (see docs/examples/proxy-script.md) +(allow network-outbound (remote tcp "localhost:8877")) diff --git a/projects/ui/qwen-code/packages/cli/src/utils/sandbox.ts b/projects/ui/qwen-code/packages/cli/src/utils/sandbox.ts new file mode 100644 index 0000000000000000000000000000000000000000..b7e6a464add338d26d8881947334d4b29078f2a7 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/sandbox.ts @@ -0,0 +1,962 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { exec, execSync, spawn, type ChildProcess } from 'node:child_process'; +import os from 'node:os'; +import path from 'node:path'; +import fs from 'node:fs'; +import { readFile } from 'node:fs/promises'; +import { quote, parse } from 'shell-quote'; +import 
{ + USER_SETTINGS_DIR, + SETTINGS_DIRECTORY_NAME, +} from '../config/settings.js'; +import { promisify } from 'util'; +import { Config, SandboxConfig } from '@qwen-code/qwen-code-core'; +import { ConsolePatcher } from '../ui/utils/ConsolePatcher.js'; + +const execAsync = promisify(exec); + +function getContainerPath(hostPath: string): string { + if (os.platform() !== 'win32') { + return hostPath; + } + const withForwardSlashes = hostPath.replace(/\\/g, '/'); + const match = withForwardSlashes.match(/^([A-Z]):\/(.*)/i); + if (match) { + return `/${match[1].toLowerCase()}/${match[2]}`; + } + return hostPath; +} + +const LOCAL_DEV_SANDBOX_IMAGE_NAME = 'qwen-code-sandbox'; +const SANDBOX_NETWORK_NAME = 'qwen-code-sandbox'; +const SANDBOX_PROXY_NAME = 'qwen-code-sandbox-proxy'; +const BUILTIN_SEATBELT_PROFILES = [ + 'permissive-open', + 'permissive-closed', + 'permissive-proxied', + 'restrictive-open', + 'restrictive-closed', + 'restrictive-proxied', +]; + +/** + * Determines whether the sandbox container should be run with the current user's UID and GID. + * This is often necessary on Linux systems (especially Debian/Ubuntu based) when using + * rootful Docker without userns-remap configured, to avoid permission issues with + * mounted volumes. + * + * The behavior is controlled by the `SANDBOX_SET_UID_GID` environment variable: + * - If `SANDBOX_SET_UID_GID` is "1" or "true", this function returns `true`. + * - If `SANDBOX_SET_UID_GID` is "0" or "false", this function returns `false`. + * - If `SANDBOX_SET_UID_GID` is not set: + * - On Debian/Ubuntu Linux, it defaults to `true`. + * - On other OSes, or if OS detection fails, it defaults to `false`. + * + * For more context on running Docker containers as non-root, see: + * https://medium.com/redbubble/running-a-docker-container-as-a-non-root-user-7d2e00f8ee15 + * + * @returns {Promise} A promise that resolves to true if the current user's UID/GID should be used, false otherwise. 
+ */ +async function shouldUseCurrentUserInSandbox(): Promise { + const envVar = process.env['SANDBOX_SET_UID_GID']?.toLowerCase().trim(); + + if (envVar === '1' || envVar === 'true') { + return true; + } + if (envVar === '0' || envVar === 'false') { + return false; + } + + // If environment variable is not explicitly set, check for Debian/Ubuntu Linux + if (os.platform() === 'linux') { + try { + const osReleaseContent = await readFile('/etc/os-release', 'utf8'); + if ( + osReleaseContent.includes('ID=debian') || + osReleaseContent.includes('ID=ubuntu') || + osReleaseContent.match(/^ID_LIKE=.*debian.*/m) || // Covers derivatives + osReleaseContent.match(/^ID_LIKE=.*ubuntu.*/m) // Covers derivatives + ) { + // note here and below we use console.error for informational messages on stderr + console.error( + 'INFO: Defaulting to use current user UID/GID for Debian/Ubuntu-based Linux.', + ); + return true; + } + } catch (_err) { + // Silently ignore if /etc/os-release is not found or unreadable. + // The default (false) will be applied in this case. + console.warn( + 'Warning: Could not read /etc/os-release to auto-detect Debian/Ubuntu for UID/GID default.', + ); + } + } + return false; // Default to false if no other condition is met +} + +// docker does not allow container names to contain ':' or '/', so we +// parse those out to shorten the name +function parseImageName(image: string): string { + const [fullName, tag] = image.split(':'); + const name = fullName.split('/').at(-1) ?? 'unknown-image'; + return tag ? `${name}-${tag}` : name; +} + +function ports(): string[] { + return (process.env['SANDBOX_PORTS'] ?? '') + .split(',') + .filter((p) => p.trim()) + .map((p) => p.trim()); +} + +function entrypoint(workdir: string): string[] { + const isWindows = os.platform() === 'win32'; + const containerWorkdir = getContainerPath(workdir); + const shellCmds = []; + const pathSeparator = isWindows ? 
';' : ':'; + + let pathSuffix = ''; + if (process.env['PATH']) { + const paths = process.env['PATH'].split(pathSeparator); + for (const p of paths) { + const containerPath = getContainerPath(p); + if ( + containerPath.toLowerCase().startsWith(containerWorkdir.toLowerCase()) + ) { + pathSuffix += `:${containerPath}`; + } + } + } + if (pathSuffix) { + shellCmds.push(`export PATH="$PATH${pathSuffix}";`); + } + + let pythonPathSuffix = ''; + if (process.env['PYTHONPATH']) { + const paths = process.env['PYTHONPATH'].split(pathSeparator); + for (const p of paths) { + const containerPath = getContainerPath(p); + if ( + containerPath.toLowerCase().startsWith(containerWorkdir.toLowerCase()) + ) { + pythonPathSuffix += `:${containerPath}`; + } + } + } + if (pythonPathSuffix) { + shellCmds.push(`export PYTHONPATH="$PYTHONPATH${pythonPathSuffix}";`); + } + + const projectSandboxBashrc = path.join( + SETTINGS_DIRECTORY_NAME, + 'sandbox.bashrc', + ); + if (fs.existsSync(projectSandboxBashrc)) { + shellCmds.push(`source ${getContainerPath(projectSandboxBashrc)};`); + } + + ports().forEach((p) => + shellCmds.push( + `socat TCP4-LISTEN:${p},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${p} 2> /dev/null &`, + ), + ); + + const cliArgs = process.argv.slice(2).map((arg) => quote([arg])); + const cliCmd = + process.env['NODE_ENV'] === 'development' + ? process.env['DEBUG'] + ? 'npm run debug --' + : 'npm rebuild && npm run start --' + : process.env['DEBUG'] + ? 
`node --inspect-brk=0.0.0.0:${process.env['DEBUG_PORT'] || '9229'} $(which qwen)` + : 'qwen'; + + const args = [...shellCmds, cliCmd, ...cliArgs]; + + return ['bash', '-c', args.join(' ')]; +} + +export async function start_sandbox( + config: SandboxConfig, + nodeArgs: string[] = [], + cliConfig?: Config, +) { + const patcher = new ConsolePatcher({ + debugMode: cliConfig?.getDebugMode() || !!process.env['DEBUG'], + stderr: true, + }); + patcher.patch(); + + try { + if (config.command === 'sandbox-exec') { + // disallow BUILD_SANDBOX + if (process.env['BUILD_SANDBOX']) { + console.error('ERROR: cannot BUILD_SANDBOX when using macOS Seatbelt'); + process.exit(1); + } + const profile = (process.env['SEATBELT_PROFILE'] ??= 'permissive-open'); + let profileFile = new URL(`sandbox-macos-${profile}.sb`, import.meta.url) + .pathname; + // if profile name is not recognized, then look for file under project settings directory + if (!BUILTIN_SEATBELT_PROFILES.includes(profile)) { + profileFile = path.join( + SETTINGS_DIRECTORY_NAME, + `sandbox-macos-${profile}.sb`, + ); + } + if (!fs.existsSync(profileFile)) { + console.error( + `ERROR: missing macos seatbelt profile file '${profileFile}'`, + ); + process.exit(1); + } + // Log on STDERR so it doesn't clutter the output on STDOUT + console.error(`using macos seatbelt (profile: ${profile}) ...`); + // if DEBUG is set, convert to --inspect-brk in NODE_OPTIONS + const nodeOptions = [ + ...(process.env['DEBUG'] ? 
['--inspect-brk'] : []), + ...nodeArgs, + ].join(' '); + + const args = [ + '-D', + `TARGET_DIR=${fs.realpathSync(process.cwd())}`, + '-D', + `TMP_DIR=${fs.realpathSync(os.tmpdir())}`, + '-D', + `HOME_DIR=${fs.realpathSync(os.homedir())}`, + '-D', + `CACHE_DIR=${fs.realpathSync(execSync(`getconf DARWIN_USER_CACHE_DIR`).toString().trim())}`, + ]; + + // Add included directories from the workspace context + // Always add 5 INCLUDE_DIR parameters to ensure .sb files can reference them + const MAX_INCLUDE_DIRS = 5; + const targetDir = fs.realpathSync(cliConfig?.getTargetDir() || ''); + const includedDirs: string[] = []; + + if (cliConfig) { + const workspaceContext = cliConfig.getWorkspaceContext(); + const directories = workspaceContext.getDirectories(); + + // Filter out TARGET_DIR + for (const dir of directories) { + const realDir = fs.realpathSync(dir); + if (realDir !== targetDir) { + includedDirs.push(realDir); + } + } + } + + for (let i = 0; i < MAX_INCLUDE_DIRS; i++) { + let dirPath = '/dev/null'; // Default to a safe path that won't cause issues + + if (i < includedDirs.length) { + dirPath = includedDirs[i]; + } + + args.push('-D', `INCLUDE_DIR_${i}=${dirPath}`); + } + + args.push( + '-f', + profileFile, + 'sh', + '-c', + [ + `SANDBOX=sandbox-exec`, + `NODE_OPTIONS="${nodeOptions}"`, + ...process.argv.map((arg) => quote([arg])), + ].join(' '), + ); + // start and set up proxy if GEMINI_SANDBOX_PROXY_COMMAND is set + const proxyCommand = process.env['GEMINI_SANDBOX_PROXY_COMMAND']; + let proxyProcess: ChildProcess | undefined = undefined; + let sandboxProcess: ChildProcess | undefined = undefined; + const sandboxEnv = { ...process.env }; + if (proxyCommand) { + const proxy = + process.env['HTTPS_PROXY'] || + process.env['https_proxy'] || + process.env['HTTP_PROXY'] || + process.env['http_proxy'] || + 'http://localhost:8877'; + sandboxEnv['HTTPS_PROXY'] = proxy; + sandboxEnv['https_proxy'] = proxy; // lower-case can be required, e.g. 
for curl + sandboxEnv['HTTP_PROXY'] = proxy; + sandboxEnv['http_proxy'] = proxy; + const noProxy = process.env['NO_PROXY'] || process.env['no_proxy']; + if (noProxy) { + sandboxEnv['NO_PROXY'] = noProxy; + sandboxEnv['no_proxy'] = noProxy; + } + proxyProcess = spawn(proxyCommand, { + stdio: ['ignore', 'pipe', 'pipe'], + shell: true, + detached: true, + }); + // install handlers to stop proxy on exit/signal + const stopProxy = () => { + console.log('stopping proxy ...'); + if (proxyProcess?.pid) { + process.kill(-proxyProcess.pid, 'SIGTERM'); + } + }; + process.on('exit', stopProxy); + process.on('SIGINT', stopProxy); + process.on('SIGTERM', stopProxy); + + // commented out as it disrupts ink rendering + // proxyProcess.stdout?.on('data', (data) => { + // console.info(data.toString()); + // }); + proxyProcess.stderr?.on('data', (data) => { + console.error(data.toString()); + }); + proxyProcess.on('close', (code, signal) => { + console.error( + `ERROR: proxy command '${proxyCommand}' exited with code ${code}, signal ${signal}`, + ); + if (sandboxProcess?.pid) { + process.kill(-sandboxProcess.pid, 'SIGTERM'); + } + process.exit(1); + }); + console.log('waiting for proxy to start ...'); + await execAsync( + `until timeout 0.25 curl -s http://localhost:8877; do sleep 0.25; done`, + ); + } + // spawn child and let it inherit stdio + sandboxProcess = spawn(config.command, args, { + stdio: 'inherit', + }); + await new Promise((resolve) => sandboxProcess?.on('close', resolve)); + return; + } + + console.error(`hopping into sandbox (command: ${config.command}) ...`); + + // determine full path for gemini-cli to distinguish linked vs installed setting + const gcPath = fs.realpathSync(process.argv[1]); + + const projectSandboxDockerfile = path.join( + SETTINGS_DIRECTORY_NAME, + 'sandbox.Dockerfile', + ); + const isCustomProjectSandbox = fs.existsSync(projectSandboxDockerfile); + + const image = config.image; + const workdir = path.resolve(process.cwd()); + const 
containerWorkdir = getContainerPath(workdir); + + // if BUILD_SANDBOX is set, then call scripts/build_sandbox.js under gemini-cli repo + // + // note this can only be done with binary linked from gemini-cli repo + if (process.env['BUILD_SANDBOX']) { + if (!gcPath.includes('gemini-cli/packages/')) { + console.error( + 'ERROR: cannot build sandbox using installed gemini binary; ' + + 'run `npm link ./packages/cli` under gemini-cli repo to switch to linked binary.', + ); + process.exit(1); + } else { + console.error('building sandbox ...'); + const gcRoot = gcPath.split('/packages/')[0]; + // if project folder has sandbox.Dockerfile under project settings folder, use that + let buildArgs = ''; + const projectSandboxDockerfile = path.join( + SETTINGS_DIRECTORY_NAME, + 'sandbox.Dockerfile', + ); + if (isCustomProjectSandbox) { + console.error(`using ${projectSandboxDockerfile} for sandbox`); + buildArgs += `-f ${path.resolve(projectSandboxDockerfile)} -i ${image}`; + } + execSync( + `cd ${gcRoot} && node scripts/build_sandbox.js -s ${buildArgs}`, + { + stdio: 'inherit', + env: { + ...process.env, + GEMINI_SANDBOX: config.command, // in case sandbox is enabled via flags (see config.ts under cli package) + }, + }, + ); + } + } + + // stop if image is missing + if (!(await ensureSandboxImageIsPresent(config.command, image))) { + const remedy = + image === LOCAL_DEV_SANDBOX_IMAGE_NAME + ? 'Try running `npm run build:all` or `npm run build:sandbox` under the gemini-cli repo to build it locally, or check the image name and your network connection.' + : 'Please check the image name, your network connection, or notify gemini-cli-dev@google.com if the issue persists.'; + console.error( + `ERROR: Sandbox image '${image}' is missing or could not be pulled. 
${remedy}`, + ); + process.exit(1); + } + + // use interactive mode and auto-remove container on exit + // run init binary inside container to forward signals & reap zombies + const args = ['run', '-i', '--rm', '--init', '--workdir', containerWorkdir]; + + // add custom flags from SANDBOX_FLAGS + if (process.env['SANDBOX_FLAGS']) { + const flags = parse(process.env['SANDBOX_FLAGS'], process.env).filter( + (f): f is string => typeof f === 'string', + ); + args.push(...flags); + } + + // add TTY only if stdin is TTY as well, i.e. for piped input don't init TTY in container + if (process.stdin.isTTY) { + args.push('-t'); + } + + // mount current directory as working directory in sandbox (set via --workdir) + args.push('--volume', `${workdir}:${containerWorkdir}`); + + // mount user settings directory inside container, after creating if missing + // note user/home changes inside sandbox and we mount at BOTH paths for consistency + const userSettingsDirOnHost = USER_SETTINGS_DIR; + const userSettingsDirInSandbox = getContainerPath( + `/home/node/${SETTINGS_DIRECTORY_NAME}`, + ); + if (!fs.existsSync(userSettingsDirOnHost)) { + fs.mkdirSync(userSettingsDirOnHost); + } + args.push( + '--volume', + `${userSettingsDirOnHost}:${userSettingsDirInSandbox}`, + ); + if (userSettingsDirInSandbox !== userSettingsDirOnHost) { + args.push( + '--volume', + `${userSettingsDirOnHost}:${getContainerPath(userSettingsDirOnHost)}`, + ); + } + + // mount os.tmpdir() as os.tmpdir() inside container + args.push('--volume', `${os.tmpdir()}:${getContainerPath(os.tmpdir())}`); + + // mount gcloud config directory if it exists + const gcloudConfigDir = path.join(os.homedir(), '.config', 'gcloud'); + if (fs.existsSync(gcloudConfigDir)) { + args.push( + '--volume', + `${gcloudConfigDir}:${getContainerPath(gcloudConfigDir)}:ro`, + ); + } + + // mount ADC file if GOOGLE_APPLICATION_CREDENTIALS is set + if (process.env['GOOGLE_APPLICATION_CREDENTIALS']) { + const adcFile = 
process.env['GOOGLE_APPLICATION_CREDENTIALS']; + if (fs.existsSync(adcFile)) { + args.push('--volume', `${adcFile}:${getContainerPath(adcFile)}:ro`); + args.push( + '--env', + `GOOGLE_APPLICATION_CREDENTIALS=${getContainerPath(adcFile)}`, + ); + } + } + + // mount paths listed in SANDBOX_MOUNTS + if (process.env['SANDBOX_MOUNTS']) { + for (let mount of process.env['SANDBOX_MOUNTS'].split(',')) { + if (mount.trim()) { + // parse mount as from:to:opts + let [from, to, opts] = mount.trim().split(':'); + to = to || from; // default to mount at same path inside container + opts = opts || 'ro'; // default to read-only + mount = `${from}:${to}:${opts}`; + // check that from path is absolute + if (!path.isAbsolute(from)) { + console.error( + `ERROR: path '${from}' listed in SANDBOX_MOUNTS must be absolute`, + ); + process.exit(1); + } + // check that from path exists on host + if (!fs.existsSync(from)) { + console.error( + `ERROR: missing mount path '${from}' listed in SANDBOX_MOUNTS`, + ); + process.exit(1); + } + console.error(`SANDBOX_MOUNTS: ${from} -> ${to} (${opts})`); + args.push('--volume', mount); + } + } + } + + // expose env-specified ports on the sandbox + ports().forEach((p) => args.push('--publish', `${p}:${p}`)); + + // if DEBUG is set, expose debugging port + if (process.env['DEBUG']) { + const debugPort = process.env['DEBUG_PORT'] || '9229'; + args.push(`--publish`, `${debugPort}:${debugPort}`); + } + + // copy proxy environment variables, replacing localhost with SANDBOX_PROXY_NAME + // copy as both upper-case and lower-case as is required by some utilities + // GEMINI_SANDBOX_PROXY_COMMAND implies HTTPS_PROXY unless HTTP_PROXY is set + const proxyCommand = process.env['GEMINI_SANDBOX_PROXY_COMMAND']; + + if (proxyCommand) { + let proxy = + process.env['HTTPS_PROXY'] || + process.env['https_proxy'] || + process.env['HTTP_PROXY'] || + process.env['http_proxy'] || + 'http://localhost:8877'; + proxy = proxy.replace('localhost', SANDBOX_PROXY_NAME); + if 
(proxy) { + args.push('--env', `HTTPS_PROXY=${proxy}`); + args.push('--env', `https_proxy=${proxy}`); // lower-case can be required, e.g. for curl + args.push('--env', `HTTP_PROXY=${proxy}`); + args.push('--env', `http_proxy=${proxy}`); + } + const noProxy = process.env['NO_PROXY'] || process.env['no_proxy']; + if (noProxy) { + args.push('--env', `NO_PROXY=${noProxy}`); + args.push('--env', `no_proxy=${noProxy}`); + } + + // if using proxy, switch to internal networking through proxy + if (proxy) { + execSync( + `${config.command} network inspect ${SANDBOX_NETWORK_NAME} || ${config.command} network create --internal ${SANDBOX_NETWORK_NAME}`, + ); + args.push('--network', SANDBOX_NETWORK_NAME); + // if proxy command is set, create a separate network w/ host access (i.e. non-internal) + // we will run proxy in its own container connected to both host network and internal network + // this allows proxy to work even on rootless podman on macos with host<->vm<->container isolation + if (proxyCommand) { + execSync( + `${config.command} network inspect ${SANDBOX_PROXY_NAME} || ${config.command} network create ${SANDBOX_PROXY_NAME}`, + ); + } + } + } + + // name container after image, plus numeric suffix to avoid conflicts + const imageName = parseImageName(image); + let index = 0; + const containerNameCheck = execSync( + `${config.command} ps -a --format "{{.Names}}"`, + ) + .toString() + .trim(); + while (containerNameCheck.includes(`${imageName}-${index}`)) { + index++; + } + const containerName = `${imageName}-${index}`; + args.push('--name', containerName, '--hostname', containerName); + + // copy GEMINI_API_KEY(s) + if (process.env['GEMINI_API_KEY']) { + args.push('--env', `GEMINI_API_KEY=${process.env['GEMINI_API_KEY']}`); + } + if (process.env['GOOGLE_API_KEY']) { + args.push('--env', `GOOGLE_API_KEY=${process.env['GOOGLE_API_KEY']}`); + } + + // copy OPENAI_API_KEY and related env vars for Qwen + if (process.env['OPENAI_API_KEY']) { + args.push('--env', 
`OPENAI_API_KEY=${process.env['OPENAI_API_KEY']}`); + } + // copy TAVILY_API_KEY for web search tool + if (process.env['TAVILY_API_KEY']) { + args.push('--env', `TAVILY_API_KEY=${process.env['TAVILY_API_KEY']}`); + } + if (process.env['OPENAI_BASE_URL']) { + args.push('--env', `OPENAI_BASE_URL=${process.env['OPENAI_BASE_URL']}`); + } + if (process.env['OPENAI_MODEL']) { + args.push('--env', `OPENAI_MODEL=${process.env['OPENAI_MODEL']}`); + } + + // copy GOOGLE_GENAI_USE_VERTEXAI + if (process.env['GOOGLE_GENAI_USE_VERTEXAI']) { + args.push( + '--env', + `GOOGLE_GENAI_USE_VERTEXAI=${process.env['GOOGLE_GENAI_USE_VERTEXAI']}`, + ); + } + + // copy GOOGLE_GENAI_USE_GCA + if (process.env['GOOGLE_GENAI_USE_GCA']) { + args.push( + '--env', + `GOOGLE_GENAI_USE_GCA=${process.env['GOOGLE_GENAI_USE_GCA']}`, + ); + } + + // copy GOOGLE_CLOUD_PROJECT + if (process.env['GOOGLE_CLOUD_PROJECT']) { + args.push( + '--env', + `GOOGLE_CLOUD_PROJECT=${process.env['GOOGLE_CLOUD_PROJECT']}`, + ); + } + + // copy GOOGLE_CLOUD_LOCATION + if (process.env['GOOGLE_CLOUD_LOCATION']) { + args.push( + '--env', + `GOOGLE_CLOUD_LOCATION=${process.env['GOOGLE_CLOUD_LOCATION']}`, + ); + } + + // copy GEMINI_MODEL + if (process.env['GEMINI_MODEL']) { + args.push('--env', `GEMINI_MODEL=${process.env['GEMINI_MODEL']}`); + } + + // copy TERM and COLORTERM to try to maintain terminal setup + if (process.env['TERM']) { + args.push('--env', `TERM=${process.env['TERM']}`); + } + if (process.env['COLORTERM']) { + args.push('--env', `COLORTERM=${process.env['COLORTERM']}`); + } + + // Pass through IDE mode environment variables + for (const envVar of [ + 'QWEN_CODE_IDE_SERVER_PORT', + 'QWEN_CODE_IDE_WORKSPACE_PATH', + 'TERM_PROGRAM', + ]) { + if (process.env[envVar]) { + args.push('--env', `${envVar}=${process.env[envVar]}`); + } + } + + // copy VIRTUAL_ENV if under working directory + // also mount-replace VIRTUAL_ENV directory with /sandbox.venv + // sandbox can then set up this new VIRTUAL_ENV directory 
using sandbox.bashrc (see below) + // directory will be empty if not set up, which is still preferable to having host binaries + if ( + process.env['VIRTUAL_ENV'] + ?.toLowerCase() + .startsWith(workdir.toLowerCase()) + ) { + const sandboxVenvPath = path.resolve( + SETTINGS_DIRECTORY_NAME, + 'sandbox.venv', + ); + if (!fs.existsSync(sandboxVenvPath)) { + fs.mkdirSync(sandboxVenvPath, { recursive: true }); + } + args.push( + '--volume', + `${sandboxVenvPath}:${getContainerPath(process.env['VIRTUAL_ENV'])}`, + ); + args.push( + '--env', + `VIRTUAL_ENV=${getContainerPath(process.env['VIRTUAL_ENV'])}`, + ); + } + + // copy additional environment variables from SANDBOX_ENV + if (process.env['SANDBOX_ENV']) { + for (let env of process.env['SANDBOX_ENV'].split(',')) { + if ((env = env.trim())) { + if (env.includes('=')) { + console.error(`SANDBOX_ENV: ${env}`); + args.push('--env', env); + } else { + console.error( + 'ERROR: SANDBOX_ENV must be a comma-separated list of key=value pairs', + ); + process.exit(1); + } + } + } + } + + // copy NODE_OPTIONS + const existingNodeOptions = process.env['NODE_OPTIONS'] || ''; + const allNodeOptions = [ + ...(existingNodeOptions ? [existingNodeOptions] : []), + ...nodeArgs, + ].join(' '); + + if (allNodeOptions.length > 0) { + args.push('--env', `NODE_OPTIONS="${allNodeOptions}"`); + } + + // set SANDBOX as container name + args.push('--env', `SANDBOX=${containerName}`); + + // for podman only, use empty --authfile to skip unnecessary auth refresh overhead + if (config.command === 'podman') { + const emptyAuthFilePath = path.join(os.tmpdir(), 'empty_auth.json'); + fs.writeFileSync(emptyAuthFilePath, '{}', 'utf-8'); + args.push('--authfile', emptyAuthFilePath); + } + + // Determine if the current user's UID/GID should be passed to the sandbox. + // See shouldUseCurrentUserInSandbox for more details. 
+ let userFlag = ''; + const finalEntrypoint = entrypoint(workdir); + + if (process.env['GEMINI_CLI_INTEGRATION_TEST'] === 'true') { + args.push('--user', 'root'); + userFlag = '--user root'; + } else if (await shouldUseCurrentUserInSandbox()) { + // For the user-creation logic to work, the container must start as root. + // The entrypoint script then handles dropping privileges to the correct user. + args.push('--user', 'root'); + + const uid = execSync('id -u').toString().trim(); + const gid = execSync('id -g').toString().trim(); + + // Instead of passing --user to the main sandbox container, we let it + // start as root, then create a user with the host's UID/GID, and + // finally switch to that user to run the gemini process. This is + // necessary on Linux to ensure the user exists within the + // container's /etc/passwd file, which is required by os.userInfo(). + const username = 'gemini'; + const homeDir = getContainerPath(os.homedir()); + + const setupUserCommands = [ + // Use -f with groupadd to avoid errors if the group already exists. + `groupadd -f -g ${gid} ${username}`, + // Create user only if it doesn't exist. Use -o for non-unique UID. + `id -u ${username} &>/dev/null || useradd -o -u ${uid} -g ${gid} -d ${homeDir} -s /bin/bash ${username}`, + ].join(' && '); + + const originalCommand = finalEntrypoint[2]; + const escapedOriginalCommand = originalCommand.replace(/'/g, "'\\''"); + + // Use `su -p` to preserve the environment. + const suCommand = `su -p ${username} -c '${escapedOriginalCommand}'`; + + // The entrypoint is always `['bash', '-c', '']`, so we modify the command part. + finalEntrypoint[2] = `${setupUserCommands} && ${suCommand}`; + + // We still need userFlag for the simpler proxy container, which does not have this issue. + userFlag = `--user ${uid}:${gid}`; + // When forcing a UID in the sandbox, $HOME can be reset to '/', so we copy $HOME as well. 
+ args.push('--env', `HOME=${os.homedir()}`); + } + + // push container image name + args.push(image); + + // push container entrypoint (including args) + args.push(...finalEntrypoint); + + // start and set up proxy if GEMINI_SANDBOX_PROXY_COMMAND is set + let proxyProcess: ChildProcess | undefined = undefined; + let sandboxProcess: ChildProcess | undefined = undefined; + + if (proxyCommand) { + // run proxyCommand in its own container + const proxyContainerCommand = `${config.command} run --rm --init ${userFlag} --name ${SANDBOX_PROXY_NAME} --network ${SANDBOX_PROXY_NAME} -p 8877:8877 -v ${process.cwd()}:${workdir} --workdir ${workdir} ${image} ${proxyCommand}`; + proxyProcess = spawn(proxyContainerCommand, { + stdio: ['ignore', 'pipe', 'pipe'], + shell: true, + detached: true, + }); + // install handlers to stop proxy on exit/signal + const stopProxy = () => { + console.log('stopping proxy container ...'); + execSync(`${config.command} rm -f ${SANDBOX_PROXY_NAME}`); + }; + process.on('exit', stopProxy); + process.on('SIGINT', stopProxy); + process.on('SIGTERM', stopProxy); + + // commented out as it disrupts ink rendering + // proxyProcess.stdout?.on('data', (data) => { + // console.info(data.toString()); + // }); + proxyProcess.stderr?.on('data', (data) => { + console.error(data.toString().trim()); + }); + proxyProcess.on('close', (code, signal) => { + console.error( + `ERROR: proxy container command '${proxyContainerCommand}' exited with code ${code}, signal ${signal}`, + ); + if (sandboxProcess?.pid) { + process.kill(-sandboxProcess.pid, 'SIGTERM'); + } + process.exit(1); + }); + console.log('waiting for proxy to start ...'); + await execAsync( + `until timeout 0.25 curl -s http://localhost:8877; do sleep 0.25; done`, + ); + // connect proxy container to sandbox network + // (workaround for older versions of docker that don't support multiple --network args) + await execAsync( + `${config.command} network connect ${SANDBOX_NETWORK_NAME} ${SANDBOX_PROXY_NAME}`, 
+ ); + } + + // spawn child and let it inherit stdio + sandboxProcess = spawn(config.command, args, { + stdio: 'inherit', + }); + + sandboxProcess.on('error', (err) => { + console.error('Sandbox process error:', err); + }); + + await new Promise((resolve) => { + sandboxProcess?.on('close', (code, signal) => { + if (code !== 0) { + console.log( + `Sandbox process exited with code: ${code}, signal: ${signal}`, + ); + } + resolve(); + }); + }); + } finally { + patcher.cleanup(); + } +} + +// Helper functions to ensure sandbox image is present +async function imageExists(sandbox: string, image: string): Promise { + return new Promise((resolve) => { + const args = ['images', '-q', image]; + const checkProcess = spawn(sandbox, args); + + let stdoutData = ''; + if (checkProcess.stdout) { + checkProcess.stdout.on('data', (data) => { + stdoutData += data.toString(); + }); + } + + checkProcess.on('error', (err) => { + console.warn( + `Failed to start '${sandbox}' command for image check: ${err.message}`, + ); + resolve(false); + }); + + checkProcess.on('close', (code) => { + // Non-zero code might indicate docker daemon not running, etc. + // The primary success indicator is non-empty stdoutData. 
+ if (code !== 0) { + // console.warn(`'${sandbox} images -q ${image}' exited with code ${code}.`); + } + resolve(stdoutData.trim() !== ''); + }); + }); +} + +async function pullImage(sandbox: string, image: string): Promise { + console.info(`Attempting to pull image ${image} using ${sandbox}...`); + return new Promise((resolve) => { + const args = ['pull', image]; + const pullProcess = spawn(sandbox, args, { stdio: 'pipe' }); + + let stderrData = ''; + + const onStdoutData = (data: Buffer) => { + console.info(data.toString().trim()); // Show pull progress + }; + + const onStderrData = (data: Buffer) => { + stderrData += data.toString(); + console.error(data.toString().trim()); // Show pull errors/info from the command itself + }; + + const onError = (err: Error) => { + console.warn( + `Failed to start '${sandbox} pull ${image}' command: ${err.message}`, + ); + cleanup(); + resolve(false); + }; + + const onClose = (code: number | null) => { + if (code === 0) { + console.info(`Successfully pulled image ${image}.`); + cleanup(); + resolve(true); + } else { + console.warn( + `Failed to pull image ${image}. 
'${sandbox} pull ${image}' exited with code ${code}.`, + ); + if (stderrData.trim()) { + // Details already printed by the stderr listener above + } + cleanup(); + resolve(false); + } + }; + + const cleanup = () => { + if (pullProcess.stdout) { + pullProcess.stdout.removeListener('data', onStdoutData); + } + if (pullProcess.stderr) { + pullProcess.stderr.removeListener('data', onStderrData); + } + pullProcess.removeListener('error', onError); + pullProcess.removeListener('close', onClose); + if (pullProcess.connected) { + pullProcess.disconnect(); + } + }; + + if (pullProcess.stdout) { + pullProcess.stdout.on('data', onStdoutData); + } + if (pullProcess.stderr) { + pullProcess.stderr.on('data', onStderrData); + } + pullProcess.on('error', onError); + pullProcess.on('close', onClose); + }); +} + +async function ensureSandboxImageIsPresent( + sandbox: string, + image: string, +): Promise { + console.info(`Checking for sandbox image: ${image}`); + if (await imageExists(sandbox, image)) { + console.info(`Sandbox image ${image} found locally.`); + return true; + } + + console.info(`Sandbox image ${image} not found locally.`); + if (image === LOCAL_DEV_SANDBOX_IMAGE_NAME) { + // user needs to build the image themselves + return false; + } + + if (await pullImage(sandbox, image)) { + // After attempting to pull, check again to be certain + if (await imageExists(sandbox, image)) { + console.info(`Sandbox image ${image} is now available after pulling.`); + return true; + } else { + console.warn( + `Sandbox image ${image} still not found after a pull attempt. 
This might indicate an issue with the image name or registry, or the pull command reported success but failed to make the image available.`, + ); + return false; + } + } + + console.error( + `Failed to obtain sandbox image ${image} after check and pull attempt.`, + ); + return false; // Pull command failed or image still not present +} diff --git a/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.test.ts b/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..39c611f8407209bfae8f6ce023877ecd383b875f --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.test.ts @@ -0,0 +1,797 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { + // Schema utilities + getSettingsByCategory, + getSettingDefinition, + requiresRestart, + getDefaultValue, + getRestartRequiredSettings, + getEffectiveValue, + getAllSettingKeys, + getSettingsByType, + getSettingsRequiringRestart, + isValidSettingKey, + getSettingCategory, + shouldShowInDialog, + getDialogSettingsByCategory, + getDialogSettingsByType, + getDialogSettingKeys, + // Business logic utilities + getSettingValue, + isSettingModified, + settingExistsInScope, + setPendingSettingValue, + hasRestartRequiredSettings, + getRestartRequiredFromModified, + getDisplayValue, + isDefaultValue, + isValueInherited, + getEffectiveDisplayValue, +} from './settingsUtils.js'; + +describe('SettingsUtils', () => { + describe('Schema Utilities', () => { + describe('getSettingsByCategory', () => { + it('should group settings by category', () => { + const categories = getSettingsByCategory(); + + expect(categories).toHaveProperty('General'); + expect(categories).toHaveProperty('Accessibility'); + expect(categories).toHaveProperty('Checkpointing'); + expect(categories).toHaveProperty('File Filtering'); + 
expect(categories).toHaveProperty('UI'); + expect(categories).toHaveProperty('Mode'); + expect(categories).toHaveProperty('Updates'); + }); + + it('should include key property in grouped settings', () => { + const categories = getSettingsByCategory(); + + Object.entries(categories).forEach(([_category, settings]) => { + settings.forEach((setting) => { + expect(setting.key).toBeDefined(); + }); + }); + }); + }); + + describe('getSettingDefinition', () => { + it('should return definition for valid setting', () => { + const definition = getSettingDefinition('showMemoryUsage'); + expect(definition).toBeDefined(); + expect(definition?.label).toBe('Show Memory Usage'); + }); + + it('should return undefined for invalid setting', () => { + const definition = getSettingDefinition('invalidSetting'); + expect(definition).toBeUndefined(); + }); + }); + + describe('requiresRestart', () => { + it('should return true for settings that require restart', () => { + expect(requiresRestart('autoConfigureMaxOldSpaceSize')).toBe(true); + expect(requiresRestart('checkpointing.enabled')).toBe(true); + }); + + it('should return false for settings that do not require restart', () => { + expect(requiresRestart('showMemoryUsage')).toBe(false); + expect(requiresRestart('hideTips')).toBe(false); + }); + + it('should return false for invalid settings', () => { + expect(requiresRestart('invalidSetting')).toBe(false); + }); + }); + + describe('getDefaultValue', () => { + it('should return correct default values', () => { + expect(getDefaultValue('showMemoryUsage')).toBe(false); + expect(getDefaultValue('fileFiltering.enableRecursiveFileSearch')).toBe( + true, + ); + }); + + it('should return undefined for invalid settings', () => { + expect(getDefaultValue('invalidSetting')).toBeUndefined(); + }); + }); + + describe('getRestartRequiredSettings', () => { + it('should return all settings that require restart', () => { + const restartSettings = getRestartRequiredSettings(); + 
expect(restartSettings).toContain('autoConfigureMaxOldSpaceSize'); + expect(restartSettings).toContain('checkpointing.enabled'); + expect(restartSettings).not.toContain('showMemoryUsage'); + }); + }); + + describe('getEffectiveValue', () => { + it('should return value from settings when set', () => { + const settings = { showMemoryUsage: true }; + const mergedSettings = { showMemoryUsage: false }; + + const value = getEffectiveValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(value).toBe(true); + }); + + it('should return value from merged settings when not set in current scope', () => { + const settings = {}; + const mergedSettings = { showMemoryUsage: true }; + + const value = getEffectiveValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(value).toBe(true); + }); + + it('should return default value when not set anywhere', () => { + const settings = {}; + const mergedSettings = {}; + + const value = getEffectiveValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(value).toBe(false); // default value + }); + + it('should handle nested settings correctly', () => { + const settings = { + accessibility: { disableLoadingPhrases: true }, + }; + const mergedSettings = { + accessibility: { disableLoadingPhrases: false }, + }; + + const value = getEffectiveValue( + 'accessibility.disableLoadingPhrases', + settings, + mergedSettings, + ); + expect(value).toBe(true); + }); + + it('should return undefined for invalid settings', () => { + const settings = {}; + const mergedSettings = {}; + + const value = getEffectiveValue( + 'invalidSetting', + settings, + mergedSettings, + ); + expect(value).toBeUndefined(); + }); + }); + + describe('getAllSettingKeys', () => { + it('should return all setting keys', () => { + const keys = getAllSettingKeys(); + expect(keys).toContain('showMemoryUsage'); + expect(keys).toContain('accessibility.disableLoadingPhrases'); + expect(keys).toContain('checkpointing.enabled'); + }); + }); + 
+ describe('getSettingsByType', () => { + it('should return only boolean settings', () => { + const booleanSettings = getSettingsByType('boolean'); + expect(booleanSettings.length).toBeGreaterThan(0); + booleanSettings.forEach((setting) => { + expect(setting.type).toBe('boolean'); + }); + }); + }); + + describe('getSettingsRequiringRestart', () => { + it('should return only settings that require restart', () => { + const restartSettings = getSettingsRequiringRestart(); + expect(restartSettings.length).toBeGreaterThan(0); + restartSettings.forEach((setting) => { + expect(setting.requiresRestart).toBe(true); + }); + }); + }); + + describe('isValidSettingKey', () => { + it('should return true for valid setting keys', () => { + expect(isValidSettingKey('showMemoryUsage')).toBe(true); + expect(isValidSettingKey('accessibility.disableLoadingPhrases')).toBe( + true, + ); + }); + + it('should return false for invalid setting keys', () => { + expect(isValidSettingKey('invalidSetting')).toBe(false); + expect(isValidSettingKey('')).toBe(false); + }); + }); + + describe('getSettingCategory', () => { + it('should return correct category for valid settings', () => { + expect(getSettingCategory('showMemoryUsage')).toBe('UI'); + expect(getSettingCategory('accessibility.disableLoadingPhrases')).toBe( + 'Accessibility', + ); + }); + + it('should return undefined for invalid settings', () => { + expect(getSettingCategory('invalidSetting')).toBeUndefined(); + }); + }); + + describe('shouldShowInDialog', () => { + it('should return true for settings marked to show in dialog', () => { + expect(shouldShowInDialog('showMemoryUsage')).toBe(true); + expect(shouldShowInDialog('vimMode')).toBe(true); + expect(shouldShowInDialog('hideWindowTitle')).toBe(true); + expect(shouldShowInDialog('usageStatisticsEnabled')).toBe(false); + }); + + it('should return false for settings marked to hide from dialog', () => { + expect(shouldShowInDialog('selectedAuthType')).toBe(false); + 
expect(shouldShowInDialog('coreTools')).toBe(false); + expect(shouldShowInDialog('customThemes')).toBe(false); + expect(shouldShowInDialog('theme')).toBe(false); // Changed to false + expect(shouldShowInDialog('preferredEditor')).toBe(false); // Changed to false + }); + + it('should return true for invalid settings (default behavior)', () => { + expect(shouldShowInDialog('invalidSetting')).toBe(true); + }); + }); + + describe('getDialogSettingsByCategory', () => { + it('should only return settings marked for dialog display', async () => { + const categories = getDialogSettingsByCategory(); + + // Should include UI settings that are marked for dialog + expect(categories['UI']).toBeDefined(); + const uiSettings = categories['UI']; + const uiKeys = uiSettings.map((s) => s.key); + expect(uiKeys).toContain('showMemoryUsage'); + expect(uiKeys).toContain('hideWindowTitle'); + expect(uiKeys).not.toContain('customThemes'); // This is marked false + expect(uiKeys).not.toContain('theme'); // This is now marked false + }); + + it('should not include Advanced category settings', () => { + const categories = getDialogSettingsByCategory(); + + // Advanced settings should be filtered out + expect(categories['Advanced']).toBeUndefined(); + }); + + it('should include settings with showInDialog=true', () => { + const categories = getDialogSettingsByCategory(); + + const allSettings = Object.values(categories).flat(); + const allKeys = allSettings.map((s) => s.key); + + expect(allKeys).toContain('vimMode'); + expect(allKeys).toContain('ideMode'); + expect(allKeys).toContain('disableAutoUpdate'); + expect(allKeys).toContain('showMemoryUsage'); + expect(allKeys).not.toContain('usageStatisticsEnabled'); + expect(allKeys).not.toContain('selectedAuthType'); + expect(allKeys).not.toContain('coreTools'); + expect(allKeys).not.toContain('theme'); // Now hidden + expect(allKeys).not.toContain('preferredEditor'); // Now hidden + }); + }); + + describe('getDialogSettingsByType', () => { + 
it('should return only boolean dialog settings', () => { + const booleanSettings = getDialogSettingsByType('boolean'); + + const keys = booleanSettings.map((s) => s.key); + expect(keys).toContain('showMemoryUsage'); + expect(keys).toContain('vimMode'); + expect(keys).toContain('hideWindowTitle'); + expect(keys).not.toContain('usageStatisticsEnabled'); + expect(keys).not.toContain('selectedAuthType'); // Advanced setting + expect(keys).not.toContain('useExternalAuth'); // Advanced setting + }); + + it('should return only string dialog settings', () => { + const stringSettings = getDialogSettingsByType('string'); + + const keys = stringSettings.map((s) => s.key); + // Note: theme and preferredEditor are now hidden from dialog + expect(keys).not.toContain('theme'); // Now marked false + expect(keys).not.toContain('preferredEditor'); // Now marked false + expect(keys).not.toContain('selectedAuthType'); // Advanced setting + + // Most string settings are now hidden, so let's just check they exclude advanced ones + expect(keys.every((key) => !key.startsWith('tool'))).toBe(true); // No tool-related settings + }); + }); + + describe('getDialogSettingKeys', () => { + it('should return only settings marked for dialog display', () => { + const dialogKeys = getDialogSettingKeys(); + + // Should include settings marked for dialog + expect(dialogKeys).toContain('showMemoryUsage'); + expect(dialogKeys).toContain('vimMode'); + expect(dialogKeys).toContain('hideWindowTitle'); + expect(dialogKeys).not.toContain('usageStatisticsEnabled'); + expect(dialogKeys).toContain('ideMode'); + expect(dialogKeys).toContain('disableAutoUpdate'); + + // Should include nested settings marked for dialog + expect(dialogKeys).toContain('fileFiltering.respectGitIgnore'); + expect(dialogKeys).toContain('fileFiltering.respectGeminiIgnore'); + expect(dialogKeys).toContain('fileFiltering.enableRecursiveFileSearch'); + + // Should NOT include settings marked as hidden + 
expect(dialogKeys).not.toContain('theme'); // Hidden + expect(dialogKeys).not.toContain('customThemes'); // Hidden + expect(dialogKeys).not.toContain('preferredEditor'); // Hidden + expect(dialogKeys).not.toContain('selectedAuthType'); // Advanced + expect(dialogKeys).not.toContain('coreTools'); // Advanced + expect(dialogKeys).not.toContain('mcpServers'); // Advanced + expect(dialogKeys).not.toContain('telemetry'); // Advanced + }); + + it('should return fewer keys than getAllSettingKeys', () => { + const allKeys = getAllSettingKeys(); + const dialogKeys = getDialogSettingKeys(); + + expect(dialogKeys.length).toBeLessThan(allKeys.length); + expect(dialogKeys.length).toBeGreaterThan(0); + }); + + it('should handle nested settings display correctly', () => { + // Test the specific issue with fileFiltering.respectGitIgnore + const key = 'fileFiltering.respectGitIgnore'; + const initialSettings = {}; + const pendingSettings = {}; + + // Set the nested setting to true + const updatedPendingSettings = setPendingSettingValue( + key, + true, + pendingSettings, + ); + + // Check if the setting exists in pending settings + const existsInPending = settingExistsInScope( + key, + updatedPendingSettings, + ); + expect(existsInPending).toBe(true); + + // Get the value from pending settings + const valueFromPending = getSettingValue( + key, + updatedPendingSettings, + {}, + ); + expect(valueFromPending).toBe(true); + + // Test getDisplayValue should show the pending change + const displayValue = getDisplayValue( + key, + initialSettings, + {}, + new Set(), + updatedPendingSettings, + ); + expect(displayValue).toBe('true'); // Should show true (no * since value matches default) + + // Test that modified settings also show the * indicator + const modifiedSettings = new Set([key]); + const displayValueWithModified = getDisplayValue( + key, + initialSettings, + {}, + modifiedSettings, + {}, + ); + expect(displayValueWithModified).toBe('true*'); // Should show true* because it's in 
modified settings and default is true + }); + }); + }); + + describe('Business Logic Utilities', () => { + describe('getSettingValue', () => { + it('should return value from settings when set', () => { + const settings = { showMemoryUsage: true }; + const mergedSettings = { showMemoryUsage: false }; + + const value = getSettingValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(value).toBe(true); + }); + + it('should return value from merged settings when not set in current scope', () => { + const settings = {}; + const mergedSettings = { showMemoryUsage: true }; + + const value = getSettingValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(value).toBe(true); + }); + + it('should return default value for invalid setting', () => { + const settings = {}; + const mergedSettings = {}; + + const value = getSettingValue( + 'invalidSetting', + settings, + mergedSettings, + ); + expect(value).toBe(false); // Default fallback + }); + }); + + describe('isSettingModified', () => { + it('should return true when value differs from default', () => { + expect(isSettingModified('showMemoryUsage', true)).toBe(true); + expect( + isSettingModified('fileFiltering.enableRecursiveFileSearch', false), + ).toBe(true); + }); + + it('should return false when value matches default', () => { + expect(isSettingModified('showMemoryUsage', false)).toBe(false); + expect( + isSettingModified('fileFiltering.enableRecursiveFileSearch', true), + ).toBe(false); + }); + }); + + describe('settingExistsInScope', () => { + it('should return true for top-level settings that exist', () => { + const settings = { showMemoryUsage: true }; + expect(settingExistsInScope('showMemoryUsage', settings)).toBe(true); + }); + + it('should return false for top-level settings that do not exist', () => { + const settings = {}; + expect(settingExistsInScope('showMemoryUsage', settings)).toBe(false); + }); + + it('should return true for nested settings that exist', () => { + const 
settings = { + accessibility: { disableLoadingPhrases: true }, + }; + expect( + settingExistsInScope('accessibility.disableLoadingPhrases', settings), + ).toBe(true); + }); + + it('should return false for nested settings that do not exist', () => { + const settings = {}; + expect( + settingExistsInScope('accessibility.disableLoadingPhrases', settings), + ).toBe(false); + }); + + it('should return false when parent exists but child does not', () => { + const settings = { accessibility: {} }; + expect( + settingExistsInScope('accessibility.disableLoadingPhrases', settings), + ).toBe(false); + }); + }); + + describe('setPendingSettingValue', () => { + it('should set top-level setting value', () => { + const pendingSettings = {}; + const result = setPendingSettingValue( + 'showMemoryUsage', + true, + pendingSettings, + ); + + expect(result.showMemoryUsage).toBe(true); + }); + + it('should set nested setting value', () => { + const pendingSettings = {}; + const result = setPendingSettingValue( + 'accessibility.disableLoadingPhrases', + true, + pendingSettings, + ); + + expect(result.accessibility?.disableLoadingPhrases).toBe(true); + }); + + it('should preserve existing nested settings', () => { + const pendingSettings = { + accessibility: { disableLoadingPhrases: false }, + }; + const result = setPendingSettingValue( + 'accessibility.disableLoadingPhrases', + true, + pendingSettings, + ); + + expect(result.accessibility?.disableLoadingPhrases).toBe(true); + }); + + it('should not mutate original settings', () => { + const pendingSettings = {}; + setPendingSettingValue('showMemoryUsage', true, pendingSettings); + + expect(pendingSettings).toEqual({}); + }); + }); + + describe('hasRestartRequiredSettings', () => { + it('should return true when modified settings require restart', () => { + const modifiedSettings = new Set([ + 'autoConfigureMaxOldSpaceSize', + 'showMemoryUsage', + ]); + expect(hasRestartRequiredSettings(modifiedSettings)).toBe(true); + }); + + it('should 
return false when no modified settings require restart', () => { + const modifiedSettings = new Set([ + 'showMemoryUsage', + 'hideTips', + ]); + expect(hasRestartRequiredSettings(modifiedSettings)).toBe(false); + }); + + it('should return false for empty set', () => { + const modifiedSettings = new Set(); + expect(hasRestartRequiredSettings(modifiedSettings)).toBe(false); + }); + }); + + describe('getRestartRequiredFromModified', () => { + it('should return only settings that require restart', () => { + const modifiedSettings = new Set([ + 'autoConfigureMaxOldSpaceSize', + 'showMemoryUsage', + 'checkpointing.enabled', + ]); + const result = getRestartRequiredFromModified(modifiedSettings); + + expect(result).toContain('autoConfigureMaxOldSpaceSize'); + expect(result).toContain('checkpointing.enabled'); + expect(result).not.toContain('showMemoryUsage'); + }); + + it('should return empty array when no settings require restart', () => { + const modifiedSettings = new Set([ + 'showMemoryUsage', + 'hideTips', + ]); + const result = getRestartRequiredFromModified(modifiedSettings); + + expect(result).toEqual([]); + }); + }); + + describe('getDisplayValue', () => { + it('should show value without * when setting matches default', () => { + const settings = { showMemoryUsage: false }; // false matches default, so no * + const mergedSettings = { showMemoryUsage: false }; + const modifiedSettings = new Set(); + + const result = getDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + modifiedSettings, + ); + expect(result).toBe('false*'); + }); + + it('should show default value when setting is not in scope', () => { + const settings = {}; // no setting in scope + const mergedSettings = { showMemoryUsage: false }; + const modifiedSettings = new Set(); + + const result = getDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + modifiedSettings, + ); + expect(result).toBe('false'); // shows default value + }); + + it('should show value with * when 
changed from default', () => { + const settings = { showMemoryUsage: true }; // true is different from default (false) + const mergedSettings = { showMemoryUsage: true }; + const modifiedSettings = new Set(); + + const result = getDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + modifiedSettings, + ); + expect(result).toBe('true*'); + }); + + it('should show default value without * when setting does not exist in scope', () => { + const settings = {}; // setting doesn't exist in scope, show default + const mergedSettings = { showMemoryUsage: false }; + const modifiedSettings = new Set(); + + const result = getDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + modifiedSettings, + ); + expect(result).toBe('false'); // default value (false) without * + }); + + it('should show value with * when user changes from default', () => { + const settings = {}; // setting doesn't exist in scope originally + const mergedSettings = { showMemoryUsage: false }; + const modifiedSettings = new Set(['showMemoryUsage']); + const pendingSettings = { showMemoryUsage: true }; // user changed to true + + const result = getDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + modifiedSettings, + pendingSettings, + ); + expect(result).toBe('true*'); // changed from default (false) to true + }); + }); + + describe('isDefaultValue', () => { + it('should return true when setting does not exist in scope', () => { + const settings = {}; // setting doesn't exist + + const result = isDefaultValue('showMemoryUsage', settings); + expect(result).toBe(true); + }); + + it('should return false when setting exists in scope', () => { + const settings = { showMemoryUsage: true }; // setting exists + + const result = isDefaultValue('showMemoryUsage', settings); + expect(result).toBe(false); + }); + + it('should return true when nested setting does not exist in scope', () => { + const settings = {}; // nested setting doesn't exist + + const result = 
isDefaultValue( + 'accessibility.disableLoadingPhrases', + settings, + ); + expect(result).toBe(true); + }); + + it('should return false when nested setting exists in scope', () => { + const settings = { accessibility: { disableLoadingPhrases: true } }; // nested setting exists + + const result = isDefaultValue( + 'accessibility.disableLoadingPhrases', + settings, + ); + expect(result).toBe(false); + }); + }); + + describe('isValueInherited', () => { + it('should return false for top-level settings that exist in scope', () => { + const settings = { showMemoryUsage: true }; + const mergedSettings = { showMemoryUsage: true }; + + const result = isValueInherited( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(result).toBe(false); + }); + + it('should return true for top-level settings that do not exist in scope', () => { + const settings = {}; + const mergedSettings = { showMemoryUsage: true }; + + const result = isValueInherited( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(result).toBe(true); + }); + + it('should return false for nested settings that exist in scope', () => { + const settings = { + accessibility: { disableLoadingPhrases: true }, + }; + const mergedSettings = { + accessibility: { disableLoadingPhrases: true }, + }; + + const result = isValueInherited( + 'accessibility.disableLoadingPhrases', + settings, + mergedSettings, + ); + expect(result).toBe(false); + }); + + it('should return true for nested settings that do not exist in scope', () => { + const settings = {}; + const mergedSettings = { + accessibility: { disableLoadingPhrases: true }, + }; + + const result = isValueInherited( + 'accessibility.disableLoadingPhrases', + settings, + mergedSettings, + ); + expect(result).toBe(true); + }); + }); + + describe('getEffectiveDisplayValue', () => { + it('should return value from settings when available', () => { + const settings = { showMemoryUsage: true }; + const mergedSettings = { showMemoryUsage: false }; + + 
const result = getEffectiveDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(result).toBe(true); + }); + + it('should return value from merged settings when not in scope', () => { + const settings = {}; + const mergedSettings = { showMemoryUsage: true }; + + const result = getEffectiveDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(result).toBe(true); + }); + + it('should return default value for undefined values', () => { + const settings = {}; + const mergedSettings = {}; + + const result = getEffectiveDisplayValue( + 'showMemoryUsage', + settings, + mergedSettings, + ); + expect(result).toBe(false); // Default value + }); + }); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.ts b/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.ts new file mode 100644 index 0000000000000000000000000000000000000000..6b5a55c14b2137810fad96391f97ed43573e2255 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/settingsUtils.ts @@ -0,0 +1,489 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Settings, SettingScope, LoadedSettings } from '../config/settings.js'; +import { + SETTINGS_SCHEMA, + SettingDefinition, + SettingsSchema, +} from '../config/settingsSchema.js'; + +// The schema is now nested, but many parts of the UI and logic work better +// with a flattened structure and dot-notation keys. This section flattens the +// schema into a map for easier lookups. + +function flattenSchema( + schema: SettingsSchema, + prefix = '', +): Record { + let result: Record = {}; + for (const key in schema) { + const newKey = prefix ? 
`${prefix}.${key}` : key; + const definition = schema[key]; + result[newKey] = { ...definition, key: newKey }; + if (definition.properties) { + result = { ...result, ...flattenSchema(definition.properties, newKey) }; + } + } + return result; +} + +const FLATTENED_SCHEMA = flattenSchema(SETTINGS_SCHEMA); + +/** + * Get all settings grouped by category + */ +export function getSettingsByCategory(): Record< + string, + Array +> { + const categories: Record< + string, + Array + > = {}; + + Object.values(FLATTENED_SCHEMA).forEach((definition) => { + const category = definition.category; + if (!categories[category]) { + categories[category] = []; + } + categories[category].push(definition); + }); + + return categories; +} + +/** + * Get a setting definition by key + */ +export function getSettingDefinition( + key: string, +): (SettingDefinition & { key: string }) | undefined { + return FLATTENED_SCHEMA[key]; +} + +/** + * Check if a setting requires restart + */ +export function requiresRestart(key: string): boolean { + return FLATTENED_SCHEMA[key]?.requiresRestart ?? false; +} + +/** + * Get the default value for a setting + */ +export function getDefaultValue(key: string): SettingDefinition['default'] { + return FLATTENED_SCHEMA[key]?.default; +} + +/** + * Get all setting keys that require restart + */ +export function getRestartRequiredSettings(): string[] { + return Object.values(FLATTENED_SCHEMA) + .filter((definition) => definition.requiresRestart) + .map((definition) => definition.key); +} + +/** + * Recursively gets a value from a nested object using a key path array. 
+ */ +export function getNestedValue( + obj: Record, + path: string[], +): unknown { + const [first, ...rest] = path; + if (!first || !(first in obj)) { + return undefined; + } + const value = obj[first]; + if (rest.length === 0) { + return value; + } + if (value && typeof value === 'object' && value !== null) { + return getNestedValue(value as Record, rest); + } + return undefined; +} + +/** + * Get the effective value for a setting, considering inheritance from higher scopes + * Always returns a value (never undefined) - falls back to default if not set anywhere + */ +export function getEffectiveValue( + key: string, + settings: Settings, + mergedSettings: Settings, +): SettingDefinition['default'] { + const definition = getSettingDefinition(key); + if (!definition) { + return undefined; + } + + const path = key.split('.'); + + // Check the current scope's settings first + let value = getNestedValue(settings as Record, path); + if (value !== undefined) { + return value as SettingDefinition['default']; + } + + // Check the merged settings for an inherited value + value = getNestedValue(mergedSettings as Record, path); + if (value !== undefined) { + return value as SettingDefinition['default']; + } + + // Return default value if no value is set anywhere + return definition.default; +} + +/** + * Get all setting keys from the schema + */ +export function getAllSettingKeys(): string[] { + return Object.keys(FLATTENED_SCHEMA); +} + +/** + * Get settings by type + */ +export function getSettingsByType( + type: SettingDefinition['type'], +): Array { + return Object.values(FLATTENED_SCHEMA).filter( + (definition) => definition.type === type, + ); +} + +/** + * Get settings that require restart + */ +export function getSettingsRequiringRestart(): Array< + SettingDefinition & { + key: string; + } +> { + return Object.values(FLATTENED_SCHEMA).filter( + (definition) => definition.requiresRestart, + ); +} + +/** + * Validate if a setting key exists in the schema + */ +export 
function isValidSettingKey(key: string): boolean { + return key in FLATTENED_SCHEMA; +} + +/** + * Get the category for a setting + */ +export function getSettingCategory(key: string): string | undefined { + return FLATTENED_SCHEMA[key]?.category; +} + +/** + * Check if a setting should be shown in the settings dialog + */ +export function shouldShowInDialog(key: string): boolean { + return FLATTENED_SCHEMA[key]?.showInDialog ?? true; // Default to true for backward compatibility +} + +/** + * Get all settings that should be shown in the dialog, grouped by category + */ +export function getDialogSettingsByCategory(): Record< + string, + Array +> { + const categories: Record< + string, + Array + > = {}; + + Object.values(FLATTENED_SCHEMA) + .filter((definition) => definition.showInDialog !== false) + .forEach((definition) => { + const category = definition.category; + if (!categories[category]) { + categories[category] = []; + } + categories[category].push(definition); + }); + + return categories; +} + +/** + * Get settings by type that should be shown in the dialog + */ +export function getDialogSettingsByType( + type: SettingDefinition['type'], +): Array { + return Object.values(FLATTENED_SCHEMA).filter( + (definition) => + definition.type === type && definition.showInDialog !== false, + ); +} + +/** + * Get all setting keys that should be shown in the dialog + */ +export function getDialogSettingKeys(): string[] { + return Object.values(FLATTENED_SCHEMA) + .filter((definition) => definition.showInDialog !== false) + .map((definition) => definition.key); +} + +// ============================================================================ +// BUSINESS LOGIC UTILITIES (Higher-level utilities for setting operations) +// ============================================================================ + +/** + * Get the current value for a setting in a specific scope + * Always returns a value (never undefined) - falls back to default if not set anywhere + */ +export 
function getSettingValue( + key: string, + settings: Settings, + mergedSettings: Settings, +): boolean { + const definition = getSettingDefinition(key); + if (!definition) { + return false; // Default fallback for invalid settings + } + + const value = getEffectiveValue(key, settings, mergedSettings); + // Ensure we return a boolean value, converting from the more general type + if (typeof value === 'boolean') { + return value; + } + // Fall back to default value, ensuring it's a boolean + const defaultValue = definition.default; + if (typeof defaultValue === 'boolean') { + return defaultValue; + } + return false; // Final fallback +} + +/** + * Check if a setting value is modified from its default + */ +export function isSettingModified(key: string, value: boolean): boolean { + const defaultValue = getDefaultValue(key); + // Handle type comparison properly + if (typeof defaultValue === 'boolean') { + return value !== defaultValue; + } + // If default is not a boolean, consider it modified if value is true + return value === true; +} + +/** + * Check if a setting exists in the original settings file for a scope + */ +export function settingExistsInScope( + key: string, + scopeSettings: Settings, +): boolean { + const path = key.split('.'); + const value = getNestedValue(scopeSettings as Record, path); + return value !== undefined; +} + +/** + * Recursively sets a value in a nested object using a key path array. 
+ */ +function setNestedValue( + obj: Record, + path: string[], + value: unknown, +): Record { + const [first, ...rest] = path; + if (!first) { + return obj; + } + + if (rest.length === 0) { + obj[first] = value; + return obj; + } + + if (!obj[first] || typeof obj[first] !== 'object') { + obj[first] = {}; + } + + setNestedValue(obj[first] as Record, rest, value); + return obj; +} + +/** + * Set a setting value in the pending settings + */ +export function setPendingSettingValue( + key: string, + value: boolean, + pendingSettings: Settings, +): Settings { + const path = key.split('.'); + const newSettings = JSON.parse(JSON.stringify(pendingSettings)); + setNestedValue(newSettings, path, value); + return newSettings; +} + +/** + * Generic setter: Set a setting value (boolean, number, string, etc.) in the pending settings + */ +export function setPendingSettingValueAny( + key: string, + value: unknown, + pendingSettings: Settings, +): Settings { + const path = key.split('.'); + const newSettings = structuredClone(pendingSettings); + setNestedValue(newSettings, path, value); + return newSettings; +} + +/** + * Check if any modified settings require a restart + */ +export function hasRestartRequiredSettings( + modifiedSettings: Set, +): boolean { + return Array.from(modifiedSettings).some((key) => requiresRestart(key)); +} + +/** + * Get the restart required settings from a set of modified settings + */ +export function getRestartRequiredFromModified( + modifiedSettings: Set, +): string[] { + return Array.from(modifiedSettings).filter((key) => requiresRestart(key)); +} + +/** + * Save modified settings to the appropriate scope + */ +export function saveModifiedSettings( + modifiedSettings: Set, + pendingSettings: Settings, + loadedSettings: LoadedSettings, + scope: SettingScope, +): void { + modifiedSettings.forEach((settingKey) => { + const path = settingKey.split('.'); + const value = getNestedValue( + pendingSettings as Record, + path, + ); + + if (value === 
undefined) { + return; + } + + const existsInOriginalFile = settingExistsInScope( + settingKey, + loadedSettings.forScope(scope).settings, + ); + + const isDefaultValue = value === getDefaultValue(settingKey); + + if (existsInOriginalFile || !isDefaultValue) { + // This is tricky because setValue only works on top-level keys. + // We need to set the whole parent object. + const [parentKey] = path; + if (parentKey) { + const newParentValue = setPendingSettingValueAny( + settingKey, + value, + loadedSettings.forScope(scope).settings, + )[parentKey as keyof Settings]; + + loadedSettings.setValue( + scope, + parentKey as keyof Settings, + newParentValue, + ); + } + } + }); +} + +/** + * Get the display value for a setting, showing current scope value with default change indicator + */ +export function getDisplayValue( + key: string, + settings: Settings, + _mergedSettings: Settings, + modifiedSettings: Set, + pendingSettings?: Settings, +): string { + // Prioritize pending changes if user has modified this setting + let value: boolean; + if (pendingSettings && settingExistsInScope(key, pendingSettings)) { + // Show the value from the pending (unsaved) edits when it exists + value = getSettingValue(key, pendingSettings, {}); + } else if (settingExistsInScope(key, settings)) { + // Show the value defined at the current scope if present + value = getSettingValue(key, settings, {}); + } else { + // Fall back to the schema default when the key is unset in this scope + const defaultValue = getDefaultValue(key); + value = typeof defaultValue === 'boolean' ? defaultValue : false; + } + + const valueString = String(value); + + // Check if value is different from default OR if it's in modified settings OR if there are pending changes + const defaultValue = getDefaultValue(key); + const isChangedFromDefault = + typeof defaultValue === 'boolean' ? 
value !== defaultValue : value === true; + const isInModifiedSettings = modifiedSettings.has(key); + + // Mark as modified if setting exists in current scope OR is in modified settings + if (settingExistsInScope(key, settings) || isInModifiedSettings) { + return `${valueString}*`; // * indicates setting is set in current scope + } + if (isChangedFromDefault || isInModifiedSettings) { + return `${valueString}*`; // * indicates changed from default value + } + + return valueString; +} + +/** + * Check if a setting doesn't exist in current scope (should be greyed out) + */ +export function isDefaultValue(key: string, settings: Settings): boolean { + return !settingExistsInScope(key, settings); +} + +/** + * Check if a setting value is inherited (not set at current scope) + */ +export function isValueInherited( + key: string, + settings: Settings, + _mergedSettings: Settings, +): boolean { + return !settingExistsInScope(key, settings); +} + +/** + * Get the effective value for display, considering inheritance + * Always returns a boolean value (never undefined) + */ +export function getEffectiveDisplayValue( + key: string, + settings: Settings, + mergedSettings: Settings, +): boolean { + return getSettingValue(key, settings, mergedSettings); +} diff --git a/projects/ui/qwen-code/packages/cli/src/utils/spawnWrapper.ts b/projects/ui/qwen-code/packages/cli/src/utils/spawnWrapper.ts new file mode 100644 index 0000000000000000000000000000000000000000..3f3cca944b752f8bcbc36bc93bb05138fffd8ae7 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/spawnWrapper.ts @@ -0,0 +1,9 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { spawn } from 'child_process'; + +export const spawnWrapper = spawn; diff --git a/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.test.ts b/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.test.ts new file mode 100644 index 
0000000000000000000000000000000000000000..a22f84fe1be2502109b24b6ebb2e731774cd0cb6 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.test.ts @@ -0,0 +1,75 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { getStartupWarnings } from './startupWarnings.js'; +import * as fs from 'fs/promises'; +import { getErrorMessage } from '@qwen-code/qwen-code-core'; + +vi.mock('fs/promises'); +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getErrorMessage: vi.fn(), + }; +}); + +describe.skip('startupWarnings', () => { + beforeEach(() => { + vi.resetAllMocks(); + }); + + it('should return warnings from the file and delete it', async () => { + const mockWarnings = 'Warning 1\nWarning 2'; + vi.spyOn(fs, 'access').mockResolvedValue(); + vi.spyOn(fs, 'readFile').mockResolvedValue(mockWarnings); + vi.spyOn(fs, 'unlink').mockResolvedValue(); + + const warnings = await getStartupWarnings(); + + expect(fs.access).toHaveBeenCalled(); + expect(fs.readFile).toHaveBeenCalled(); + expect(fs.unlink).toHaveBeenCalled(); + expect(warnings).toEqual(['Warning 1', 'Warning 2']); + }); + + it('should return an empty array if the file does not exist', async () => { + const error = new Error('File not found'); + (error as Error & { code: string }).code = 'ENOENT'; + vi.spyOn(fs, 'access').mockRejectedValue(error); + + const warnings = await getStartupWarnings(); + + expect(warnings).toEqual([]); + }); + + it('should return an error message if reading the file fails', async () => { + const error = new Error('Permission denied'); + vi.spyOn(fs, 'access').mockRejectedValue(error); + vi.mocked(getErrorMessage).mockReturnValue('Permission denied'); + + const warnings = await getStartupWarnings(); + + expect(warnings).toEqual([ + 'Error checking/reading warnings file: 
Permission denied', + ]); + }); + + it('should return a warning if deleting the file fails', async () => { + const mockWarnings = 'Warning 1'; + vi.spyOn(fs, 'access').mockResolvedValue(); + vi.spyOn(fs, 'readFile').mockResolvedValue(mockWarnings); + vi.spyOn(fs, 'unlink').mockRejectedValue(new Error('Permission denied')); + + const warnings = await getStartupWarnings(); + + expect(warnings).toEqual([ + 'Warning 1', + 'Warning: Could not delete temporary warnings file.', + ]); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.ts b/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.ts new file mode 100644 index 0000000000000000000000000000000000000000..89b55bc3a0bdc782999b75a6cc813df0e70bc7b1 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/startupWarnings.ts @@ -0,0 +1,40 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import fs from 'fs/promises'; +import os from 'os'; +import { join as pathJoin } from 'node:path'; +import { getErrorMessage } from '@qwen-code/qwen-code-core'; + +const warningsFilePath = pathJoin(os.tmpdir(), 'qwen-code-warnings.txt'); + +export async function getStartupWarnings(): Promise { + try { + await fs.access(warningsFilePath); // Check if file exists + const warningsContent = await fs.readFile(warningsFilePath, 'utf-8'); + const warnings = warningsContent + .split('\n') + .filter((line) => line.trim() !== ''); + try { + await fs.unlink(warningsFilePath); + } catch { + warnings.push('Warning: Could not delete temporary warnings file.'); + } + return warnings; + } catch (err: unknown) { + // If fs.access throws, it means the file doesn't exist or is not accessible. + // This is not an error in the context of fetching warnings, so return empty. + // Only return an error message if it's not a "file not found" type error. + // However, the original logic returned an error message for any fs.existsSync failure. 
+ // To maintain closer parity while making it async, we'll check the error code. + // ENOENT is "Error NO ENTry" (file not found). + if (err instanceof Error && 'code' in err && err.code === 'ENOENT') { + return []; // File not found, no warnings to return. + } + // For other errors (permissions, etc.), return the error message. + return [`Error checking/reading warnings file: ${getErrorMessage(err)}`]; + } +} diff --git a/projects/ui/qwen-code/packages/cli/src/utils/updateEventEmitter.ts b/projects/ui/qwen-code/packages/cli/src/utils/updateEventEmitter.ts new file mode 100644 index 0000000000000000000000000000000000000000..a60ef039759da59d1c4a601dce77445b7386d033 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/updateEventEmitter.ts @@ -0,0 +1,13 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { EventEmitter } from 'events'; + +/** + * A shared event emitter for application-wide communication + * between decoupled parts of the CLI. 
+ */ +export const updateEventEmitter = new EventEmitter(); diff --git a/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.test.ts b/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.test.ts new file mode 100644 index 0000000000000000000000000000000000000000..6d9b89899dee9bae36cf013c6b855eb72e6cbd5d --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.test.ts @@ -0,0 +1,87 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { getUserStartupWarnings } from './userStartupWarnings.js'; +import * as os from 'os'; +import fs from 'fs/promises'; +import path from 'path'; + +// Mock os.homedir to control the home directory in tests +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(), + }; +}); + +describe('getUserStartupWarnings', () => { + let testRootDir: string; + let homeDir: string; + + beforeEach(async () => { + testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'warnings-test-')); + homeDir = path.join(testRootDir, 'home'); + await fs.mkdir(homeDir, { recursive: true }); + vi.mocked(os.homedir).mockReturnValue(homeDir); + }); + + afterEach(async () => { + await fs.rm(testRootDir, { recursive: true, force: true }); + vi.clearAllMocks(); + }); + + describe('home directory check', () => { + it('should return a warning when running in home directory', async () => { + const warnings = await getUserStartupWarnings(homeDir); + expect(warnings).toContainEqual( + expect.stringContaining('home directory'), + ); + }); + + it('should not return a warning when running in a project directory', async () => { + const projectDir = path.join(testRootDir, 'project'); + await fs.mkdir(projectDir); + const warnings = await getUserStartupWarnings(projectDir); + expect(warnings).not.toContainEqual( + 
expect.stringContaining('home directory'), + ); + }); + }); + + describe('root directory check', () => { + it('should return a warning when running in a root directory', async () => { + const rootDir = path.parse(testRootDir).root; + const warnings = await getUserStartupWarnings(rootDir); + expect(warnings).toContainEqual( + expect.stringContaining('root directory'), + ); + expect(warnings).toContainEqual( + expect.stringContaining('folder structure will be used'), + ); + }); + + it('should not return a warning when running in a non-root directory', async () => { + const projectDir = path.join(testRootDir, 'project'); + await fs.mkdir(projectDir); + const warnings = await getUserStartupWarnings(projectDir); + expect(warnings).not.toContainEqual( + expect.stringContaining('root directory'), + ); + }); + }); + + describe('error handling', () => { + it('should handle errors when checking directory', async () => { + const nonExistentPath = path.join(testRootDir, 'non-existent'); + const warnings = await getUserStartupWarnings(nonExistentPath); + const expectedWarning = + 'Could not verify the current directory due to a file system error.'; + expect(warnings).toEqual([expectedWarning, expectedWarning]); + }); + }); +}); diff --git a/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.ts b/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.ts new file mode 100644 index 0000000000000000000000000000000000000000..20a3534b6ef5e7abb014972a195ba62a486afa26 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/userStartupWarnings.ts @@ -0,0 +1,69 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import fs from 'fs/promises'; +import * as os from 'os'; +import path from 'path'; + +type WarningCheck = { + id: string; + check: (workspaceRoot: string) => Promise; +}; + +// Individual warning checks +const homeDirectoryCheck: WarningCheck = { + id: 'home-directory', + check: async (workspaceRoot: 
string) => { + try { + const [workspaceRealPath, homeRealPath] = await Promise.all([ + fs.realpath(workspaceRoot), + fs.realpath(os.homedir()), + ]); + + if (workspaceRealPath === homeRealPath) { + return 'You are running Qwen Code in your home directory. It is recommended to run in a project-specific directory.'; + } + return null; + } catch (_err: unknown) { + return 'Could not verify the current directory due to a file system error.'; + } + }, +}; + +const rootDirectoryCheck: WarningCheck = { + id: 'root-directory', + check: async (workspaceRoot: string) => { + try { + const workspaceRealPath = await fs.realpath(workspaceRoot); + const errorMessage = + 'Warning: You are running Qwen Code in the root directory. Your entire folder structure will be used for context. It is strongly recommended to run in a project-specific directory.'; + + // Check for Unix root directory + if (path.dirname(workspaceRealPath) === workspaceRealPath) { + return errorMessage; + } + + return null; + } catch (_err: unknown) { + return 'Could not verify the current directory due to a file system error.'; + } + }, +}; + +// All warning checks +const WARNING_CHECKS: readonly WarningCheck[] = [ + homeDirectoryCheck, + rootDirectoryCheck, +]; + +export async function getUserStartupWarnings( + workspaceRoot: string, +): Promise { + const results = await Promise.all( + WARNING_CHECKS.map((check) => check.check(workspaceRoot)), + ); + return results.filter((msg) => msg !== null); +} diff --git a/projects/ui/qwen-code/packages/cli/src/utils/version.ts b/projects/ui/qwen-code/packages/cli/src/utils/version.ts new file mode 100644 index 0000000000000000000000000000000000000000..0318f380beaf956239863c9cbfd1a629157b0516 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/utils/version.ts @@ -0,0 +1,12 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { getPackageJson } from './package.js'; + +export async function getCliVersion(): Promise 
{ + const pkgJson = await getPackageJson(); + return process.env['CLI_VERSION'] || pkgJson?.version || 'unknown'; +} diff --git a/projects/ui/qwen-code/packages/cli/src/zed-integration/acp.ts b/projects/ui/qwen-code/packages/cli/src/zed-integration/acp.ts new file mode 100644 index 0000000000000000000000000000000000000000..eef4e1ee2863e117223eb719549e63a0073551f8 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/zed-integration/acp.ts @@ -0,0 +1,366 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. */ + +import { z } from 'zod'; +import * as schema from './schema.js'; +export * from './schema.js'; + +import { WritableStream, ReadableStream } from 'node:stream/web'; + +export class AgentSideConnection implements Client { + #connection: Connection; + + constructor( + toAgent: (conn: Client) => Agent, + input: WritableStream, + output: ReadableStream, + ) { + const agent = toAgent(this); + + const handler = async ( + method: string, + params: unknown, + ): Promise => { + switch (method) { + case schema.AGENT_METHODS.initialize: { + const validatedParams = schema.initializeRequestSchema.parse(params); + return agent.initialize(validatedParams); + } + case schema.AGENT_METHODS.session_new: { + const validatedParams = schema.newSessionRequestSchema.parse(params); + return agent.newSession(validatedParams); + } + case schema.AGENT_METHODS.session_load: { + if (!agent.loadSession) { + throw RequestError.methodNotFound(); + } + const validatedParams = schema.loadSessionRequestSchema.parse(params); + return agent.loadSession(validatedParams); + } + case schema.AGENT_METHODS.authenticate: { + const validatedParams = + schema.authenticateRequestSchema.parse(params); + return agent.authenticate(validatedParams); + } + case schema.AGENT_METHODS.session_prompt: { + const validatedParams = 
schema.promptRequestSchema.parse(params); + return agent.prompt(validatedParams); + } + case schema.AGENT_METHODS.session_cancel: { + const validatedParams = schema.cancelNotificationSchema.parse(params); + return agent.cancel(validatedParams); + } + default: + throw RequestError.methodNotFound(method); + } + }; + + this.#connection = new Connection(handler, input, output); + } + + /** + * Streams new content to the client including text, tool calls, etc. + */ + async sessionUpdate(params: schema.SessionNotification): Promise { + return await this.#connection.sendNotification( + schema.CLIENT_METHODS.session_update, + params, + ); + } + + /** + * Request permission before running a tool + * + * The agent specifies a series of permission options with different granularity, + * and the client returns the chosen one. + */ + async requestPermission( + params: schema.RequestPermissionRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.session_request_permission, + params, + ); + } + + async readTextFile( + params: schema.ReadTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_read_text_file, + params, + ); + } + + async writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise { + return await this.#connection.sendRequest( + schema.CLIENT_METHODS.fs_write_text_file, + params, + ); + } +} + +type AnyMessage = AnyRequest | AnyResponse | AnyNotification; + +type AnyRequest = { + jsonrpc: '2.0'; + id: string | number; + method: string; + params?: unknown; +}; + +type AnyResponse = { + jsonrpc: '2.0'; + id: string | number; +} & Result; + +type AnyNotification = { + jsonrpc: '2.0'; + method: string; + params?: unknown; +}; + +type Result = + | { + result: T; + } + | { + error: ErrorResponse; + }; + +type ErrorResponse = { + code: number; + message: string; + data?: unknown; +}; + +type PendingResponse = { + resolve: (response: unknown) => void; + reject: (error: 
ErrorResponse) => void; +}; + +type MethodHandler = (method: string, params: unknown) => Promise; + +class Connection { + #pendingResponses: Map = new Map(); + #nextRequestId: number = 0; + #handler: MethodHandler; + #peerInput: WritableStream; + #writeQueue: Promise = Promise.resolve(); + #textEncoder: TextEncoder; + + constructor( + handler: MethodHandler, + peerInput: WritableStream, + peerOutput: ReadableStream, + ) { + this.#handler = handler; + this.#peerInput = peerInput; + this.#textEncoder = new TextEncoder(); + this.#receive(peerOutput); + } + + async #receive(output: ReadableStream) { + let content = ''; + const decoder = new TextDecoder(); + for await (const chunk of output) { + content += decoder.decode(chunk, { stream: true }); + const lines = content.split('\n'); + content = lines.pop() || ''; + + for (const line of lines) { + const trimmedLine = line.trim(); + + if (trimmedLine) { + const message = JSON.parse(trimmedLine); + this.#processMessage(message); + } + } + } + } + + async #processMessage(message: AnyMessage) { + if ('method' in message && 'id' in message) { + // It's a request + const response = await this.#tryCallHandler( + message.method, + message.params, + ); + + await this.#sendMessage({ + jsonrpc: '2.0', + id: message.id, + ...response, + }); + } else if ('method' in message) { + // It's a notification + await this.#tryCallHandler(message.method, message.params); + } else if ('id' in message) { + // It's a response + this.#handleResponse(message as AnyResponse); + } + } + + async #tryCallHandler( + method: string, + params?: unknown, + ): Promise> { + try { + const result = await this.#handler(method, params); + return { result: result ?? 
null }; + } catch (error: unknown) { + if (error instanceof RequestError) { + return error.toResult(); + } + + if (error instanceof z.ZodError) { + return RequestError.invalidParams( + JSON.stringify(error.format(), undefined, 2), + ).toResult(); + } + + let details; + + if (error instanceof Error) { + details = error.message; + } else if ( + typeof error === 'object' && + error != null && + 'message' in error && + typeof error.message === 'string' + ) { + details = error.message; + } + + return RequestError.internalError(details).toResult(); + } + } + + #handleResponse(response: AnyResponse) { + const pendingResponse = this.#pendingResponses.get(response.id); + if (pendingResponse) { + if ('result' in response) { + pendingResponse.resolve(response.result); + } else if ('error' in response) { + pendingResponse.reject(response.error); + } + this.#pendingResponses.delete(response.id); + } + } + + async sendRequest(method: string, params?: Req): Promise { + const id = this.#nextRequestId++; + const responsePromise = new Promise((resolve, reject) => { + this.#pendingResponses.set(id, { resolve, reject }); + }); + await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); + return responsePromise as Promise; + } + + async sendNotification(method: string, params?: N): Promise { + await this.#sendMessage({ jsonrpc: '2.0', method, params }); + } + + async #sendMessage(json: AnyMessage) { + const content = JSON.stringify(json) + '\n'; + this.#writeQueue = this.#writeQueue + .then(async () => { + const writer = this.#peerInput.getWriter(); + try { + await writer.write(this.#textEncoder.encode(content)); + } finally { + writer.releaseLock(); + } + }) + .catch((error) => { + // Continue processing writes on error + console.error('ACP write error:', error); + }); + return this.#writeQueue; + } +} + +export class RequestError extends Error { + data?: { details?: string }; + + constructor( + public code: number, + message: string, + details?: string, + ) { + super(message); 
+ this.name = 'RequestError'; + if (details) { + this.data = { details }; + } + } + + static parseError(details?: string): RequestError { + return new RequestError(-32700, 'Parse error', details); + } + + static invalidRequest(details?: string): RequestError { + return new RequestError(-32600, 'Invalid request', details); + } + + static methodNotFound(details?: string): RequestError { + return new RequestError(-32601, 'Method not found', details); + } + + static invalidParams(details?: string): RequestError { + return new RequestError(-32602, 'Invalid params', details); + } + + static internalError(details?: string): RequestError { + return new RequestError(-32603, 'Internal error', details); + } + + static authRequired(details?: string): RequestError { + return new RequestError(-32000, 'Authentication required', details); + } + + toResult(): Result { + return { + error: { + code: this.code, + message: this.message, + data: this.data, + }, + }; + } +} + +export interface Client { + requestPermission( + params: schema.RequestPermissionRequest, + ): Promise; + sessionUpdate(params: schema.SessionNotification): Promise; + writeTextFile( + params: schema.WriteTextFileRequest, + ): Promise; + readTextFile( + params: schema.ReadTextFileRequest, + ): Promise; +} + +export interface Agent { + initialize( + params: schema.InitializeRequest, + ): Promise; + newSession( + params: schema.NewSessionRequest, + ): Promise; + loadSession?( + params: schema.LoadSessionRequest, + ): Promise; + authenticate(params: schema.AuthenticateRequest): Promise; + prompt(params: schema.PromptRequest): Promise; + cancel(params: schema.CancelNotification): Promise; +} diff --git a/projects/ui/qwen-code/packages/cli/src/zed-integration/fileSystemService.ts b/projects/ui/qwen-code/packages/cli/src/zed-integration/fileSystemService.ts new file mode 100644 index 0000000000000000000000000000000000000000..1cdc6a34f27150e5fd8b1697df2cf4d0baf5d979 --- /dev/null +++ 
b/projects/ui/qwen-code/packages/cli/src/zed-integration/fileSystemService.ts @@ -0,0 +1,47 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { FileSystemService } from '@qwen-code/qwen-code-core'; +import * as acp from './acp.js'; + +/** + * ACP client-based implementation of FileSystemService + */ +export class AcpFileSystemService implements FileSystemService { + constructor( + private readonly client: acp.Client, + private readonly sessionId: string, + private readonly capabilities: acp.FileSystemCapability, + private readonly fallback: FileSystemService, + ) {} + + async readTextFile(filePath: string): Promise { + if (!this.capabilities.readTextFile) { + return this.fallback.readTextFile(filePath); + } + + const response = await this.client.readTextFile({ + path: filePath, + sessionId: this.sessionId, + line: null, + limit: null, + }); + + return response.content; + } + + async writeTextFile(filePath: string, content: string): Promise { + if (!this.capabilities.writeTextFile) { + return this.fallback.writeTextFile(filePath, content); + } + + await this.client.writeTextFile({ + path: filePath, + content, + sessionId: this.sessionId, + }); + } +} diff --git a/projects/ui/qwen-code/packages/cli/src/zed-integration/schema.ts b/projects/ui/qwen-code/packages/cli/src/zed-integration/schema.ts new file mode 100644 index 0000000000000000000000000000000000000000..b35cc47d5cb98293cf7d1520d893be77250d7fd3 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/zed-integration/schema.ts @@ -0,0 +1,466 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { z } from 'zod'; + +export const AGENT_METHODS = { + authenticate: 'authenticate', + initialize: 'initialize', + session_cancel: 'session/cancel', + session_load: 'session/load', + session_new: 'session/new', + session_prompt: 'session/prompt', +}; + +export const CLIENT_METHODS = { + fs_read_text_file: 
'fs/read_text_file', + fs_write_text_file: 'fs/write_text_file', + session_request_permission: 'session/request_permission', + session_update: 'session/update', +}; + +export const PROTOCOL_VERSION = 1; + +export type WriteTextFileRequest = z.infer; + +export type ReadTextFileRequest = z.infer; + +export type PermissionOptionKind = z.infer; + +export type Role = z.infer; + +export type TextResourceContents = z.infer; + +export type BlobResourceContents = z.infer; + +export type ToolKind = z.infer; + +export type ToolCallStatus = z.infer; + +export type WriteTextFileResponse = z.infer; + +export type ReadTextFileResponse = z.infer; + +export type RequestPermissionOutcome = z.infer< + typeof requestPermissionOutcomeSchema +>; + +export type CancelNotification = z.infer; + +export type AuthenticateRequest = z.infer; + +export type AuthenticateResponse = z.infer; + +export type NewSessionResponse = z.infer; + +export type LoadSessionResponse = z.infer; + +export type StopReason = z.infer; + +export type PromptResponse = z.infer; + +export type ToolCallLocation = z.infer; + +export type PlanEntry = z.infer; + +export type PermissionOption = z.infer; + +export type Annotations = z.infer; + +export type RequestPermissionResponse = z.infer< + typeof requestPermissionResponseSchema +>; + +export type FileSystemCapability = z.infer; + +export type EnvVariable = z.infer; + +export type McpServer = z.infer; + +export type AgentCapabilities = z.infer; + +export type AuthMethod = z.infer; + +export type PromptCapabilities = z.infer; + +export type ClientResponse = z.infer; + +export type ClientNotification = z.infer; + +export type EmbeddedResourceResource = z.infer< + typeof embeddedResourceResourceSchema +>; + +export type NewSessionRequest = z.infer; + +export type LoadSessionRequest = z.infer; + +export type InitializeResponse = z.infer; + +export type ContentBlock = z.infer; + +export type ToolCallContent = z.infer; + +export type ToolCall = z.infer; + +export type 
ClientCapabilities = z.infer; + +export type PromptRequest = z.infer; + +export type SessionUpdate = z.infer; + +export type AgentResponse = z.infer; + +export type RequestPermissionRequest = z.infer< + typeof requestPermissionRequestSchema +>; + +export type InitializeRequest = z.infer; + +export type SessionNotification = z.infer; + +export type ClientRequest = z.infer; + +export type AgentRequest = z.infer; + +export type AgentNotification = z.infer; + +export const writeTextFileRequestSchema = z.object({ + content: z.string(), + path: z.string(), + sessionId: z.string(), +}); + +export const readTextFileRequestSchema = z.object({ + limit: z.number().optional().nullable(), + line: z.number().optional().nullable(), + path: z.string(), + sessionId: z.string(), +}); + +export const permissionOptionKindSchema = z.union([ + z.literal('allow_once'), + z.literal('allow_always'), + z.literal('reject_once'), + z.literal('reject_always'), +]); + +export const roleSchema = z.union([z.literal('assistant'), z.literal('user')]); + +export const textResourceContentsSchema = z.object({ + mimeType: z.string().optional().nullable(), + text: z.string(), + uri: z.string(), +}); + +export const blobResourceContentsSchema = z.object({ + blob: z.string(), + mimeType: z.string().optional().nullable(), + uri: z.string(), +}); + +export const toolKindSchema = z.union([ + z.literal('read'), + z.literal('edit'), + z.literal('delete'), + z.literal('move'), + z.literal('search'), + z.literal('execute'), + z.literal('think'), + z.literal('fetch'), + z.literal('other'), +]); + +export const toolCallStatusSchema = z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + z.literal('failed'), +]); + +export const writeTextFileResponseSchema = z.null(); + +export const readTextFileResponseSchema = z.object({ + content: z.string(), +}); + +export const requestPermissionOutcomeSchema = z.union([ + z.object({ + outcome: z.literal('cancelled'), + }), + z.object({ + 
optionId: z.string(), + outcome: z.literal('selected'), + }), +]); + +export const cancelNotificationSchema = z.object({ + sessionId: z.string(), +}); + +export const authenticateRequestSchema = z.object({ + methodId: z.string(), +}); + +export const authenticateResponseSchema = z.null(); + +export const newSessionResponseSchema = z.object({ + sessionId: z.string(), +}); + +export const loadSessionResponseSchema = z.null(); + +export const stopReasonSchema = z.union([ + z.literal('end_turn'), + z.literal('max_tokens'), + z.literal('refusal'), + z.literal('cancelled'), +]); + +export const promptResponseSchema = z.object({ + stopReason: stopReasonSchema, +}); + +export const toolCallLocationSchema = z.object({ + line: z.number().optional().nullable(), + path: z.string(), +}); + +export const planEntrySchema = z.object({ + content: z.string(), + priority: z.union([z.literal('high'), z.literal('medium'), z.literal('low')]), + status: z.union([ + z.literal('pending'), + z.literal('in_progress'), + z.literal('completed'), + ]), +}); + +export const permissionOptionSchema = z.object({ + kind: permissionOptionKindSchema, + name: z.string(), + optionId: z.string(), +}); + +export const annotationsSchema = z.object({ + audience: z.array(roleSchema).optional().nullable(), + lastModified: z.string().optional().nullable(), + priority: z.number().optional().nullable(), +}); + +export const requestPermissionResponseSchema = z.object({ + outcome: requestPermissionOutcomeSchema, +}); + +export const fileSystemCapabilitySchema = z.object({ + readTextFile: z.boolean(), + writeTextFile: z.boolean(), +}); + +export const envVariableSchema = z.object({ + name: z.string(), + value: z.string(), +}); + +export const mcpServerSchema = z.object({ + args: z.array(z.string()), + command: z.string(), + env: z.array(envVariableSchema), + name: z.string(), +}); + +export const promptCapabilitiesSchema = z.object({ + audio: z.boolean().optional(), + embeddedContext: z.boolean().optional(), + 
image: z.boolean().optional(), +}); + +export const agentCapabilitiesSchema = z.object({ + loadSession: z.boolean().optional(), + promptCapabilities: promptCapabilitiesSchema.optional(), +}); + +export const authMethodSchema = z.object({ + description: z.string().nullable(), + id: z.string(), + name: z.string(), +}); + +export const clientResponseSchema = z.union([ + writeTextFileResponseSchema, + readTextFileResponseSchema, + requestPermissionResponseSchema, +]); + +export const clientNotificationSchema = cancelNotificationSchema; + +export const embeddedResourceResourceSchema = z.union([ + textResourceContentsSchema, + blobResourceContentsSchema, +]); + +export const newSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), +}); + +export const loadSessionRequestSchema = z.object({ + cwd: z.string(), + mcpServers: z.array(mcpServerSchema), + sessionId: z.string(), +}); + +export const initializeResponseSchema = z.object({ + agentCapabilities: agentCapabilitiesSchema, + authMethods: z.array(authMethodSchema), + protocolVersion: z.number(), +}); + +export const contentBlockSchema = z.union([ + z.object({ + annotations: annotationsSchema.optional().nullable(), + text: z.string(), + type: z.literal('text'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('image'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + data: z.string(), + mimeType: z.string(), + type: z.literal('audio'), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + description: z.string().optional().nullable(), + mimeType: z.string().optional().nullable(), + name: z.string(), + size: z.number().optional().nullable(), + title: z.string().optional().nullable(), + type: z.literal('resource_link'), + uri: z.string(), + }), + z.object({ + annotations: annotationsSchema.optional().nullable(), + resource: 
embeddedResourceResourceSchema, + type: z.literal('resource'), + }), +]); + +export const toolCallContentSchema = z.union([ + z.object({ + content: contentBlockSchema, + type: z.literal('content'), + }), + z.object({ + newText: z.string(), + oldText: z.string().nullable(), + path: z.string(), + type: z.literal('diff'), + }), +]); + +export const toolCallSchema = z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), +}); + +export const clientCapabilitiesSchema = z.object({ + fs: fileSystemCapabilitySchema, +}); + +export const promptRequestSchema = z.object({ + prompt: z.array(contentBlockSchema), + sessionId: z.string(), +}); + +export const sessionUpdateSchema = z.union([ + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('user_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_message_chunk'), + }), + z.object({ + content: contentBlockSchema, + sessionUpdate: z.literal('agent_thought_chunk'), + }), + z.object({ + content: z.array(toolCallContentSchema).optional(), + kind: toolKindSchema, + locations: z.array(toolCallLocationSchema).optional(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call'), + status: toolCallStatusSchema, + title: z.string(), + toolCallId: z.string(), + }), + z.object({ + content: z.array(toolCallContentSchema).optional().nullable(), + kind: toolKindSchema.optional().nullable(), + locations: z.array(toolCallLocationSchema).optional().nullable(), + rawInput: z.unknown().optional(), + sessionUpdate: z.literal('tool_call_update'), + status: toolCallStatusSchema.optional().nullable(), + title: z.string().optional().nullable(), + toolCallId: z.string(), + }), + z.object({ + entries: z.array(planEntrySchema), + sessionUpdate: z.literal('plan'), + }), 
+]); + +export const agentResponseSchema = z.union([ + initializeResponseSchema, + authenticateResponseSchema, + newSessionResponseSchema, + loadSessionResponseSchema, + promptResponseSchema, +]); + +export const requestPermissionRequestSchema = z.object({ + options: z.array(permissionOptionSchema), + sessionId: z.string(), + toolCall: toolCallSchema, +}); + +export const initializeRequestSchema = z.object({ + clientCapabilities: clientCapabilitiesSchema, + protocolVersion: z.number(), +}); + +export const sessionNotificationSchema = z.object({ + sessionId: z.string(), + update: sessionUpdateSchema, +}); + +export const clientRequestSchema = z.union([ + writeTextFileRequestSchema, + readTextFileRequestSchema, + requestPermissionRequestSchema, +]); + +export const agentRequestSchema = z.union([ + initializeRequestSchema, + authenticateRequestSchema, + newSessionRequestSchema, + loadSessionRequestSchema, + promptRequestSchema, +]); + +export const agentNotificationSchema = sessionNotificationSchema; diff --git a/projects/ui/qwen-code/packages/cli/src/zed-integration/zedIntegration.ts b/projects/ui/qwen-code/packages/cli/src/zed-integration/zedIntegration.ts new file mode 100644 index 0000000000000000000000000000000000000000..bc7aa7c0cdff76c5f8e27c37a228d9c3d5e65d17 --- /dev/null +++ b/projects/ui/qwen-code/packages/cli/src/zed-integration/zedIntegration.ts @@ -0,0 +1,944 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { ReadableStream, WritableStream } from 'node:stream/web'; + +import { + AuthType, + clearCachedCredentialFile, + Config, + GeminiChat, + logToolCall, + ToolResult, + convertToFunctionResponse, + getErrorMessage, + getErrorStatus, + isNodeError, + isWithinRoot, + MCPServerConfig, + ToolCallConfirmationDetails, + ToolConfirmationOutcome, + DiscoveredMCPTool, +} from '@qwen-code/qwen-code-core'; +import { AcpFileSystemService } from './fileSystemService.js'; +import { Content, Part, FunctionCall, 
PartListUnion } from '@google/genai'; +import * as fs from 'fs/promises'; +import { Readable, Writable } from 'node:stream'; +import * as path from 'path'; +import { z } from 'zod'; +import { LoadedSettings, SettingScope } from '../config/settings.js'; +import * as acp from './acp.js'; + +import { randomUUID } from 'crypto'; +import { CliArgs, loadCliConfig } from '../config/config.js'; +import { Extension } from '../config/extension.js'; + +export async function runZedIntegration( + config: Config, + settings: LoadedSettings, + extensions: Extension[], + argv: CliArgs, +) { + const stdout = Writable.toWeb(process.stdout) as WritableStream; + const stdin = Readable.toWeb(process.stdin) as ReadableStream; + + // Stdout is used to send messages to the client, so console.log/console.info + // messages to stderr so that they don't interfere with ACP. + console.log = console.error; + console.info = console.error; + console.debug = console.error; + + new acp.AgentSideConnection( + (client: acp.Client) => + new GeminiAgent(config, settings, extensions, argv, client), + stdout, + stdin, + ); +} + +class GeminiAgent { + private sessions: Map = new Map(); + private clientCapabilities: acp.ClientCapabilities | undefined; + + constructor( + private config: Config, + private settings: LoadedSettings, + private extensions: Extension[], + private argv: CliArgs, + private client: acp.Client, + ) {} + + async initialize( + args: acp.InitializeRequest, + ): Promise { + this.clientCapabilities = args.clientCapabilities; + const authMethods = [ + { + id: AuthType.LOGIN_WITH_GOOGLE, + name: 'Log in with Google', + description: null, + }, + { + id: AuthType.USE_GEMINI, + name: 'Use Gemini API key', + description: + 'Requires setting the `GEMINI_API_KEY` environment variable', + }, + { + id: AuthType.USE_VERTEX_AI, + name: 'Vertex AI', + description: null, + }, + ]; + + return { + protocolVersion: acp.PROTOCOL_VERSION, + authMethods, + agentCapabilities: { + loadSession: false, + 
promptCapabilities: { + image: true, + audio: true, + embeddedContext: true, + }, + }, + }; + } + + async authenticate({ methodId }: acp.AuthenticateRequest): Promise { + const method = z.nativeEnum(AuthType).parse(methodId); + + await clearCachedCredentialFile(); + await this.config.refreshAuth(method); + this.settings.setValue(SettingScope.User, 'selectedAuthType', method); + } + + async newSession({ + cwd, + mcpServers, + }: acp.NewSessionRequest): Promise { + const sessionId = randomUUID(); + const config = await this.newSessionConfig(sessionId, cwd, mcpServers); + + let isAuthenticated = false; + if (this.settings.merged.selectedAuthType) { + try { + await config.refreshAuth(this.settings.merged.selectedAuthType); + isAuthenticated = true; + } catch (e) { + console.error(`Authentication failed: ${e}`); + } + } + + if (!isAuthenticated) { + throw acp.RequestError.authRequired(); + } + + if (this.clientCapabilities?.fs) { + const acpFileSystemService = new AcpFileSystemService( + this.client, + sessionId, + this.clientCapabilities.fs, + config.getFileSystemService(), + ); + config.setFileSystemService(acpFileSystemService); + } + + const geminiClient = config.getGeminiClient(); + const chat = await geminiClient.startChat(); + const session = new Session(sessionId, chat, config, this.client); + this.sessions.set(sessionId, session); + + return { + sessionId, + }; + } + + async newSessionConfig( + sessionId: string, + cwd: string, + mcpServers: acp.McpServer[], + ): Promise { + const mergedMcpServers = { ...this.settings.merged.mcpServers }; + + for (const { command, args, env: rawEnv, name } of mcpServers) { + const env: Record = {}; + for (const { name: envName, value } of rawEnv) { + env[envName] = value; + } + mergedMcpServers[name] = new MCPServerConfig(command, args, env, cwd); + } + + const settings = { ...this.settings.merged, mcpServers: mergedMcpServers }; + + const config = await loadCliConfig( + settings, + this.extensions, + sessionId, + this.argv, + 
cwd, + ); + + await config.initialize(); + return config; + } + + async cancel(params: acp.CancelNotification): Promise { + const session = this.sessions.get(params.sessionId); + if (!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + await session.cancelPendingPrompt(); + } + + async prompt(params: acp.PromptRequest): Promise { + const session = this.sessions.get(params.sessionId); + if (!session) { + throw new Error(`Session not found: ${params.sessionId}`); + } + return session.prompt(params); + } +} + +class Session { + private pendingPrompt: AbortController | null = null; + + constructor( + private readonly id: string, + private readonly chat: GeminiChat, + private readonly config: Config, + private readonly client: acp.Client, + ) {} + + async cancelPendingPrompt(): Promise { + if (!this.pendingPrompt) { + throw new Error('Not currently generating'); + } + + this.pendingPrompt.abort(); + this.pendingPrompt = null; + } + + async prompt(params: acp.PromptRequest): Promise { + this.pendingPrompt?.abort(); + const pendingSend = new AbortController(); + this.pendingPrompt = pendingSend; + + const promptId = Math.random().toString(16).slice(2); + const chat = this.chat; + + const parts = await this.#resolvePrompt(params.prompt, pendingSend.signal); + + let nextMessage: Content | null = { role: 'user', parts }; + + while (nextMessage !== null) { + if (pendingSend.signal.aborted) { + chat.addHistory(nextMessage); + return { stopReason: 'cancelled' }; + } + + const functionCalls: FunctionCall[] = []; + + try { + const responseStream = await chat.sendMessageStream( + { + message: nextMessage?.parts ?? 
[], + config: { + abortSignal: pendingSend.signal, + }, + }, + promptId, + ); + nextMessage = null; + + for await (const resp of responseStream) { + if (pendingSend.signal.aborted) { + return { stopReason: 'cancelled' }; + } + + if (resp.candidates && resp.candidates.length > 0) { + const candidate = resp.candidates[0]; + for (const part of candidate.content?.parts ?? []) { + if (!part.text) { + continue; + } + + const content: acp.ContentBlock = { + type: 'text', + text: part.text, + }; + + this.sendUpdate({ + sessionUpdate: part.thought + ? 'agent_thought_chunk' + : 'agent_message_chunk', + content, + }); + } + } + + if (resp.functionCalls) { + functionCalls.push(...resp.functionCalls); + } + } + } catch (error) { + if (getErrorStatus(error) === 429) { + throw new acp.RequestError( + 429, + 'Rate limit exceeded. Try again later.', + ); + } + + throw error; + } + + if (functionCalls.length > 0) { + const toolResponseParts: Part[] = []; + + for (const fc of functionCalls) { + const response = await this.runTool(pendingSend.signal, promptId, fc); + + const parts = Array.isArray(response) ? response : [response]; + + for (const part of parts) { + if (typeof part === 'string') { + toolResponseParts.push({ text: part }); + } else if (part) { + toolResponseParts.push(part); + } + } + } + + nextMessage = { role: 'user', parts: toolResponseParts }; + } + } + + return { stopReason: 'end_turn' }; + } + + private async sendUpdate(update: acp.SessionUpdate): Promise { + const params: acp.SessionNotification = { + sessionId: this.id, + update, + }; + + await this.client.sessionUpdate(params); + } + + private async runTool( + abortSignal: AbortSignal, + promptId: string, + fc: FunctionCall, + ): Promise { + const callId = fc.id ?? `${fc.name}-${Date.now()}`; + const args = (fc.args ?? 
{}) as Record; + + const startTime = Date.now(); + + const errorResponse = (error: Error) => { + const durationMs = Date.now() - startTime; + logToolCall(this.config, { + 'event.name': 'tool_call', + 'event.timestamp': new Date().toISOString(), + prompt_id: promptId, + function_name: fc.name ?? '', + function_args: args, + duration_ms: durationMs, + success: false, + error: error.message, + tool_type: + typeof tool !== 'undefined' && tool instanceof DiscoveredMCPTool + ? 'mcp' + : 'native', + }); + + return [ + { + functionResponse: { + id: callId, + name: fc.name ?? '', + response: { error: error.message }, + }, + }, + ]; + }; + + if (!fc.name) { + return errorResponse(new Error('Missing function name')); + } + + const toolRegistry = this.config.getToolRegistry(); + const tool = toolRegistry.getTool(fc.name as string); + + if (!tool) { + return errorResponse( + new Error(`Tool "${fc.name}" not found in registry.`), + ); + } + + try { + const invocation = tool.build(args); + + const confirmationDetails = + await invocation.shouldConfirmExecute(abortSignal); + + if (confirmationDetails) { + const content: acp.ToolCallContent[] = []; + + if (confirmationDetails.type === 'edit') { + content.push({ + type: 'diff', + path: confirmationDetails.fileName, + oldText: confirmationDetails.originalContent, + newText: confirmationDetails.newContent, + }); + } + + const params: acp.RequestPermissionRequest = { + sessionId: this.id, + options: toPermissionOptions(confirmationDetails), + toolCall: { + toolCallId: callId, + status: 'pending', + title: invocation.getDescription(), + content, + locations: invocation.toolLocations(), + kind: tool.kind, + }, + }; + + const output = await this.client.requestPermission(params); + const outcome = + output.outcome.outcome === 'cancelled' + ? 
ToolConfirmationOutcome.Cancel + : z + .nativeEnum(ToolConfirmationOutcome) + .parse(output.outcome.optionId); + + await confirmationDetails.onConfirm(outcome); + + switch (outcome) { + case ToolConfirmationOutcome.Cancel: + return errorResponse( + new Error(`Tool "${fc.name}" was canceled by the user.`), + ); + case ToolConfirmationOutcome.ProceedOnce: + case ToolConfirmationOutcome.ProceedAlways: + case ToolConfirmationOutcome.ProceedAlwaysServer: + case ToolConfirmationOutcome.ProceedAlwaysTool: + case ToolConfirmationOutcome.ModifyWithEditor: + break; + default: { + const resultOutcome: never = outcome; + throw new Error(`Unexpected: ${resultOutcome}`); + } + } + } else { + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: tool.kind, + }); + } + + const toolResult: ToolResult = await invocation.execute(abortSignal); + const content = toToolCallContent(toolResult); + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], + }); + + const durationMs = Date.now() - startTime; + logToolCall(this.config, { + 'event.name': 'tool_call', + 'event.timestamp': new Date().toISOString(), + function_name: fc.name, + function_args: args, + duration_ms: durationMs, + success: true, + prompt_id: promptId, + tool_type: + typeof tool !== 'undefined' && tool instanceof DiscoveredMCPTool + ? 'mcp' + : 'native', + }); + + return convertToFunctionResponse(fc.name, callId, toolResult.llmContent); + } catch (e) { + const error = e instanceof Error ? 
e : new Error(String(e)); + + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { type: 'content', content: { type: 'text', text: error.message } }, + ], + }); + + return errorResponse(error); + } + } + + async #resolvePrompt( + message: acp.ContentBlock[], + abortSignal: AbortSignal, + ): Promise { + const FILE_URI_SCHEME = 'file://'; + + const embeddedContext: acp.EmbeddedResourceResource[] = []; + + const parts = message.map((part) => { + switch (part.type) { + case 'text': + return { text: part.text }; + case 'image': + case 'audio': + return { + inlineData: { + mimeType: part.mimeType, + data: part.data, + }, + }; + case 'resource_link': { + if (part.uri.startsWith(FILE_URI_SCHEME)) { + return { + fileData: { + mimeData: part.mimeType, + name: part.name, + fileUri: part.uri.slice(FILE_URI_SCHEME.length), + }, + }; + } else { + return { text: `@${part.uri}` }; + } + } + case 'resource': { + embeddedContext.push(part.resource); + return { text: `@${part.resource.uri}` }; + } + default: { + const unreachable: never = part; + throw new Error(`Unexpected chunk type: '${unreachable}'`); + } + } + }); + + const atPathCommandParts = parts.filter((part) => 'fileData' in part); + + if (atPathCommandParts.length === 0 && embeddedContext.length === 0) { + return parts; + } + + const atPathToResolvedSpecMap = new Map(); + + // Get centralized file discovery service + const fileDiscovery = this.config.getFileService(); + const respectGitIgnore = this.config.getFileFilteringRespectGitIgnore(); + + const pathSpecsToRead: string[] = []; + const contentLabelsForDisplay: string[] = []; + const ignoredPaths: string[] = []; + + const toolRegistry = this.config.getToolRegistry(); + const readManyFilesTool = toolRegistry.getTool('read_many_files'); + const globTool = toolRegistry.getTool('glob'); + + if (!readManyFilesTool) { + throw new Error('Error: read_many_files tool not found.'); + } + + for (const atPathPart 
of atPathCommandParts) { + const pathName = atPathPart.fileData!.fileUri; + // Check if path should be ignored by git + if (fileDiscovery.shouldGitIgnoreFile(pathName)) { + ignoredPaths.push(pathName); + const reason = respectGitIgnore + ? 'git-ignored and will be skipped' + : 'ignored by custom patterns'; + console.warn(`Path ${pathName} is ${reason}.`); + continue; + } + let currentPathSpec = pathName; + let resolvedSuccessfully = false; + try { + const absolutePath = path.resolve(this.config.getTargetDir(), pathName); + if (isWithinRoot(absolutePath, this.config.getTargetDir())) { + const stats = await fs.stat(absolutePath); + if (stats.isDirectory()) { + currentPathSpec = pathName.endsWith('/') + ? `${pathName}**` + : `${pathName}/**`; + this.debug( + `Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`, + ); + } else { + this.debug(`Path ${pathName} resolved to file: ${currentPathSpec}`); + } + resolvedSuccessfully = true; + } else { + this.debug( + `Path ${pathName} is outside the project directory. 
Skipping.`, + ); + } + } catch (error) { + if (isNodeError(error) && error.code === 'ENOENT') { + if (this.config.getEnableRecursiveFileSearch() && globTool) { + this.debug( + `Path ${pathName} not found directly, attempting glob search.`, + ); + try { + const globResult = await globTool.buildAndExecute( + { + pattern: `**/*${pathName}*`, + path: this.config.getTargetDir(), + }, + abortSignal, + ); + if ( + globResult.llmContent && + typeof globResult.llmContent === 'string' && + !globResult.llmContent.startsWith('No files found') && + !globResult.llmContent.startsWith('Error:') + ) { + const lines = globResult.llmContent.split('\n'); + if (lines.length > 1 && lines[1]) { + const firstMatchAbsolute = lines[1].trim(); + currentPathSpec = path.relative( + this.config.getTargetDir(), + firstMatchAbsolute, + ); + this.debug( + `Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`, + ); + resolvedSuccessfully = true; + } else { + this.debug( + `Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`, + ); + } + } else { + this.debug( + `Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`, + ); + } + } catch (globError) { + console.error( + `Error during glob search for ${pathName}: ${getErrorMessage(globError)}`, + ); + } + } else { + this.debug( + `Glob tool not found. Path ${pathName} will be skipped.`, + ); + } + } else { + console.error( + `Error stating path ${pathName}. 
Path ${pathName} will be skipped.`, + ); + } + } + if (resolvedSuccessfully) { + pathSpecsToRead.push(currentPathSpec); + atPathToResolvedSpecMap.set(pathName, currentPathSpec); + contentLabelsForDisplay.push(pathName); + } + } + + // Construct the initial part of the query for the LLM + let initialQueryText = ''; + for (let i = 0; i < parts.length; i++) { + const chunk = parts[i]; + if ('text' in chunk) { + initialQueryText += chunk.text; + } else { + // type === 'atPath' + const resolvedSpec = + chunk.fileData && atPathToResolvedSpecMap.get(chunk.fileData.fileUri); + if ( + i > 0 && + initialQueryText.length > 0 && + !initialQueryText.endsWith(' ') && + resolvedSpec + ) { + // Add space if previous part was text and didn't end with space, or if previous was @path + const prevPart = parts[i - 1]; + if ( + 'text' in prevPart || + ('fileData' in prevPart && + atPathToResolvedSpecMap.has(prevPart.fileData!.fileUri)) + ) { + initialQueryText += ' '; + } + } + if (resolvedSpec) { + initialQueryText += `@${resolvedSpec}`; + } else { + // If not resolved for reading (e.g. lone @ or invalid path that was skipped), + // add the original @-string back, ensuring spacing if it's not the first element. + if ( + i > 0 && + initialQueryText.length > 0 && + !initialQueryText.endsWith(' ') && + !chunk.fileData?.fileUri.startsWith(' ') + ) { + initialQueryText += ' '; + } + if (chunk.fileData?.fileUri) { + initialQueryText += `@${chunk.fileData.fileUri}`; + } + } + } + } + initialQueryText = initialQueryText.trim(); + // Inform user about ignored paths + if (ignoredPaths.length > 0) { + const ignoreType = respectGitIgnore ? 
'git-ignored' : 'custom-ignored'; + this.debug( + `Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`, + ); + } + + const processedQueryParts: Part[] = [{ text: initialQueryText }]; + + if (pathSpecsToRead.length === 0 && embeddedContext.length === 0) { + // Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText + console.warn('No valid file paths found in @ commands to read.'); + return [{ text: initialQueryText }]; + } + + if (pathSpecsToRead.length > 0) { + const toolArgs = { + paths: pathSpecsToRead, + respectGitIgnore, // Use configuration setting + }; + + const callId = `${readManyFilesTool.name}-${Date.now()}`; + + try { + const invocation = readManyFilesTool.build(toolArgs); + + await this.sendUpdate({ + sessionUpdate: 'tool_call', + toolCallId: callId, + status: 'in_progress', + title: invocation.getDescription(), + content: [], + locations: invocation.toolLocations(), + kind: readManyFilesTool.kind, + }); + + const result = await invocation.execute(abortSignal); + const content = toToolCallContent(result) || { + type: 'content', + content: { + type: 'text', + text: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, + }, + }; + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'completed', + content: content ? [content] : [], + }); + if (Array.isArray(result.llmContent)) { + const fileContentRegex = /^--- (.*?) 
---\n\n([\s\S]*?)\n\n$/; + processedQueryParts.push({ + text: '\n--- Content from referenced files ---', + }); + for (const part of result.llmContent) { + if (typeof part === 'string') { + const match = fileContentRegex.exec(part); + if (match) { + const filePathSpecInContent = match[1]; // This is a resolved pathSpec + const fileActualContent = match[2].trim(); + processedQueryParts.push({ + text: `\nContent from @${filePathSpecInContent}:\n`, + }); + processedQueryParts.push({ text: fileActualContent }); + } else { + processedQueryParts.push({ text: part }); + } + } else { + // part is a Part object. + processedQueryParts.push(part); + } + } + } else { + console.warn( + 'read_many_files tool returned no content or empty content.', + ); + } + } catch (error: unknown) { + await this.sendUpdate({ + sessionUpdate: 'tool_call_update', + toolCallId: callId, + status: 'failed', + content: [ + { + type: 'content', + content: { + type: 'text', + text: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + }, + }, + ], + }); + + throw error; + } + } + + if (embeddedContext.length > 0) { + processedQueryParts.push({ + text: '\n--- Content from referenced context ---', + }); + + for (const contextPart of embeddedContext) { + processedQueryParts.push({ + text: `\nContent from @${contextPart.uri}:\n`, + }); + if ('text' in contextPart) { + processedQueryParts.push({ + text: contextPart.text, + }); + } else { + processedQueryParts.push({ + inlineData: { + mimeType: contextPart.mimeType ?? 
'application/octet-stream', + data: contextPart.blob, + }, + }); + } + } + } + + return processedQueryParts; + } + + debug(msg: string) { + if (this.config.getDebugMode()) { + console.warn(msg); + } + } +} + +function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { + if (toolResult.error?.message) { + throw new Error(toolResult.error.message); + } + + if (toolResult.returnDisplay) { + if (typeof toolResult.returnDisplay === 'string') { + return { + type: 'content', + content: { type: 'text', text: toolResult.returnDisplay }, + }; + } else if ( + 'type' in toolResult.returnDisplay && + toolResult.returnDisplay.type === 'todo_list' + ) { + // Handle TodoResultDisplay - convert to text representation + const todoText = toolResult.returnDisplay.todos + .map((todo) => { + const statusIcon = { + pending: '○', + in_progress: '◐', + completed: '●', + }[todo.status]; + return `${statusIcon} ${todo.content}`; + }) + .join('\n'); + + return { + type: 'content', + content: { type: 'text', text: todoText }, + }; + } else if ('fileDiff' in toolResult.returnDisplay) { + // Handle FileDiff + return { + type: 'diff', + path: toolResult.returnDisplay.fileName, + oldText: toolResult.returnDisplay.originalContent, + newText: toolResult.returnDisplay.newContent, + }; + } + } + return null; +} + +const basicPermissionOptions = [ + { + optionId: ToolConfirmationOutcome.ProceedOnce, + name: 'Allow', + kind: 'allow_once', + }, + { + optionId: ToolConfirmationOutcome.Cancel, + name: 'Reject', + kind: 'reject_once', + }, +] as const; + +function toPermissionOptions( + confirmation: ToolCallConfirmationDetails, +): acp.PermissionOption[] { + switch (confirmation.type) { + case 'edit': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: 'Allow All Edits', + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'exec': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow ${confirmation.rootCommand}`, 
+ kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'mcp': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlwaysServer, + name: `Always Allow ${confirmation.serverName}`, + kind: 'allow_always', + }, + { + optionId: ToolConfirmationOutcome.ProceedAlwaysTool, + name: `Always Allow ${confirmation.toolName}`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + case 'info': + return [ + { + optionId: ToolConfirmationOutcome.ProceedAlways, + name: `Always Allow`, + kind: 'allow_always', + }, + ...basicPermissionOptions, + ]; + default: { + const unreachable: never = confirmation; + throw new Error(`Unexpected: ${unreachable}`); + } + } +} diff --git a/projects/ui/serena-new/.env.example b/projects/ui/serena-new/.env.example new file mode 100644 index 0000000000000000000000000000000000000000..e2b161b182d6f3d6e0ae9220f514d295d15d099a --- /dev/null +++ b/projects/ui/serena-new/.env.example @@ -0,0 +1,2 @@ +GOOGLE_API_KEY= +ANTHROPIC_API_KEY= diff --git a/secrets/dataops/.env b/secrets/dataops/.env new file mode 100644 index 0000000000000000000000000000000000000000..4a170301d41e8954a2531b88959b5b351d557d68 --- /dev/null +++ b/secrets/dataops/.env @@ -0,0 +1,8 @@ +SCYLLA_USER= +SCYLLA_PASS= +DFLY_PASS=torrent_cluster_auth + +# --- Hugging Face auth --- +# Set your personal access token with write access to private repos +# Obtain from https://huggingface.co/settings/tokens +# HUGGINGFACE_HUB_TOKEN= diff --git a/secrets/dataops/central_stack.yaml b/secrets/dataops/central_stack.yaml new file mode 100644 index 0000000000000000000000000000000000000000..837d89a861b362da370ed945b4b9eb725e389522 --- /dev/null +++ b/secrets/dataops/central_stack.yaml @@ -0,0 +1,48 @@ +# Central stack endpoints (vast1) +central_host: 172.17.0.3 + +# Core +qdrant_host: 172.17.0.3 +qdrant_http: 17000 +qdrant_grpc: 17001 + +gremlin_host: 172.17.0.3 +gremlin_port: 17002 + +dragonfly_host: 172.17.0.3 +dragonfly_ports: [18000, 18001, 18002] + +redis_host: 
172.17.0.3 +redis_ports: [18010, 18011, 18012] + +# Additional components to provision +etcd_host: 172.17.0.3 +etcd_client: 2379 + +minio_host: 172.17.0.3 +minio_api: 9000 +minio_console: 9001 + +pulsar_host: 172.17.0.3 +pulsar_bin: 6650 +pulsar_http: 8080 + +postgres_host: 172.17.0.3 +postgres_port: 5432 + +milvus_host: 172.17.0.3 +milvus_grpc: 19530 +milvus_http: 9091 + +opensearch_host: 172.17.0.3 +opensearch_http: 9200 + +meilisearch_host: 172.17.0.3 +meilisearch_http: 7700 + +influxdb_host: 172.17.0.3 +influxdb_http: 8086 + +ipfs_host: 172.17.0.3 +ipfs_api: 5001 +ipfs_gateway: 8080 diff --git a/secrets/dataops/clickhouse_connection.md b/secrets/dataops/clickhouse_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..ea6e4c64734a7bd79a672cb223f3fcce876387b0 --- /dev/null +++ b/secrets/dataops/clickhouse_connection.md @@ -0,0 +1,184 @@ +# ClickHouse Database Connection Guide + +## Connection Details +- **Host**: localhost +- **Port**: 9000 (HTTP), 9004 (HTTPS), 9009 (Native TCP) +- **Protocol**: HTTP/TCP (ClickHouse native) +- **Default Database**: default +- **Health Check**: `clickhouse-client --query "SELECT version()"` + +## Authentication +- **Default User**: default (no password) +- **Access**: Localhost only, no authentication required +- **Security**: Development mode - add authentication for production + +## ClickHouse-CLI Examples +```bash +# Connect to ClickHouse +clickhouse-client + +# Show databases +SHOW DATABASES; + +# Create nova database +CREATE DATABASE nova_analytics; + +# Basic query +SELECT version(), now(), currentDatabase(); + +# Table operations +CREATE TABLE nova_analytics.events ( + timestamp DateTime, + event_type String, + data String +) ENGINE = MergeTree() +ORDER BY timestamp; + +INSERT INTO nova_analytics.events VALUES +(now(), 'session_start', '{"user": "test"}'); + +SELECT * FROM nova_analytics.events; +``` + +## HTTP API Examples +```bash +# Health check via HTTP +curl 
"http://localhost:8123/?query=SELECT%20version()" + +# Create table via HTTP +curl -X POST "http://localhost:8123/?query=CREATE%20TABLE%20test%20(id%20UInt32)%20ENGINE%20%3D%20Memory" + +# Insert data +curl -X POST "http://localhost:8123/?query=INSERT%20INTO%20test%20VALUES%20(1)%2C%20(2)%2C%20(3)" + +# Query data +curl "http://localhost:8123/?query=SELECT%20*%20FROM%20test" +``` + +## Python Client Example +```python +from clickhouse_driver import Client + +# Connect to ClickHouse +client = Client( + host='localhost', + port=9000, + user='default', + database='default' +) + +# Execute queries +version = client.execute('SELECT version()') +print(f"ClickHouse version: {version[0][0]}") + +# Create analytics table +client.execute(''' +CREATE TABLE IF NOT EXISTS nova_analytics.metrics ( + timestamp DateTime, + metric_name String, + value Float64, + tags String +) ENGINE = MergeTree() +ORDER BY (metric_name, timestamp) +''') + +# Insert metrics data +client.execute(''' +INSERT INTO nova_analytics.metrics VALUES +(now(), 'memory_usage', 75.5, '{"host": "vast1"}'), +(now(), 'cpu_usage', 45.2, '{"host": "vast1"}') +''') + +# Query analytics +results = client.execute(''' +SELECT + metric_name, + avg(value) as avg_value, + max(timestamp) as last_seen +FROM nova_analytics.metrics +GROUP BY metric_name +''') + +for row in results: + print(f"Metric: {row[0]}, Avg: {row[1]:.2f}, Last: {row[2]}") +``` + +## Configuration Notes +- **Data Directory**: `/data/data/clickhouse/data/` +- **Log Directory**: `/data/data/clickhouse/logs/` +- **Max Memory**: 50GB (configurable) +- **Backup Location**: `/data/adaptai/backups/clickhouse/` +- **Port Configuration**: + - 8123: HTTP interface + - 9000: Native TCP interface + - 9004: HTTPS interface + - 9009: Native TCP with SSL + +## Performance Tuning +```sql +-- Monitor system metrics +SELECT * FROM system.metrics LIMIT 10; + +-- Check query performance +SELECT + query, + elapsed, + read_rows, + memory_usage +FROM system.processes; + +-- 
Table sizes and parts +SELECT + database, + table, + sum(bytes) as size_bytes +FROM system.parts +GROUP BY database, table +ORDER BY size_bytes DESC; +``` + +## Health Checks +```bash +# Basic connectivity +clickhouse-client --query "SELECT 1" + +# System health +clickhouse-client --query "SELECT * FROM system.metrics WHERE metric LIKE '%memory%'" + +# Disk usage +clickhouse-client --query " +SELECT + name, + free_space, + total_space, + formatReadableSize(free_space) as free, + formatReadableSize(total_space) as total +FROM system.disks +" + +# Active queries +clickhouse-client --query "SELECT query, elapsed FROM system.processes" +``` + +## Security +- ❗ Localhost binding only +- ❗ No authentication configured (development mode) +- ❗ Add password authentication for production +- ❗ Monitor disk usage on /data partition +- ❗ Regular backups recommended +- ❗ Consider enabling SSL for encrypted connections + +## Backup Procedures +```bash +# Create backup +clickhouse-client --query "BACKUP DATABASE nova_analytics TO '/data/adaptai/backups/clickhouse/nova_analytics_backup'" + +# Restore backup +clickhouse-client --query "RESTORE DATABASE nova_analytics FROM '/data/adaptai/backups/clickhouse/nova_analytics_backup'" + +# Manual file backup +sudo rsync -av /data/data/clickhouse/data/ /data/adaptai/backups/clickhouse/full_backup/ +``` + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/dataops/connections.yaml b/secrets/dataops/connections.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f84d0486f5f8e9c38ac2279f9f6e7fc56626398 --- /dev/null +++ b/secrets/dataops/connections.yaml @@ -0,0 +1,64 @@ +# DataOps Connections and Credentials +# Managed by Atlas (Head of DataOps) +# Updated: $(date -u +%Y-%m-%dT%H:%M:%SZ) + +qdrant: + host: qdrant.dbops.local + http_port: 17000 + grpc_port: 17001 + api_key: null + +janusgraph: + gremlin_host: gremlin.dbops.local + gremlin_port: 17002 + backend: scylladb + scylla: + 
hosts: ["10.0.10.11","10.0.10.12","10.0.10.13"] + port: 9042 + keyspace: janusgraph + username: "scylla_user" + password: "scylla_pass" + +redis_cluster: + nodes: + - host: redis-1.dbops.local + port: 18010 + - host: redis-2.dbops.local + port: 18011 + - host: redis-3.dbops.local + port: 18012 + password: "torrent_cluster_auth" + +dragonfly_cluster: + nodes: + - host: dragonfly-1.dbops.local + port: 18000 + - host: dragonfly-2.dbops.local + port: 18001 + - host: dragonfly-3.dbops.local + port: 18002 + password: "torrent_cluster_auth" + +nats: + url: nats://localhost:4222 + user: null + pass: null + +pulsar: + url: pulsar://localhost:6655 + token: null + +mlops: + training_url: http://localhost:3000/training + inference_url: http://localhost:3000/inference + +# E-FIRE-1 +efire: + orchestrator: + enabled: true + agents: + api_developer: { enabled: true } + content_creator: { enabled: true } + +scylla_user: ${SCYLLA_USER} +scylla_pass: ${SCYLLA_PASS} diff --git a/secrets/dataops/dataops_master_connections.md b/secrets/dataops/dataops_master_connections.md new file mode 100644 index 0000000000000000000000000000000000000000..fd62fe3ab41ef47b2db8fa389c1860dddb65b43d --- /dev/null +++ b/secrets/dataops/dataops_master_connections.md @@ -0,0 +1,190 @@ +# DataOps Master Connections Sheet + +## Service Overview +| Service | Port | Type | Status | Authentication | Data Location | +|---------|------|------|---------|----------------|---------------| +| **Qdrant** | 17000 | Vector DB | ✅ Operational | None | `/data/qdrant/storage/` | +| **DragonFly** | 18000-18002 | Cache Cluster | ✅ Operational | None | `/data/dragonfly/node*/data/` | +| **Redis Cluster** | 18010-18012 | Memory Store | ✅ Operational | None | `/data/redis/node*/data/` | +| **JanusGraph** | 17002 | Graph DB | ✅ Operational | None | `/data/janusgraph/data/` | +| **NATS** | 18222 | Messaging | ✅ Operational | None | N/A | +| **ClickHouse** | 9000 | Analytics DB | ✅ Operational | None | `/data/data/clickhouse/data/` 
| +| **MeiliSearch** | 17005 | Search Engine | ✅ Operational | Master Key | `/data/data/meilisearch/data/` | +| **PostgreSQL** | 5432 | SQL DB | ✅ Operational | postgres/None | `/data/postgres/data/` | +| **MongoDB** | 27017 | Document DB | ✅ Operational | None | `/data/mongodb/data/` | +| **ChromaDB** | 8000 | Vector Store | ✅ Operational | None | `/data/chromadb/data/` | + +## Quick Connection Commands +```bash +# Vector Databases +curl http://localhost:17000/collections # Qdrant +curl http://localhost:8000/api/v1/heartbeat # ChromaDB + +# Cache/Memory Stores +redis-cli -p 18000 ping # DragonFly Master +redis-cli -p 18010 ping # Redis Cluster Node 1 +curl http://localhost:18222/connz # NATS Server + +# Graph Database +curl -X POST http://localhost:17002 -d '{"gremlin": "g.V().limit(1)"}' # JanusGraph + +# Analytics & Search +clickhouse-client --query "SELECT version()" # ClickHouse +curl http://localhost:17005/health # MeiliSearch + +# Traditional Databases +psql -h localhost -p 5432 -U postgres # PostgreSQL +mongosh --port 27017 # MongoDB +``` + +## Authentication Summary +- **No Authentication**: Qdrant, DragonFly, Redis Cluster, JanusGraph, PostgreSQL, MongoDB, ChromaDB, NATS +- **Master Key**: MeiliSearch (`VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM`) +- **Development Mode**: All services (add authentication for production) + +## Health Check Script +```bash +#!/bin/bash +# dataops_health_check.sh + +echo "=== DataOps Service Health Check ===" + +# Vector DBs +echo "Qdrant: $(curl -s http://localhost:17000/collections | jq .status 2>/dev/null || echo '❌')" +echo "ChromaDB: $(curl -s http://localhost:8000/api/v1/heartbeat | jq . 
2>/dev/null || echo '❌')" + +# Cache Stores +echo "DragonFly: $(redis-cli -p 18000 ping 2>/dev/null || echo '❌')" +echo "Redis Cluster: $(redis-cli -p 18010 ping 2>/dev/null || echo '❌')" +echo "NATS: $(curl -s http://localhost:18222/healthz 2>/dev/null | grep -q OK && echo '✅' || echo '❌')" + +# Graph & Analytics +echo "JanusGraph: $(netstat -tln | grep 17002 >/dev/null && echo '✅' || echo '❌')" +echo "ClickHouse: $(clickhouse-client --query 'SELECT 1' 2>/dev/null && echo '✅' || echo '❌')" + +# Search +echo "MeiliSearch: $(curl -s http://localhost:17005/health | jq .status 2>/dev/null || echo '❌')" + +# Traditional DBs +echo "PostgreSQL: $(pg_isready -h localhost -p 5432 2>/dev/null && echo '✅' || echo '❌')" +echo "MongoDB: $(mongosh --eval 'db.adminCommand("ping")' --quiet 2>/dev/null && echo '✅' || echo '❌')" + +echo "=== Health Check Complete ===" +``` + +## Connection Strings +### Python Examples +```python +# Vector Stores +qdrant_client = QdrantClient(host="localhost", port=17000) +chroma_client = chromadb.HttpClient(host="localhost", port=8000) + +# Cache Stores +dragonfly = redis.Redis(host="localhost", port=18000) +redis_cluster = RedisCluster(startup_nodes=[{"host": "localhost", "port": 18010}]) + +# Messaging +nats_client = nats.connect("nats://localhost:18222") + +# Graph DB +janusgraph = DriverRemoteConnection('ws://localhost:17002/gremlin', 'g') + +# Analytics +from clickhouse_driver import Client +import nats + +# Search +meilisearch = Client('http://localhost:17005', 'VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM') + +# Traditional DBs +postgres = psycopg2.connect(host="localhost", port=5432, user="postgres") +mongo = MongoClient("localhost", 27017) +``` + +### JavaScript/Node.js Examples +```javascript +const nats = require('nats'); +// Cache Stores +const redis = require('redis'); +const dragonfly = redis.createClient({ port: 18000 }); + +// Messaging +const nats = require('nats'); +const nats_client = nats.connect({ servers: 
["nats://localhost:18222"] }); + +// Search +const { MeiliSearch } = require('meilisearch'); +const meilisearch = new MeiliSearch({ + host: 'http://localhost:17005', + apiKey: 'VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM' +}); + +// Traditional DBs +const { Pool } = require('pg'); +const postgres = new Pool({ host: 'localhost', port: 5432, user: 'postgres' }); + +const { MongoClient } = require('mongodb'); +const mongo = new MongoClient('mongodb://localhost:27017'); +``` + +## Security Notes +- 🔒 All services bound to localhost only +- 🔒 No external network exposure +- 🔒 Development mode - add authentication for production +- 🔒 Regular backups recommended +- 🔒 Monitor disk usage on /data partition + +## Backup Procedures +```bash +# Full DataOps backup +sudo mkdir -p /data/adaptai/backups/dataops/$(date +%Y%m%d) + +# Backup each service +sudo rsync -av /data/qdrant/storage/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/qdrant/ +sudo rsync -av /data/dragonfly/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/dragonfly/ +sudo rsync -av /data/redis/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/redis/ +sudo rsync -av /data/janusgraph/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/janusgraph/ +sudo rsync -av /data/data/clickhouse/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/clickhouse/ +sudo rsync -av /data/data/meilisearch/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/meilisearch/ +sudo rsync -av /data/postgres/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/postgres/ +sudo rsync -av /data/mongodb/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/mongodb/ +sudo rsync -av /data/chromadb/data/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/chromadb/ +sudo rsync -av /data/nats/ /data/adaptai/backups/dataops/$(date +%Y%m%d)/nats/ + +# Database dumps +pg_dumpall -h localhost -p 5432 -U postgres > /data/adaptai/backups/dataops/$(date +%Y%m%d)/postgres_full.sql +mongodump --host localhost --port 27017 --out /data/adaptai/backups/dataops/$(date 
+%Y%m%d)/mongodb_dump/ +``` + +## Monitoring & Alerts +```bash +# Disk space monitoring +df -h /data | grep -E "(Filesystem|/data)" + +# Memory usage +free -h + +# Service monitoring +ps aux | grep -E "(qdrant|dragonfly|redis|janusgraph|clickhouse|meilisearch|postgres|mongo|chroma|nats)" + +# Port monitoring +netstat -tln | grep -E "(17000|18000|18010|17002|9000|17005|5432|27017|8000|18222)" +``` + +## Emergency Recovery +```bash +# Stop all services +sudo pkill -f "qdrant|dragonfly|redis-server|java.*janus|clickhouse|meilisearch|postgres|mongod|chroma|nats-server" + +# Restore from backup +sudo rsync -av /data/adaptai/backups/dataops/LATEST/ /data/ + +# Restart services (refer to individual connection guides) +# ... +``` + +## Last Updated +- **Date**: September 4, 2025 +- **By**: Atlas, Head of DataOps +- **Environment**: Vast1 Server - Development +- **Status**: All services operational and verified \ No newline at end of file diff --git a/secrets/dataops/dragonfly_connection.md b/secrets/dataops/dragonfly_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..0b73f1e2a916510aa6b94b1cb5bd08e18747cd1f --- /dev/null +++ b/secrets/dataops/dragonfly_connection.md @@ -0,0 +1,84 @@ +# DragonFly Cache Cluster Connection Guide + +## Cluster Nodes +- **Node 1**: localhost:18000 (Master) +- **Node 2**: localhost:18001 (Replica) +- **Node 3**: localhost:18002 (Replica) + +## Connection Details +- **Protocol**: Redis-compatible +- **Max Memory**: 50GB per node +- **Persistence**: RDB snapshots + +## Redis-CLI Examples +```bash +# Connect to master node +redis-cli -p 18000 + +# Cluster info +redis-cli -p 18000 info memory + +# Set/get example +redis-cli -p 18000 SET nova:session:123 '{"data": "test"}' +redis-cli -p 18000 GET nova:session:123 + +# Monitor all nodes +redis-cli -p 18000 monitor +redis-cli -p 18001 monitor +redis-cli -p 18002 monitor +``` + +## Python Client Example +```python +import redis + +# Connect to DragonFly cluster +# 
DragonFly is Redis-compatible, use standard redis client + +# Master node connection +master = redis.Redis(host='localhost', port=18000, decode_responses=True) + +# Replica connections +replica1 = redis.Redis(host='localhost', port=18001, decode_responses=True) +replica2 = redis.Redis(host='localhost', port=18002, decode_responses=True) + +# Basic operations +master.set('nova:working_memory', 'cached_data', ex=3600) # 1 hour expiration +value = master.get('nova:working_memory') +print(f"Cached value: {value}") + +# Pipeline for batch operations +pipe = master.pipeline() +pipe.set('key1', 'value1') +pipe.set('key2', 'value2') +pipe.execute() +``` + +## Health Checks +```bash +# Check all nodes +redis-cli -p 18000 ping # Should return PONG +redis-cli -p 18001 ping +redis-cli -p 18002 ping + +# Memory usage +redis-cli -p 18000 info memory | grep used_memory_human + +# Persistence status +redis-cli -p 18000 info persistence | grep rdb_last_save_time +``` + +## Configuration Notes +- **Data Directory**: `/data/dragonfly/node*/data/` +- **Snapshot Frequency**: Automatic based on changes +- **Max Memory**: 50GB per node (configurable) +- **Replication**: Async replication between nodes + +## Security +- ❗ Localhost binding only +- ❗ No authentication required +- ❗ Monitor memory usage to prevent OOM +- ❗ Regular snapshot verification recommended + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/dataops/effective_connections.yaml b/secrets/dataops/effective_connections.yaml new file mode 100644 index 0000000000000000000000000000000000000000..146c806205545be6ee12a828be8e2a304fa02646 --- /dev/null +++ b/secrets/dataops/effective_connections.yaml @@ -0,0 +1,68 @@ +# Effective connections file (generated) +# Mode: central +# Generated: 2025-09-04T04:32:56.526021Z + +# DataOps Connections and Credentials +# Managed by Atlas (Head of DataOps) +# Updated: $(date -u +%Y-%m-%dT%H:%M:%SZ) + +qdrant: + host: qdrant.dbops.local + http_port: 
17000 + grpc_port: 17001 + api_key: null + +janusgraph: + gremlin_host: gremlin.dbops.local + gremlin_port: 17002 + backend: scylladb + scylla: + hosts: ["10.0.10.11","10.0.10.12","10.0.10.13"] + port: 9042 + keyspace: janusgraph + username: "scylla_user" + password: "scylla_pass" + +redis_cluster: + nodes: + - host: redis-1.dbops.local + port: 18010 + - host: redis-2.dbops.local + port: 18011 + - host: redis-3.dbops.local + port: 18012 + password: "torrent_cluster_auth" + +dragonfly_cluster: + nodes: + - host: dragonfly-1.dbops.local + port: 18000 + - host: dragonfly-2.dbops.local + port: 18001 + - host: dragonfly-3.dbops.local + port: 18002 + password: "torrent_cluster_auth" + +nats: + url: nats://localhost:4222 + user: null + pass: null + +pulsar: + url: pulsar://localhost:6655 + token: null + +mlops: + training_url: http://localhost:3000/training + inference_url: http://localhost:3000/inference + +# E-FIRE-1 +efire: + orchestrator: + enabled: true + agents: + api_developer: { enabled: true } + content_creator: { enabled: true } + +scylla_user: ${SCYLLA_USER} +scylla_pass: ${SCYLLA_PASS} diff --git a/secrets/dataops/janusgraph_cassandra_connection.md b/secrets/dataops/janusgraph_cassandra_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..b7c4b71043fb8ed251791d4a1637e16ffc85b504 --- /dev/null +++ b/secrets/dataops/janusgraph_cassandra_connection.md @@ -0,0 +1,74 @@ +# JanusGraph + Cassandra Connection Guide + +## Connection Details +- **JanusGraph Host**: localhost +- **JanusGraph Port**: 17002 (Gremlin Server) +- **ScyllaDB Host**: 127.0.0.1 +- **ScyllaDB Port**: 17542 (CQL Proxy) +- **Keyspace**: janusgraph + +## Authentication +- No authentication required (localhost binding only) +- Cassandra uses default configuration + +## Gremlin Console Example +```bash +# Connect to Gremlin Server +gremlin> :remote connect tinkerpop.server conf/remote.yaml session + +# Basic graph operations +gremlin> g = 
traversal().withRemote('conf/remote-objects.yaml') +gremlin> g.V().has('name', 'test').values() +``` + +## Python Client Example +```python +from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection +from gremlin_python.process.anonymous_traversal import traversal +from gremlin_python.process.graph_traversal import __ + +# Connect to JanusGraph +connection = DriverRemoteConnection('ws://localhost:17002/gremlin', 'g') +g = traversal().withRemote(connection) + +# Create vertex +v = g.addV('person').property('name', 'john').next() +print(f"Created vertex: {v}") + +# Query vertices +people = g.V().hasLabel('person').valueMap().toList() +print(f"People: {people}") + +connection.close() +``` + +## ScyllaDB CQLSH Access +```bash +# Connect to ScyllaDB via HAProxy +cqlsh localhost 17542 + +# Show keyspaces +cqlsh> DESCRIBE KEYSPACES; + +# Use janusgraph keyspace +cqlsh> USE janusgraph; + +# Show tables +cqlsh> DESCRIBE TABLES; +``` + +## Configuration Notes +- **Storage Backend**: ScyllaDB (Cassandra-compatible) +- **Compression**: Enabled (LZ4) +- **Consistency**: ONE (for performance) +- **Data Directory**: `/data/adaptai/platform/dbops/data/scylla/` (ScyllaDB data) +- **Backup**: Manual snapshots recommended + +## Security +- ❗ Localhost binding only +- ❗ No authentication on Cassandra +- ❗ Regular compaction monitoring needed +- ❗ Backup graph schema regularly + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/dataops/meilisearch.yaml b/secrets/dataops/meilisearch.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d08a013a763e541bdf3e44b7a94d7a0c9b7b77b --- /dev/null +++ b/secrets/dataops/meilisearch.yaml @@ -0,0 +1 @@ +master_key: AJiyAJA0cQHBD9R2EG6FbyL9foHUmNWLYGjZioV+Aw22w9lYpH29ZSZ_j0ZMkb4_ diff --git a/secrets/dataops/meilisearch_connection.md b/secrets/dataops/meilisearch_connection.md new file mode 100644 index 
0000000000000000000000000000000000000000..bf38bf3d330c295214e3ed77335556454f3d55b4 --- /dev/null +++ b/secrets/dataops/meilisearch_connection.md @@ -0,0 +1,248 @@ +# MeiliSearch Connection Guide + +## Connection Details +- **Host**: localhost +- **Port**: 17005 (HTTP) +- **Protocol**: HTTP/REST API +- **Health Check**: `curl http://localhost:17005/health` +- **Dashboard**: http://localhost:17005 (if web interface enabled) + +## Authentication +- **Master Key**: `VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM` (auto-generated) +- **Environment**: Development (no authentication required for localhost) +- **Security**: Add master key for production use + +## REST API Examples +```bash +# Health check (no auth required) +curl http://localhost:17005/health + +# Get version info +curl http://localhost:17005/version + +# List indexes (requires master key) +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/indexes + +# Create index for Nova memories +curl -X POST "http://localhost:17005/indexes" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + -d '{ + "uid": "nova_memories", + "primaryKey": "memory_id" + }' + +# Add documents to index +curl -X POST "http://localhost:17005/indexes/nova_memories/documents" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + -d '[ + { + "memory_id": "mem_001", + "content": "User authentication successful", + "timestamp": "2025-08-29T01:00:00Z", + "type": "authentication", + "importance": 8 + }, + { + "memory_id": "mem_002", + "content": "Database connection established", + "timestamp": "2025-08-29T01:05:00Z", + "type": "system", + "importance": 9 + } + ]' + +# Search memories +curl -X POST "http://localhost:17005/indexes/nova_memories/search" \ + -H "Content-Type: application/json" \ + -d '{ + "q": "authentication database", + "limit": 10, + "offset": 0 + }' 
+``` + +## Python Client Example +```python +from meilisearch import Client +from meilisearch.index import Index + +# Connect to MeiliSearch +client = Client( + 'http://localhost:17005', + 'VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM' # Master key +) + +# Get server health +health = client.health() +print(f"MeiliSearch health: {health['status']}") + +# Create index for Nova session data +index = client.index('nova_sessions') + +# Configure searchable attributes +index.update_settings({ + 'searchableAttributes': ['session_id', 'user_id', 'actions', 'outcome'], + 'filterableAttributes': ['timestamp', 'status', 'duration'], + 'sortableAttributes': ['timestamp', 'importance'] +}) + +# Add session documents +session_data = [ + { + "session_id": "sess_202508290100", + "user_id": "user_123", + "timestamp": "2025-08-29T01:00:00Z", + "actions": ["login", "query", "logout"], + "duration": 120, + "status": "completed", + "outcome": "success", + "importance": 7 + }, + { + "session_id": "sess_202508290105", + "user_id": "user_456", + "timestamp": "2025-08-29T01:05:00Z", + "actions": ["search", "update", "save"], + "duration": 85, + "status": "completed", + "outcome": "partial_success", + "importance": 6 + } +] + +index.add_documents(session_data) + +# Search sessions +results = index.search('search update', { + 'filter': 'status = "completed"', + 'sort': ['timestamp:desc'], + 'limit': 5 +}) + +print(f"Found {len(results['hits'])} sessions:") +for hit in results['hits']: + print(f"- {hit['session_id']}: {hit['outcome']}") +``` + +## JavaScript/Node.js Example +```javascript +const { MeiliSearch } = require('meilisearch'); + +const client = new MeiliSearch({ + host: 'http://localhost:17005', + apiKey: 'VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM' +}); + +// Create index for user queries +async function setupNovaSearch() { + const index = client.index('user_queries'); + + await index.updateSettings({ + searchableAttributes: ['query_text', 'intent', 'response_summary'], + 
filterableAttributes: ['timestamp', 'success_rate', 'complexity'], + sortableAttributes: ['timestamp', 'usage_count'] + }); + + const queries = [ + { + query_id: 'q_001', + query_text: 'How to configure database connections', + intent: 'technical_configuration', + response_summary: 'Database configuration requires proper connection strings and authentication', + timestamp: '2025-08-29T01:10:00Z', + success_rate: 0.95, + complexity: 'medium', + usage_count: 15 + } + ]; + + await index.addDocuments(queries); + + // Search for similar queries + const results = await index.search('database configuration', { + filter: 'complexity = "medium"', + limit: 10 + }); + + console.log(`Found ${results.hits.length} relevant queries`); +} +``` + +## Configuration Notes +- **Data Directory**: `/data/data/meilisearch/data/` +- **Log Directory**: `/data/data/meilisearch/logs/` +- **Max Memory**: Auto-managed (configurable) +- **Backup Location**: `/data/adaptai/backups/meilisearch/` +- **Index Settings**: Optimized for text search and filtering + +## Index Management +```bash +# Get index stats +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/indexes/nova_memories/stats + +# Update index settings +curl -X PATCH "http://localhost:17005/indexes/nova_memories/settings" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + -d '{ + "rankingRules": [ + "words", "typo", "proximity", "attribute", "sort", "exactness" + ], + "stopWords": ["the", "a", "an", "and", "or", "in"] + }' + +# Delete index +curl -X DELETE "http://localhost:17005/indexes/nova_memories" \ + -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" +``` + +## Health Monitoring +```bash +# Basic health check +curl -s http://localhost:17005/health | jq . 
+ +# System metrics +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/stats + +# Check all indexes +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/indexes | jq . + +# Monitor task queue +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/tasks | jq . +``` + +## Security +- ❗ Localhost binding only +- ❗ Master key: `VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM` +- ❗ Development mode - consider rotating master key for production +- ❗ Monitor disk usage on /data partition +- ❗ Regular backups recommended +- ❗ Consider rate limiting for production use + +## Backup Procedures +```bash +# Manual backup (stop service first) +sudo systemctl stop meilisearch +sudo rsync -av /data/data/meilisearch/data/ /data/adaptai/backups/meilisearch/full_backup/ +sudo systemctl start meilisearch + +# Export index data (per index) +curl -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + http://localhost:17005/indexes/nova_memories/documents \ + > /data/adaptai/backups/meilisearch/nova_memories_export.json + +# Import index data +curl -X POST "http://localhost:17005/indexes/nova_memories/documents" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer VEtAgT0a284o9WMsVHI0567fO6pc5BvqvKeqyhrVzTM" \ + --data-binary @/data/adaptai/backups/meilisearch/nova_memories_export.json +``` + +``` \ No newline at end of file diff --git a/secrets/dataops/nova_services_connection.md b/secrets/dataops/nova_services_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..be3ac161227ef459711e3e9f009de4c8f7e157c4 --- /dev/null +++ b/secrets/dataops/nova_services_connection.md @@ -0,0 +1,125 @@ +# Nova Required Services Connection Guide + +## Service Matrix +| Service | Port | Purpose | Status | +|---------|------|---------|---------| +| PostgreSQL | 5432 | Structured data | ✅ 
Operational | +| MongoDB | 27017 | Document storage | ✅ Operational | +| ChromaDB | 8000 | Semantic memory | ✅ Operational | +| Redis (Default) | 6379 | Working memory | ✅ Operational | + +## PostgreSQL Connection +```bash +# Connect to PostgreSQL +psql -h localhost -p 5432 -U postgres + +# List databases +\l + +# Basic SQL +SELECT version(); +CREATE DATABASE nova_core; +``` + +**Python Example:** +```python +import psycopg2 + +conn = psycopg2.connect( + host="localhost", + port=5432, + user="postgres", + database="nova_core" +) + +cur = conn.cursor() +cur.execute("SELECT NOW();") +print(f"PostgreSQL time: {cur.fetchone()}") +``` + +## MongoDB Connection +```bash +# Connect to MongoDB +mongosh --port 27017 + +# Show databases +show dbs + +# Use nova database +use nova + +# Basic operations +db.sessions.insertOne({session_id: "test", data: {}}) +``` + +**Python Example:** +```python +from pymongo import MongoClient + +client = MongoClient("localhost", 27017) +db = client.nova +collection = db.sessions + +# Insert document +result = collection.insert_one({ + "session_id": "test_123", + "timestamp": "2025-08-24", + "data": {"status": "active"} +}) +print(f"Inserted ID: {result.inserted_id}") +``` + +## ChromaDB Connection +```bash +# Health check +curl http://localhost:8000/api/v1/heartbeat + +# List collections +curl http://localhost:8000/api/v1/collections +``` + +**Python Example:** +```python +import chromadb + +client = chromadb.HttpClient(host="localhost", port=8000) + +# Create collection +collection = client.create_collection("nova_memories") + +# Add embeddings +collection.add( + documents=["Memory example 1", "Memory example 2"], + metadatas=[{"type": "fact"}, {"type": "experience"}], + ids=["id1", "id2"] +) + +# Query similar memories +results = collection.query( + query_texts=["example memory"], + n_results=2 +) +print(f"Similar memories: {results}") +``` + +## Health Monitoring +```bash +# PostgreSQL +pg_isready -h localhost -p 5432 + +# MongoDB +mongosh 
--eval "db.adminCommand('ping')" --quiet + +# ChromaDB +curl -s http://localhost:8000/api/v1/heartbeat | jq . +``` + +## Security Notes +- ❗ All services bound to localhost only +- ❗ No authentication configured (development) +- ❗ Regular backup procedures needed +- ❗ Monitor disk usage on /data partition +- ❗ Consider adding authentication for production + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/dataops/ports.yaml b/secrets/dataops/ports.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d8662e827cf8a6ee5ee6d8d9e540177bb5302fe --- /dev/null +++ b/secrets/dataops/ports.yaml @@ -0,0 +1,52 @@ +qdrant: + http: 17000 + grpc: 17001 +gremlin: + ws: 17002 +scylla: + # Policy port for clients; proxied to native 9042 on cluster + cql: 17542 +dragonfly: + nodes: + - 18000 + - 18001 + - 18002 +redis_cluster: + nodes: + - 18010 + - 18011 + - 18012 + +# --- Port Policy & Reserved Assignments --- +# 17xxx = databases/storage/engines (data-plane) +# 18xxx = comms/coordination/tasking (control-plane) + +postgres: + tcp: 17532 +milvus: + grpc: 17530 + http: 17591 +meilisearch: + http: 17700 +opensearch: + http: 17920 +elasticsearch: + http: 17921 +neo4j: + bolt: 17687 +influxdb: + http: 17806 +minio: + api: 17580 + console: 17581 +ipfs: + api: 17501 + +# Comms / Coordination +etcd: + client: 18150 +nats: + client: 18222 +pulsar: + broker: 18650 + admin_http: 18880 diff --git a/secrets/dataops/qdrant_connection.md b/secrets/dataops/qdrant_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..34aaaba238ec890853ebc3a6f1f61755990e6b36 --- /dev/null +++ b/secrets/dataops/qdrant_connection.md @@ -0,0 +1,57 @@ +# Qdrant Vector Database Connection Guide + +## Connection Details +- **Host**: localhost +- **Port**: 17000 +- **Protocol**: HTTP +- **Health Check**: `curl http://localhost:17000/collections` + +## Authentication +- No authentication required (localhost binding only) +- All 
operations are local to the server + +## Python Client Example +```python +from qdrant_client import QdrantClient + +client = QdrantClient(host="localhost", port=17000) + +# Check health +collections = client.get_collections() +print(f"Available collections: {collections}") + +# Create collection (if needed) +client.create_collection( + collection_name="nova_memory", + vectors_config=VectorParams(size=1536, distance=Distance.COSINE) +) +``` + +## REST API Examples +```bash +# List collections +curl http://localhost:17000/collections + +# Get collection info +curl http://localhost:17000/collections/nova_memory + +# Search vectors +curl -X POST http://localhost:17000/collections/nova_memory/points/search \ + -H "Content-Type: application/json" \ + -d '{"vector": [0.1, 0.2, ...], "limit": 10}' +``` + +## Configuration Notes +- Data directory: `/data/qdrant/storage/` +- Max memory: 50GB (configurable) +- No external network exposure +- Backup location: `/data/adaptai/backups/qdrant/` + +## Security +- ❗ Localhost binding only +- ❗ No authentication mechanism +- ❗ Regular backups recommended +- ❗ Monitor disk usage on /data partition + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/dataops/redis_cluster_connection.md b/secrets/dataops/redis_cluster_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..962531bfc231311bd1eca008b2f988bfe753f631 --- /dev/null +++ b/secrets/dataops/redis_cluster_connection.md @@ -0,0 +1,78 @@ +# Redis Cluster Connection Guide + +## Cluster Nodes +- **Node 1**: localhost:18010 +- **Node 2**: localhost:18011 +- **Node 3**: localhost:18012 + +## Connection Details +- **Protocol**: Redis Cluster +- **Max Memory**: 20GB per node +- **Persistence**: AOF + RDB +- **Cluster Enabled**: Yes + +## Redis-CLI Examples +```bash +# Connect to cluster +redis-cli -c -p 18010 + +# Cluster info +redis-cli -p 18010 cluster info + +# Cluster nodes +redis-cli -p 18010 cluster nodes +``` + 
+## Python Client Example +```python +import redis +from redis.cluster import RedisCluster + +# Connect to Redis Cluster +cluster = RedisCluster( + startup_nodes=[ + {"host": "localhost", "port": 18010}, + {"host": "localhost", "port": 18011}, + {"host": "localhost", "port": 18012} + ], + decode_responses=True +) + +# Cluster operations +cluster.set('cluster:key', 'cluster_value') +value = cluster.get('cluster:key') +print(f"Cluster value: {value}") +``` + +## Health Checks +```bash +# Check cluster health +redis-cli -p 18010 cluster info | grep cluster_state + +# Check all nodes +redis-cli -p 18010 ping # Should return PONG +redis-cli -p 18011 ping +redis-cli -p 18012 ping + +# Memory usage +redis-cli -p 18010 info memory | grep used_memory_human + +# Persistence status +redis-cli -p 18010 info persistence | grep aof_enabled +``` + +## Configuration Notes +- **Data Directory**: `/data/redis/node*/data/` +- **Append Only File**: Enabled for durability +- **Max Memory**: 20GB per node +- **Cluster Replication**: Each node has replicas + +## Security +- ❗ Localhost binding only +- ❗ No authentication required +- ❗ Monitor cluster state regularly +- ❗ Verify AOF persistence working +- ❗ Regular backup of RDB files recommended + +--- +**Last Updated:** September 4, 2025 \ No newline at end of file diff --git a/secrets/signalcore/nats.yaml b/secrets/signalcore/nats.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8dd5029c4a24efa70c1a8b267f2d423fd8ffed4a --- /dev/null +++ b/secrets/signalcore/nats.yaml @@ -0,0 +1,2 @@ +user: signalcore +pass: Jsm8FDsuRRrHQI1rwBjlqVMd5eFZvSVOnWE1oeYm8E= diff --git a/secrets/signalcore/nats_connection.md b/secrets/signalcore/nats_connection.md new file mode 100644 index 0000000000000000000000000000000000000000..2351079b88f14fe4da94c100234f396101a5aa28 --- /dev/null +++ b/secrets/signalcore/nats_connection.md @@ -0,0 +1,155 @@ +# NATS Messaging System Connection Guide + +## Service Details +- **Port**: 18222 (client 
connections) +- **Monitoring**: 18222 (HTTP monitoring) +- **Server Name**: nova-nats-01 +- **Storage**: /data/adaptai/platform/dbops/data/nats/ +- **Logs**: /data/adaptai/platform/dbops/logs/nats/nats.log + +## Connection Examples + +### Command Line Interface +```bash +# Connect to NATS server +nats sub nova.> +nats pub nova.events '{"event": "test", "data": "message"}' + +# Stream management +nats stream ls +nats stream info NOVA_EVENTS +nats consumer add NOVA_EVENTS NOVA_CONSUMER + +# Server monitoring +curl http://localhost:18222/varz +curl http://localhost:18222/jsz +``` + +### Python Client Example +```python +import asyncio +import nats + +async def main(): + # Connect to NATS server + nc = await nats.connect("nats://localhost:18222") + + # Publish message + await nc.publish("nova.events", b'{"event": "test", "data": "message"}') + + # Subscribe to messages + async def message_handler(msg): + print(f"Received message: {msg.data.decode()}") + + sub = await nc.subscribe("nova.>", cb=message_handler) + + # Keep connection alive + await asyncio.sleep(3600) + await nc.close() + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### JetStream Python Example +```python +import asyncio +import nats +from nats.errors import TimeoutError + +async def main(): + nc = await nats.connect("nats://localhost:18222") + js = nc.jetstream() + + # Publish to stream + ack = await js.publish("nova.events.test", b'{"test": "data"}') + print(f"Published: {ack.seq}") + + # Create consumer + sub = await js.subscribe("nova.events.>") + + try: + msg = await sub.next_msg(timeout=5) + print(f"Received: {msg.data.decode()}") + await msg.ack() + except TimeoutError: + print("No messages") + + await nc.close() + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Health Checks +```bash +# Check server status +curl -s http://localhost:18222/varz | jq .version + +# Check JetStream status +curl -s http://localhost:18222/jsz | jq .memory + +# Test connection +nats server ping + 
+# List all streams +nats stream ls +``` + +## Stream Configuration +**NOVA_EVENTS Stream**: +- Subjects: `nova.>` (all Nova events) +- Retention: Limits (keep all messages) +- Storage: File (persistent) +- Max Age: 1 year +- Max Messages: Unlimited +- Max Bytes: Unlimited + +## Security Notes +- ❗ Bound to localhost only (127.0.0.1) +- ❗ No authentication configured (development) +- ❗ Monitor disk usage on /data/adaptai/platform/dbops/data/nats/ +- ❗ Regular backup of JetStream data recommended + +## Installation Details +- **Binary Location**: /data/adaptai/platform/dbops/binaries/nats/nats-server +- **Configuration**: /data/adaptai/platform/dbops/configs/nats/nats.conf +- **Data Directory**: /data/adaptai/platform/dbops/data/nats/ +- **Log Directory**: /data/adaptai/platform/dbops/logs/nats/ + +## Service Management +NATS is managed by `supervisord`. + +```bash +# Check if running via supervisord +supervisorctl -s unix:///data/adaptai/platform/dbops/run/supervisor.sock status nats + +# Check if port is listening +netstat -tln | grep 18222 +``` + +## Performance Monitoring +- Monitor memory usage: `nats server report mem` +- Monitor connections: `nats server report connections` +- Monitor JetStream: `nats stream report` +- Check disk usage: `du -sh /data/nats/data/` + +## Integration Points +- Nova instances can publish events to `nova.>` subjects +- Services can subscribe to specific event patterns +- JetStream provides persistent message storage +- Ideal for inter-service communication and event sourcing + +## Backup Procedures +```bash +# Backup JetStream data (if configured) +rsync -av /data/adaptai/platform/dbops/data/nats/ /data/adaptai/backups/nats-$(date +%Y%m%d)/ + +# Backup configuration +cp /data/adaptai/platform/dbops/configs/nats/nats.conf /data/adaptai/backups/nats-config-$(date +%Y%m%d).conf +``` + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Maintained by: Atlas, Head of DataOps +Last Updated: September 4, 2025 
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ \ No newline at end of file